Example #1
def overrideupdate(orig, ui, repo, *pats, **opts):
    lfdirstate = lfutil.openlfdirstate(ui, repo)
    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
        False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

    # Need to lock between the standins getting updated and their
    # largefiles getting updated
    wlock = repo.wlock()
    try:
        if opts['check']:
            mod = len(modified) > 0
            for lfile in unsure:
                standin = lfutil.standin(lfile)
                if repo['.'][standin].data().strip() != \
                        lfutil.hashfile(repo.wjoin(lfile)):
                    mod = True
                else:
                    lfdirstate.normal(lfile)
            lfdirstate.write()
            if mod:
                raise util.Abort(_('uncommitted changes'))
        # XXX handle removed differently
        if not opts['clean']:
            for lfile in unsure + modified + added:
                lfutil.updatestandin(repo, lfutil.standin(lfile))
    finally:
        wlock.release()
    return orig(ui, repo, *pats, **opts)
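
For context, a wrapper with this (orig, ui, repo, ...) shape is normally installed through Mercurial's extension API. A minimal sketch, assuming overrideupdate lives in an extension module with the standard uisetup hook:

from mercurial import commands, extensions

def uisetup(ui):
    # wrapcommand swaps overrideupdate in for 'hg update'; the original
    # command implementation is passed to the wrapper as 'orig'.
    extensions.wrapcommand(commands.table, 'update', overrideupdate)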
Example #2
def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs):
    if not shallowutil.isenabled(repo):
        return orig(repo, outgoing, version, source, *args, **kwargs)

    original = repo.shallowmatch
    try:
        # if serving, only send files the client has patterns for
        if source == 'serve':
            bundlecaps = kwargs.get(r'bundlecaps')
            includepattern = None
            excludepattern = None
            for cap in (bundlecaps or []):
                if cap.startswith("includepattern="):
                    raw = cap[len("includepattern="):]
                    if raw:
                        includepattern = raw.split('\0')
                elif cap.startswith("excludepattern="):
                    raw = cap[len("excludepattern="):]
                    if raw:
                        excludepattern = raw.split('\0')
            if includepattern or excludepattern:
                repo.shallowmatch = match.match(repo.root, '', None,
                                                includepattern, excludepattern)
            else:
                repo.shallowmatch = match.always()
        return orig(repo, outgoing, version, source, *args, **kwargs)
    finally:
        repo.shallowmatch = original
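
The bundlecaps entries parsed above are plain "name=value" capability strings whose values are NUL-joined pattern lists. A hypothetical client-side encoder (encodeshallowcaps is not a real remotefilelog function) would mirror that parsing:

def encodeshallowcaps(includepattern, excludepattern):
    # Inverse of the loop above: join each pattern list with NUL,
    # which cannot appear inside a pattern itself.
    caps = []
    if includepattern:
        caps.append("includepattern=" + "\0".join(includepattern))
    if excludepattern:
        caps.append("excludepattern=" + "\0".join(excludepattern))
    return caps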
Example #3
    def diff(self, m2, match=None, clean=False):
        self.load()
        if isinstance(m2, overlaymanifest):
            m2.load()

        # below code copied from manifest.py:manifestdict.diff
        diff = {}

        if match is None:
            match = matchmod.always(b'', b'')
        for fn, n1 in self.iteritems():
            if not match(fn):
                continue
            fl1 = self._flags.get(fn)
            n2 = m2.get(fn, None)
            fl2 = m2.flags(fn)
            if n2 is None:
                fl2 = b''
            if n1 != n2 or fl1 != fl2:
                diff[fn] = ((n1, fl1), (n2, fl2))
            elif clean:
                diff[fn] = None

        for fn, n2 in m2.iteritems():
            if fn not in self:
                if not match(fn):
                    continue
                fl2 = m2.flags(fn)
                diff[fn] = ((None, b''), (n2, fl2))

        return diff
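
Illustrative only, assuming m1 and m2 are loaded overlaymanifest instances: the returned dictionary maps each differing path to a pair of (node, flags) tuples, with (None, b'') standing in for a missing side and None marking clean entries when clean=True.

d = m1.diff(m2, clean=True)
# e.g. {b'a.txt': ((b'<n1>', b''), (b'<n2>', b'x')),  # flags changed
#       b'gone':  ((b'<n1>', b''), (None, b'')),      # removed in m2
#       b'same':  None}                               # clean entry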
Example #4
def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
                     similarity=None):
    if not lfutil.islfilesrepo(repo):
        return orig(repo, pats, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
        False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

    # Call into the normal remove code, but have the removal of the standins
    # handled by the original addremove.  Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if missing:
        m = [repo.wjoin(f) for f in missing]
        repo._isaddremove = True
        removelargefiles(repo.ui, repo, *m, **opts)
        repo._isaddremove = False
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    addlargefiles(repo.ui, repo, *pats, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest.  Make sure it doesn't do anything with
    # largefiles by installing a matcher that will ignore them.
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(repo, pats, opts, dry_run, similarity)
    restorematchfn()
    return result
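
installnormalfilesmatchfn/restorematchfn implement a save-and-restore monkey patch around the orig() call. A simplified approximation of that pattern (not the actual largefiles helpers; lfutil.standin is assumed from the example above):

from mercurial import scmutil

_oldmatch = []

def installnormalfilesmatchfn(manifest):
    # Replace scmutil.match with a variant whose matcher skips any file
    # that has a standin in the given manifest.
    _oldmatch.append(scmutil.match)
    def overridematch(ctx, pats=(), opts=None, globbed=False,
                      default='relpath'):
        m = _oldmatch[-1](ctx, pats, opts or {}, globbed, default)
        origmatchfn = m.matchfn
        m.matchfn = lambda f: (lfutil.standin(f) not in manifest
                               and origmatchfn(f))
        return m
    scmutil.match = overridematch

def restorematchfn():
    # Undo the patch installed above.
    scmutil.match = _oldmatch.pop()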
Example #5
def getchangegroup(orig, repo, source, heads=None, common=None, bundlecaps=None):
    if requirement not in repo.requirements:
        return orig(repo, source, heads=heads, common=common,
                    bundlecaps=bundlecaps)

    original = repo.shallowmatch
    try:
        # if serving, only send files the client has patterns for
        if source == 'serve':
            includepattern = None
            excludepattern = None
            for cap in (bundlecaps or []):
                if cap.startswith("includepattern="):
                    raw = cap[len("includepattern="):]
                    if raw:
                        includepattern = raw.split('\0')
                elif cap.startswith("excludepattern="):
                    raw = cap[len("excludepattern="):]
                    if raw:
                        excludepattern = raw.split('\0')
            if includepattern or excludepattern:
                repo.shallowmatch = match.match(repo.root, '', None,
                    includepattern, excludepattern)
            else:
                repo.shallowmatch = match.always(repo.root, '')
        return orig(repo, source, heads, common, bundlecaps)
    finally:
        repo.shallowmatch = original
Example #6
def scmutiladdremove(orig,
                     repo,
                     pats=[],
                     opts={},
                     dry_run=None,
                     similarity=None):
    if not lfutil.islfilesrepo(repo):
        return orig(repo, pats, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
                          False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

    # Call into the normal remove code, but have the removal of the standins
    # handled by the original addremove.  Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if missing:
        m = [repo.wjoin(f) for f in missing]
        repo._isaddremove = True
        removelargefiles(repo.ui, repo, *m, **opts)
        repo._isaddremove = False
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    addlargefiles(repo.ui, repo, *pats, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest.  Make sure it doesn't do anything with
    # largefiles by installing a matcher that will ignore them.
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(repo, pats, opts, dry_run, similarity)
    restorematchfn()
    return result
Example #7
def overrideupdate(orig, ui, repo, *pats, **opts):
    lfdirstate = lfutil.openlfdirstate(ui, repo)
    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
                          False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

    # Need to lock between the standins getting updated and their
    # largefiles getting updated
    wlock = repo.wlock()
    try:
        if opts['check']:
            mod = len(modified) > 0
            for lfile in unsure:
                standin = lfutil.standin(lfile)
                if repo['.'][standin].data().strip() != \
                        lfutil.hashfile(repo.wjoin(lfile)):
                    mod = True
                else:
                    lfdirstate.normal(lfile)
            lfdirstate.write()
            if mod:
                raise util.Abort(_('uncommitted local changes'))
        # XXX handle removed differently
        if not opts['clean']:
            for lfile in unsure + modified + added:
                lfutil.updatestandin(repo, lfutil.standin(lfile))
    finally:
        wlock.release()
    return orig(ui, repo, *pats, **opts)
Example #8
    def status(self, paths=(), *k, **kw):
        # XXX: recursive kwargs
        # XXX: merge conflicts ?!
        names = (
            'modified',
            'added',
            'removed',
            'missing',
            'unknown',
            'ignored',
            'clean',
        )

        if paths:
            # XXX: investigate cwd arg
            matcher = match(
                self.repo.root,
                self.repo.root,
                paths,
                default='relpath',
            )
        else:
            matcher = always(self.repo.root, self.repo.root)

        state_files = self.repo.status(
            match=matcher,
            ignored=True,
            unknown=True,
            clean=True,
        )
        for state, files in zip(names, state_files):
            for file in files:
                yield StatedPath(file, state, base=self.path)
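
Hypothetical usage, assuming wd is an instance of the class above and StatedPath keeps the state passed to it as an attribute:

for entry in wd.status(paths=('src/module',)):
    # each yielded item pairs a path with one of the 'names' categories
    print(entry.state, entry)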
Example #9
def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs):
    if requirement not in repo.requirements:
        return orig(repo, outgoing, version, source, *args, **kwargs)

    original = repo.shallowmatch
    try:
        # if serving, only send files the client has patterns for
        if source == 'serve':
            bundlecaps = kwargs.get('bundlecaps')
            includepattern = None
            excludepattern = None
            for cap in (bundlecaps or []):
                if cap.startswith("includepattern="):
                    raw = cap[len("includepattern="):]
                    if raw:
                        includepattern = raw.split('\0')
                elif cap.startswith("excludepattern="):
                    raw = cap[len("excludepattern="):]
                    if raw:
                        excludepattern = raw.split('\0')
            if includepattern or excludepattern:
                repo.shallowmatch = match.match(repo.root, '', None,
                    includepattern, excludepattern)
            else:
                repo.shallowmatch = match.always(repo.root, '')
        return orig(repo, outgoing, version, source, *args, **kwargs)
    finally:
        repo.shallowmatch = original
Example #10
def diffs(repo, tmpl, ctx, basectx, files, parity, style):
    def countgen():
        start = 1
        while True:
            yield start
            start += 1

    blockcount = countgen()

    def prettyprintlines(diff, blockno):
        for lineno, l in enumerate(diff.splitlines(True)):
            difflineno = "%d.%d" % (blockno, lineno + 1)
            if l.startswith('+'):
                ltype = "difflineplus"
            elif l.startswith('-'):
                ltype = "difflineminus"
            elif l.startswith('@'):
                ltype = "difflineat"
            else:
                ltype = "diffline"
            yield tmpl(ltype,
                       line=l,
                       lineno=lineno + 1,
                       lineid="l%s" % difflineno,
                       linenumber="% 8s" % difflineno)

    if files:
        m = match.exact(repo.root, repo.getcwd(), files)
    else:
        m = match.always(repo.root, repo.getcwd())

    diffopts = patch.diffopts(repo.ui, untrusted=True)
    if basectx is None:
        parents = ctx.parents()
        if parents:
            node1 = parents[0].node()
        else:
            node1 = nullid
    else:
        node1 = basectx.node()
    node2 = ctx.node()

    block = []
    for chunk in patch.diff(repo, node1, node2, m, opts=diffopts):
        if chunk.startswith('diff') and block:
            blockno = blockcount.next()
            yield tmpl('diffblock',
                       parity=parity.next(),
                       blockno=blockno,
                       lines=prettyprintlines(''.join(block), blockno))
            block = []
        if chunk.startswith('diff') and style != 'raw':
            chunk = ''.join(chunk.splitlines(True)[1:])
        block.append(chunk)
    blockno = blockcount.next()
    yield tmpl('diffblock',
               parity=parity.next(),
               blockno=blockno,
               lines=prettyprintlines(''.join(block), blockno))
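
The parity argument is an alternating generator that hgweb uses for row striping in its templates. A minimal stand-in (the real paritygen also takes an offset and differs in details):

def paritygen(stripecount=1):
    # yields 0, 1, 0, 1, ... flipping after every 'stripecount' items
    parity = 0
    count = 0
    while True:
        yield parity
        count += 1
        if count == stripecount:
            count = 0
            parity = 1 - parity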
Example #11
    def refresh(self):
        hglib.invalidaterepo(self.repo)
        matcher = match.always(self.repo.root, self.repo.root)
        try:
            unknown = self.repo.status(match=matcher, unknown=True)[4]
        except (EnvironmentError, util.Abort), inst:
            gdialog.Prompt(_('Error while reading status'),
                           hglib.toutf(str(inst)), self).run()
            return
Example #12
def diffs(repo, tmpl, ctx, basectx, files, parity, style):

    def countgen():
        start = 1
        while True:
            yield start
            start += 1

    blockcount = countgen()
    def prettyprintlines(diff, blockno):
        for lineno, l in enumerate(diff.splitlines(True)):
            difflineno = "%d.%d" % (blockno, lineno + 1)
            if l.startswith('+'):
                ltype = "difflineplus"
            elif l.startswith('-'):
                ltype = "difflineminus"
            elif l.startswith('@'):
                ltype = "difflineat"
            else:
                ltype = "diffline"
            yield tmpl(ltype,
                       line=l,
                       lineno=lineno + 1,
                       lineid="l%s" % difflineno,
                       linenumber="% 8s" % difflineno)

    if files:
        m = match.exact(repo.root, repo.getcwd(), files)
    else:
        m = match.always(repo.root, repo.getcwd())

    diffopts = patch.diffopts(repo.ui, untrusted=True)
    if basectx is None:
        parents = ctx.parents()
        if parents:
            node1 = parents[0].node()
        else:
            node1 = nullid
    else:
        node1 = basectx.node()
    node2 = ctx.node()

    block = []
    for chunk in patch.diff(repo, node1, node2, m, opts=diffopts):
        if chunk.startswith('diff') and block:
            blockno = blockcount.next()
            yield tmpl('diffblock', parity=parity.next(), blockno=blockno,
                       lines=prettyprintlines(''.join(block), blockno))
            block = []
        if chunk.startswith('diff') and style != 'raw':
            chunk = ''.join(chunk.splitlines(True)[1:])
        block.append(chunk)
    blockno = blockcount.next()
    yield tmpl('diffblock', parity=parity.next(), blockno=blockno,
               lines=prettyprintlines(''.join(block), blockno))
Example #13
def lfdirstatestatus(lfdirstate, repo, rev):
    match = match_.always(repo.root, repo.getcwd())
    s = lfdirstate.status(match, [], False, False, False)
    unsure, modified, added, removed, missing, unknown, ignored, clean = s
    for lfile in unsure:
        if repo[rev][standin(lfile)].data().strip() != \
                hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
Example #14
def lfdirstate_status(lfdirstate, repo, rev):
    match = match_.always(repo.root, repo.getcwd())
    s = lfdirstate.status(match, [], False, False, False)
    unsure, modified, added, removed, missing, unknown, ignored, clean = s
    for lfile in unsure:
        if repo[rev][standin(lfile)].data().strip() != \
                hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
Example #15
    def unknown_thread(self, q):
        hglib.invalidaterepo(self.repo)
        matcher = match.always(self.repo.root, self.repo.root)
        status = self.repo.status(node1=self.repo.dirstate.parents()[0],
                                  node2=None, match=matcher, ignored=False,
                                  clean=False, unknown=True)
        (modified, added, removed, deleted, unknown, ignored, clean) = status
        for u in unknown:
            q.put(u)
        for a in added:
            if not self.repo.dirstate.copied(a):
                q.put(a)
Example #16
    def applyone(self, repo, node, cl, patchfile, merge=False, log=False,
                 filter=None):
        '''apply the patch in patchfile to the repository as a transplant'''
        (manifest, user, (time, timezone), files, message) = cl[:5]
        date = "%d %d" % (time, timezone)
        extra = {'transplant_source': node}
        if filter:
            (user, date, message) = self.filter(filter, node, cl, patchfile)

        if log:
            # we don't translate messages inserted into commits
            message += '\n(transplanted from %s)' % nodemod.hex(node)

        self.ui.status(_('applying %s\n') % nodemod.short(node))
        self.ui.note('%s %s\n%s\n' % (user, date, message))

        if not patchfile and not merge:
            raise error.Abort(_('can only omit patchfile if merging'))
        if patchfile:
            try:
                files = set()
                patch.patch(self.ui, repo, patchfile, files=files, eolmode=None)
                files = list(files)
            except Exception as inst:
                seriespath = os.path.join(self.path, 'series')
                if os.path.exists(seriespath):
                    os.unlink(seriespath)
                p1 = repo.dirstate.p1()
                p2 = node
                self.log(user, date, message, p1, p2, merge=merge)
                self.ui.write(str(inst) + '\n')
                raise TransplantError(_('fix up the working directory and run '
                                        'hg transplant --continue'))
        else:
            files = None
        if merge:
            p1, p2 = repo.dirstate.parents()
            repo.setparents(p1, node)
            m = match.always(repo.root, '')
        else:
            m = match.exact(repo.root, '', files)

        n = repo.commit(message, user, date, extra=extra, match=m,
                        editor=self.getcommiteditor())
        if not n:
            self.ui.warn(_('skipping emptied changeset %s\n') %
                           nodemod.short(node))
            return None
        if not merge:
            self.transplants.set(n, node)

        return n
Example #17
    def _walkstreamfiles(orig, repo):
        if state.shallowremote:
            # if we are shallow ourselves, stream our local commits
            if shallowrepo.requirement in repo.requirements:
                striplen = len(repo.store.path) + 1
                readdir = repo.store.rawvfs.readdir
                visit = [os.path.join(repo.store.path, 'data')]
                while visit:
                    p = visit.pop()
                    for f, kind, st in readdir(p, stat=True):
                        fp = p + '/' + f
                        if kind == stat.S_IFREG:
                            if not fp.endswith('.i') and not fp.endswith('.d'):
                                n = util.pconvert(fp[striplen:])
                                yield (store.decodedir(n), n, st.st_size)
                        if kind == stat.S_IFDIR:
                            visit.append(fp)

            shallowtrees = repo.ui.configbool('remotefilelog', 'shallowtrees',
                                              False)
            if 'treemanifest' in repo.requirements and not shallowtrees:
                for (u, e, s) in repo.store.datafiles():
                    if (u.startswith('meta/') and
                        (u.endswith('.i') or u.endswith('.d'))):
                        yield (u, e, s)

            # Return .d and .i files that do not match the shallow pattern
            match = state.match
            if match and not match.always():
                for (u, e, s) in repo.store.datafiles():
                    f = u[5:-2]  # trim data/...  and .i/.d
                    if not state.match(f):
                        yield (u, e, s)

            for x in repo.store.topfiles():
                if shallowtrees and x[0][:15] == '00manifesttree.':
                    continue
                if state.noflatmf and x[0][:11] == '00manifest.':
                    continue
                yield x

        elif shallowrepo.requirement in repo.requirements:
            # don't allow cloning from a shallow repo to a full repo
            # since it would require fetching every version of every
            # file in order to create the revlogs.
            raise error.Abort(_("Cannot clone from a shallow repo "
                                "to a full repo."))
        else:
            for x in orig(repo):
                yield x
Example #18
    def _walkstreamfiles(orig, repo, matcher=None):
        if state.shallowremote:
            # if we are shallow ourselves, stream our local commits
            if shallowutil.isenabled(repo):
                striplen = len(repo.store.path) + 1
                readdir = repo.store.rawvfs.readdir
                visit = [os.path.join(repo.store.path, b'data')]
                while visit:
                    p = visit.pop()
                    for f, kind, st in readdir(p, stat=True):
                        fp = p + b'/' + f
                        if kind == stat.S_IFREG:
                            if not fp.endswith(b'.i') and not fp.endswith(
                                    b'.d'):
                                n = util.pconvert(fp[striplen:])
                                d = store.decodedir(n)
                                t = store.FILETYPE_OTHER
                                yield (t, d, n, st.st_size)
                        if kind == stat.S_IFDIR:
                            visit.append(fp)

            if scmutil.istreemanifest(repo):
                for (t, u, e, s) in repo.store.datafiles():
                    if u.startswith(b'meta/') and (u.endswith(b'.i')
                                                   or u.endswith(b'.d')):
                        yield (t, u, e, s)

            # Return .d and .i files that do not match the shallow pattern
            match = state.match
            if match and not match.always():
                for (t, u, e, s) in repo.store.datafiles():
                    f = u[5:-2]  # trim data/...  and .i/.d
                    if not state.match(f):
                        yield (t, u, e, s)

            for x in repo.store.topfiles():
                if state.noflatmf and x[1][:11] == b'00manifest.':
                    continue
                yield x

        elif shallowutil.isenabled(repo):
            # don't allow cloning from a shallow repo to a full repo
            # since it would require fetching every version of every
            # file in order to create the revlogs.
            raise error.Abort(
                _(b"Cannot clone from a shallow repo to a full repo."))
        else:
            for x in orig(repo, matcher):
                yield x
Example #19
def lfdirstatestatus(lfdirstate, repo, rev):
    match = match_.always(repo.root, repo.getcwd())
    s = lfdirstate.status(match, [], False, False, False)
    unsure, modified, added, removed, missing, unknown, ignored, clean = s
    for lfile in unsure:
        try:
            fctx = repo[rev][standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
Example #20
def lfdirstatestatus(lfdirstate, repo):
    wctx = repo['.']
    match = match_.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
Example #21
def lfdirstatestatus(lfdirstate, repo):
    wctx = repo['.']
    match = match_.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
Example #22
def bfdirstate_status(bfdirstate, repo, rev):
    wlock = repo.wlock()
    try:
        match = match_.always(repo.root, repo.getcwd())
        s = bfdirstate.status(match, [], False, False, False)
        unsure, modified, added, removed, missing, unknown, ignored, clean = s
        for bfile in unsure:
            if repo[rev][standin(bfile)].data().strip() != hashfile(repo.wjoin(bfile)):
                modified.append(bfile)
            else:
                clean.append(bfile)
                bfdirstate.normal(unixpath(bfile))
        bfdirstate.write()
    finally:
        wlock.release()
    return (modified, added, removed, missing, unknown, ignored, clean)
Example #23
def lfdirstatestatus(lfdirstate, repo):
    pctx = repo['.']
    match = matchmod.always()
    unsure, s = lfdirstate.status(match, subrepos=[], ignored=False,
                                  clean=False, unknown=False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
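
Note the zero-argument matchmod.always() here versus match_.always(repo.root, repo.getcwd()) in the older examples: the root/cwd parameters were dropped from the matcher constructors around Mercurial 5.0. Code that must run against both APIs could probe for the signature; a hedged sketch:

try:
    matcher = matchmod.always()  # modern signature, no arguments
except TypeError:
    # older releases required the repo root and cwd
    matcher = matchmod.always(repo.root, repo.getcwd())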
Example #24
def bfdirstate_status(bfdirstate, repo, rev):
    wlock = repo.wlock()
    try:
        match = match_.always(repo.root, repo.getcwd())
        s = bfdirstate.status(match, [], False, False, False)
        (unsure, modified, added, removed, missing, unknown, ignored,
         clean) = s
        for bfile in unsure:
            if repo[rev][standin(bfile)].data().strip() != hashfile(
                    repo.wjoin(bfile)):
                modified.append(bfile)
            else:
                clean.append(bfile)
                bfdirstate.normal(unixpath(bfile))
        bfdirstate.write()
    finally:
        wlock.release()
    return (modified, added, removed, missing, unknown, ignored, clean)
Example #25
def _commit(orig, self, *args, **kwargs):
    if _disabled[0]:
        return orig(self, *args, **kwargs)

    with self.wlock(), self.lock(), self.transaction('dirsynccommit'):
        matcher = args[3] if len(args) >= 4 else kwargs.get('match')
        matcher = matcher or matchmod.always(self.root, '')

        mirroredfiles = _updateworkingcopy(self, matcher)
        if mirroredfiles and not matcher.always():
            origmatch = matcher.matchfn
            def extramatches(path):
                return path in mirroredfiles or origmatch(path)
            matcher.matchfn = extramatches
            matcher._files.extend(mirroredfiles)
            matcher._fileset.update(mirroredfiles)

        return orig(self, *args, **kwargs)
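
A wrapper with this (orig, self, ...) shape is typically attached with extensions.wrapfunction. A minimal sketch, assuming the extension defines the usual uisetup hook:

from mercurial import extensions, localrepo

def uisetup(ui):
    # wrapfunction passes the original localrepository.commit to
    # _commit as its first argument
    extensions.wrapfunction(localrepo.localrepository, 'commit', _commit)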
Example #26
    def diff(self, m2, match=None, clean=False):
        # Older mercurial clients used diff(m2, clean=False). If a caller
        # failed to specify clean as a keyword arg, it might get passed as
        # match here.
        if isinstance(match, bool):
            clean = match
            match = None

        self.load()
        if isinstance(m2, overlaymanifest):
            m2.load()

        # below code copied from manifest.py:manifestdict.diff
        diff = {}

        try:
            m2flagget = m2.flags
        except AttributeError:
            # Mercurial <= 3.3
            m2flagget = m2._flags.get

        if match is None:
            match = matchmod.always('', '')
        for fn, n1 in self.iteritems():
            if not match(fn):
                continue
            fl1 = self._flags.get(fn, '')
            n2 = m2.get(fn, None)
            fl2 = m2flagget(fn, '')
            if n2 is None:
                fl2 = ''
            if n1 != n2 or fl1 != fl2:
                diff[fn] = ((n1, fl1), (n2, fl2))
            elif clean:
                diff[fn] = None

        for fn, n2 in m2.iteritems():
            if fn not in self:
                if not match(fn):
                    continue
                fl2 = m2flagget(fn, '')
                diff[fn] = ((None, ''), (n2, fl2))

        return diff
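
For illustration, calls the compatibility shim above accepts (m1 and m2 being overlaymanifest instances, matcher a matcher object):

d1 = m1.diff(m2)                       # modern call, clean defaults to False
d2 = m1.diff(m2, True)                 # legacy positional: treated as clean=True
d3 = m1.diff(m2, matcher, clean=True)  # explicit matcher plus clean flag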
Example #27
    def generatefiles(orig, self, changedfiles, linknodes, commonrevs, source):
        caps = self._bundlecaps or []
        if shallowrepo.requirement in caps:
            # only send files that don't match the specified patterns
            includepattern = None
            excludepattern = None
            for cap in (self._bundlecaps or []):
                if cap.startswith("includepattern="):
                    includepattern = cap[len("includepattern="):].split('\0')
                elif cap.startswith("excludepattern="):
                    excludepattern = cap[len("excludepattern="):].split('\0')

            m = match.always(repo.root, '')
            if includepattern or excludepattern:
                m = match.match(repo.root, '', None, includepattern,
                                excludepattern)

            changedfiles = list([f for f in changedfiles if not m(f)])
        return orig(self, changedfiles, linknodes, commonrevs, source)
Example #28
    def generatefiles(orig, self, changedfiles, linknodes, commonrevs, source):
        caps = self._bundlecaps or []
        if shallowrepo.requirement in caps:
            # only send files that don't match the specified patterns
            includepattern = None
            excludepattern = None
            for cap in (self._bundlecaps or []):
                if cap.startswith("includepattern="):
                    includepattern = cap[len("includepattern="):].split('\0')
                elif cap.startswith("excludepattern="):
                    excludepattern = cap[len("excludepattern="):].split('\0')

            m = match.always(repo.root, '')
            if includepattern or excludepattern:
                m = match.match(repo.root, '', None,
                    includepattern, excludepattern)

            changedfiles = list([f for f in changedfiles if not m(f)])
        return orig(self, changedfiles, linknodes, commonrevs, source)
Example #29
def diffs(repo, tmpl, ctx, files, parity, style):
    def countgen():
        start = 1
        while True:
            yield start
            start += 1

    blockcount = countgen()

    def prettyprintlines(diff):
        blockno = blockcount.next()
        for lineno, l in enumerate(diff.splitlines(True)):
            lineno = "%d.%d" % (blockno, lineno + 1)
            if l.startswith("+"):
                ltype = "difflineplus"
            elif l.startswith("-"):
                ltype = "difflineminus"
            elif l.startswith("@"):
                ltype = "difflineat"
            else:
                ltype = "diffline"
            yield tmpl(ltype, line=l, lineid="l%s" % lineno, linenumber="% 8s" % lineno)

    if files:
        m = match.exact(repo.root, repo.getcwd(), files)
    else:
        m = match.always(repo.root, repo.getcwd())

    diffopts = patch.diffopts(repo.ui, untrusted=True)
    parents = ctx.parents()
    node1 = parents and parents[0].node() or nullid
    node2 = ctx.node()

    block = []
    for chunk in patch.diff(repo, node1, node2, m, opts=diffopts):
        if chunk.startswith("diff") and block:
            yield tmpl("diffblock", parity=parity.next(), lines=prettyprintlines("".join(block)))
            block = []
        if chunk.startswith("diff") and style != "raw":
            chunk = "".join(chunk.splitlines(True)[1:])
        block.append(chunk)
    yield tmpl("diffblock", parity=parity.next(), lines=prettyprintlines("".join(block)))
Example #30
    def generatefiles(orig, self, changedfiles, linknodes, commonrevs, source,
                      *args, **kwargs):
        caps = self._bundlecaps or []
        if constants.BUNDLE2_CAPABLITY in caps:
            # only send files that don't match the specified patterns
            includepattern = None
            excludepattern = None
            for cap in self._bundlecaps or []:
                if cap.startswith(b"includepattern="):
                    includepattern = cap[len(b"includepattern="):].split(b'\0')
                elif cap.startswith(b"excludepattern="):
                    excludepattern = cap[len(b"excludepattern="):].split(b'\0')

            m = match.always()
            if includepattern or excludepattern:
                m = match.match(repo.root, b'', None, includepattern,
                                excludepattern)

            changedfiles = list([f for f in changedfiles if not m(f)])
        return orig(self, changedfiles, linknodes, commonrevs, source, *args,
                    **kwargs)
Example #31
    def stream_out_shallow(repo, proto, other):
        includepattern = None
        excludepattern = None
        raw = other.get('includepattern')
        if raw:
            includepattern = raw.split('\0')
        raw = other.get('excludepattern')
        if raw:
            excludepattern = raw.split('\0')

        oldshallow = state.shallowremote
        oldmatch = state.match
        try:
            state.shallowremote = True
            state.match = match.always(repo.root, '')
            if includepattern or excludepattern:
                state.match = match.match(repo.root, '', None, includepattern,
                                          excludepattern)
            return wireproto.stream(repo, proto)
        finally:
            state.shallowremote = oldshallow
            state.match = oldmatch
Example #32
    def stream_out_shallow(repo, proto, other):
        includepattern = None
        excludepattern = None
        raw = other.get('includepattern')
        if raw:
            includepattern = raw.split('\0')
        raw = other.get('excludepattern')
        if raw:
            excludepattern = raw.split('\0')

        oldshallow = state.shallowremote
        oldmatch = state.match
        try:
            state.shallowremote = True
            state.match = match.always(repo.root, '')
            if includepattern or excludepattern:
                state.match = match.match(repo.root, '', None,
                    includepattern, excludepattern)
            return wireproto.stream(repo, proto)
        finally:
            state.shallowremote = oldshallow
            state.match = oldmatch
Example #33
    def stream_out_shallow(repo, proto, other):
        includepattern = None
        excludepattern = None
        raw = other.get(b'includepattern')
        if raw:
            includepattern = raw.split(b'\0')
        raw = other.get(b'excludepattern')
        if raw:
            excludepattern = raw.split(b'\0')

        oldshallow = state.shallowremote
        oldmatch = state.match
        oldnoflatmf = state.noflatmf
        try:
            state.shallowremote = True
            state.match = match.always()
            state.noflatmf = other.get(b'noflatmanifest') == b'True'
            if includepattern or excludepattern:
                state.match = match.match(repo.root, b'', None, includepattern,
                                          excludepattern)
            streamres = wireprotov1server.stream(repo, proto)

            # Force the first value to execute, so the file list is computed
            # within the try/finally scope
            first = next(streamres.gen)
            second = next(streamres.gen)

            def gen():
                yield first
                yield second
                for value in streamres.gen:
                    yield value

            return wireprototypes.streamres(gen())
        finally:
            state.shallowremote = oldshallow
            state.match = oldmatch
            state.noflatmf = oldnoflatmf
Example #34
    def stream_out_shallow(repo, proto, other):
        includepattern = None
        excludepattern = None
        raw = other.get('includepattern')
        if raw:
            includepattern = raw.split('\0')
        raw = other.get('excludepattern')
        if raw:
            excludepattern = raw.split('\0')

        oldshallow = state.shallowremote
        oldmatch = state.match
        oldnoflatmf = state.noflatmf
        try:
            state.shallowremote = True
            state.match = match.always(repo.root, '')
            state.noflatmf = other.get('noflatmanifest') == 'True'
            if includepattern or excludepattern:
                state.match = match.match(repo.root, '', None,
                    includepattern, excludepattern)
            streamres = wireprotov1server.stream(repo, proto)

            # Force the first value to execute, so the file list is computed
            # within the try/finally scope
            first = next(streamres.gen)
            second = next(streamres.gen)
            def gen():
                yield first
                yield second
                for value in streamres.gen:
                    yield value
            return wireprototypes.streamres(gen())
        finally:
            state.shallowremote = oldshallow
            state.match = oldmatch
            state.noflatmf = oldnoflatmf
Example #35
    def _walkstreamfiles(orig, repo):
        if state.shallowremote:
            # if we are shallow ourselves, stream our local commits
            if shallowrepo.requirement in repo.requirements:
                striplen = len(repo.store.path) + 1
                readdir = repo.store.rawvfs.readdir
                visit = [os.path.join(repo.store.path, 'data')]
                while visit:
                    p = visit.pop()
                    for f, kind, st in readdir(p, stat=True):
                        fp = p + '/' + f
                        if kind == stat.S_IFREG:
                            if not fp.endswith('.i') and not fp.endswith('.d'):
                                n = util.pconvert(fp[striplen:])
                                yield (store.decodedir(n), n, st.st_size)
                        if kind == stat.S_IFDIR:
                            visit.append(fp)

            # Return .d and .i files that do not match the shallow pattern
            # use a distinct local name: assigning to 'match' would shadow
            # the match module and break the match.always() fallback
            matcher = state.match or match.always(repo.root, '')
            for (u, e, s) in repo.store.datafiles():
                f = u[5:-2]  # trim data/...  and .i/.d
                if not matcher(f):
                    yield (u, e, s)

            for x in repo.store.topfiles():
                yield x
        elif shallowrepo.requirement in repo.requirements:
            # don't allow cloning from a shallow repo to a full repo
            # since it would require fetching every version of every
            # file in order to create the revlogs.
            raise util.Abort(
                _("Cannot clone from a shallow repo " + "to a full repo."))
        else:
            for x in orig(repo):
                yield x
Example #36
def getchangegroup(orig,
                   repo,
                   source,
                   heads=None,
                   common=None,
                   bundlecaps=None):
    if requirement not in repo.requirements:
        return orig(repo,
                    source,
                    heads=heads,
                    common=common,
                    bundlecaps=bundlecaps)

    original = repo.shallowmatch
    try:
        # if serving, only send files the client has patterns for
        if source == 'serve':
            includepattern = None
            excludepattern = None
            for cap in (bundlecaps or []):
                if cap.startswith("includepattern="):
                    raw = cap[len("includepattern="):]
                    if raw:
                        includepattern = raw.split('\0')
                elif cap.startswith("excludepattern="):
                    raw = cap[len("excludepattern="):]
                    if raw:
                        excludepattern = raw.split('\0')
            if includepattern or excludepattern:
                repo.shallowmatch = match.match(repo.root, '', None,
                                                includepattern, excludepattern)
            else:
                repo.shallowmatch = match.always(repo.root, '')
        return orig(repo, source, heads, common, bundlecaps)
    finally:
        repo.shallowmatch = original
Example #37
    def _walkstreamfiles(orig, repo):
        if state.shallowremote:
            # if we are shallow ourselves, stream our local commits
            if shallowrepo.requirement in repo.requirements:
                striplen = len(repo.store.path) + 1
                readdir = repo.store.rawvfs.readdir
                visit = [os.path.join(repo.store.path, 'data')]
                while visit:
                    p = visit.pop()
                    for f, kind, st in readdir(p, stat=True):
                        fp = p + '/' + f
                        if kind == stat.S_IFREG:
                            if not fp.endswith('.i') and not fp.endswith('.d'):
                                n = util.pconvert(fp[striplen:])
                                yield (store.decodedir(n), n, st.st_size)
                        if kind == stat.S_IFDIR:
                            visit.append(fp)

            # Return .d and .i files that do not match the shallow pattern
            # use a distinct local name: assigning to 'match' would shadow
            # the match module and break the match.always() fallback
            matcher = state.match or match.always(repo.root, '')
            for (u, e, s) in repo.store.datafiles():
                f = u[5:-2]  # trim data/...  and .i/.d
                if not matcher(f):
                    yield (u, e, s)

            for x in repo.store.topfiles():
                yield x
        elif shallowrepo.requirement in repo.requirements:
            # don't allow cloning from a shallow repo to a full repo
            # since it would require fetching every version of every
            # file in order to create the revlogs.
            raise util.Abort(_("Cannot clone from a shallow repo "
                             + "to a full repo."))
        else:
            for x in orig(repo):
                yield x
Example #38
        def sparsematch(self, *revs, **kwargs):
            """Returns the sparse match function for the given revs.

            If multiple revs are specified, the match function is the union
            of all the revs.

            `includetemp` is used to indicate if the temporarily included file
            should be part of the matcher.
            """
            if not revs or revs == (None,):
                revs = [self.changelog.rev(node) for node in
                    self.dirstate.parents() if node != nullid]

            includetemp = kwargs.get('includetemp', True)
            signature = self._sparsesignature(includetemp=includetemp)

            key = '%s %s' % (str(signature), ' '.join([str(r) for r in revs]))

            result = self.sparsecache.get(key, None)
            if result:
                return result

            matchers = []
            for rev in revs:
                try:
                    includes, excludes, profiles = self.getsparsepatterns(rev)

                    if includes or excludes:
                        # Explicitly include subdirectories of includes so
                        # status will walk them down to the actual include.
                        subdirs = set()
                        for include in includes:
                            dirname = os.path.dirname(include)
                            # basename is used to avoid issues with absolute
                            # paths (which on Windows can include the drive).
                            while os.path.basename(dirname):
                                subdirs.add(dirname)
                                dirname = os.path.dirname(dirname)

                        matcher = matchmod.match(self.root, '', [],
                            include=includes, exclude=excludes,
                            default='relpath')
                        if subdirs:
                            matcher = forceincludematcher(matcher, subdirs)
                        matchers.append(matcher)
                except IOError:
                    pass

            result = None
            if not matchers:
                result = matchmod.always(self.root, '')
            elif len(matchers) == 1:
                result = matchers[0]
            else:
                result = unionmatcher(matchers)

            if kwargs.get('includetemp', True):
                tempincludes = self.gettemporaryincludes()
                result = forceincludematcher(result, tempincludes)

            self.sparsecache[key] = result

            return result
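
Hypothetical call sites for sparsematch(), following the signature and docstring above (not guaranteed for any particular release):

m = repo.sparsematch()        # union over the working copy's parents
if m('src/app/main.py'):
    pass                      # path falls inside the sparse profile
m1 = repo.sparsematch(rev, includetemp=False)  # one rev, no temp includes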
Example #39
def overridestatus(
    orig, self, node1=".", node2=None, match=None, ignored=False, clean=False, unknown=False, listsubrepos=False
):
    listignored = ignored
    listclean = clean
    listunknown = unknown

    def _cmpsets(l1, l2):
        try:
            if "FSMONITOR_LOG_FILE" in os.environ:
                fn = os.environ["FSMONITOR_LOG_FILE"]
                f = open(fn, "wb")
            else:
                fn = "fsmonitorfail.log"
                f = self.opener(fn, "wb")
        except (IOError, OSError):
            self.ui.warn(_("warning: unable to write to %s\n") % fn)
            return

        try:
            for i, (s1, s2) in enumerate(zip(l1, l2)):
                if set(s1) != set(s2):
                    f.write("sets at position %d are unequal\n" % i)
                    f.write("watchman returned: %s\n" % s1)
                    f.write("stat returned: %s\n" % s2)
        finally:
            f.close()

    if isinstance(node1, context.changectx):
        ctx1 = node1
    else:
        ctx1 = self[node1]
    if isinstance(node2, context.changectx):
        ctx2 = node2
    else:
        ctx2 = self[node2]

    working = ctx2.rev() is None
    parentworking = working and ctx1 == self["."]
    match = match or matchmod.always(self.root, self.getcwd())

    # Maybe we can use this opportunity to update Watchman's state.
    # Mercurial uses workingcommitctx and/or memctx to represent the part of
    # the workingctx that is to be committed. So don't update the state in
    # that case.
    # HG_PENDING is set in the environment when the dirstate is being updated
    # in the middle of a transaction; we must not update our state in that
    # case, or we risk forgetting about changes in the working copy.
    updatestate = (
        parentworking
        and match.always()
        and not isinstance(ctx2, (context.workingcommitctx, context.memctx))
        and "HG_PENDING" not in os.environ
    )

    try:
        if self._fsmonitorstate.walk_on_invalidate:
            # Use a short timeout to query the current clock.  If that
            # takes too long then we assume that the service will be slow
            # to answer our query.
            # walk_on_invalidate indicates that we prefer to walk the
            # tree ourselves because we can ignore portions that Watchman
            # cannot and we tend to be faster in the warmer buffer cache
            # cases.
            self._watchmanclient.settimeout(0.1)
        else:
            # Give Watchman more time to potentially complete its walk
            # and return the initial clock.  In this mode we assume that
            # the filesystem will be slower than parsing a potentially
            # very large Watchman result set.
            self._watchmanclient.settimeout(self._fsmonitorstate.timeout + 0.1)
        startclock = self._watchmanclient.getcurrentclock()
    except Exception as ex:
        self._watchmanclient.clearconnection()
        _handleunavailable(self.ui, self._fsmonitorstate, ex)
        # boo, Watchman failed. bail
        return orig(node1, node2, match, listignored, listclean, listunknown, listsubrepos)

    if updatestate:
        # We need info about unknown files. This may make things slower the
        # first time, but whatever.
        stateunknown = True
    else:
        stateunknown = listunknown

    r = orig(node1, node2, match, listignored, listclean, stateunknown, listsubrepos)
    modified, added, removed, deleted, unknown, ignored, clean = r

    if updatestate:
        notefiles = modified + added + removed + deleted + unknown
        self._fsmonitorstate.set(
            self._fsmonitorstate.getlastclock() or startclock, _hashignore(self.dirstate._ignore), notefiles
        )

    if not listunknown:
        unknown = []

    # don't do paranoid checks if we're not going to query Watchman anyway
    full = listclean or match.traversedir is not None
    if self._fsmonitorstate.mode == "paranoid" and not full:
        # run status again and fall back to the old walk this time
        self.dirstate._fsmonitordisable = True

        # shut the UI up
        quiet = self.ui.quiet
        self.ui.quiet = True
        fout, ferr = self.ui.fout, self.ui.ferr
        self.ui.fout = self.ui.ferr = open(os.devnull, "wb")

        try:
            rv2 = orig(node1, node2, match, listignored, listclean, listunknown, listsubrepos)
        finally:
            self.dirstate._fsmonitordisable = False
            self.ui.quiet = quiet
            self.ui.fout, self.ui.ferr = fout, ferr

        # clean isn't tested since it's set to True above
        _cmpsets([modified, added, removed, deleted, unknown, ignored, clean], rv2)
        modified, added, removed, deleted, unknown, ignored, clean = rv2

    return scmutil.status(modified, added, removed, deleted, unknown, ignored, clean)
Example #40
        def status(self, node1='.', node2=None, match=None, ignored=False,
                clean=False, unknown=False, listsubrepos=False):
            listignored, listclean, listunknown = ignored, clean, unknown
            if not self.lfstatus:
                return super(lfilesrepo, self).status(node1, node2, match,
                    listignored, listclean, listunknown, listsubrepos)
            else:
                # some calls in this function rely on the old version of status
                self.lfstatus = False
                if isinstance(node1, context.changectx):
                    ctx1 = node1
                else:
                    ctx1 = repo[node1]
                if isinstance(node2, context.changectx):
                    ctx2 = node2
                else:
                    ctx2 = repo[node2]
                working = ctx2.rev() is None
                parentworking = working and ctx1 == self['.']

                def inctx(file, ctx):
                    try:
                        if ctx.rev() is None:
                            return file in ctx.manifest()
                        ctx[file]
                        return True
                    except KeyError:
                        return False

                if match is None:
                    match = match_.always(self.root, self.getcwd())

                # First check if there were files specified on the
                # command line.  If there were, and none of them were
                # largefiles, we should just bail here and let super
                # handle it -- thus gaining a big performance boost.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                if match.files() and not match.anypats():
                    for f in lfdirstate:
                        if match(f):
                            break
                    else:
                        return super(lfilesrepo, self).status(node1, node2,
                                match, listignored, listclean,
                                listunknown, listsubrepos)

                # Create a copy of match that matches standins instead
                # of largefiles.
                def tostandins(files):
                    if not working:
                        return files
                    newfiles = []
                    dirstate = repo.dirstate
                    for f in files:
                        sf = lfutil.standin(f)
                        if sf in dirstate:
                            newfiles.append(sf)
                        elif sf in dirstate.dirs():
                            # Directory entries could be regular or
                            # standin, check both
                            newfiles.extend((f, sf))
                        else:
                            newfiles.append(f)
                    return newfiles

                # Create a function that we can use to override what is
                # normally the ignore matcher.  We've already checked
                # for ignored files on the first dirstate walk, and
                # unnecessarily re-checking here causes a huge performance
                # hit because lfdirstate only knows about largefiles
                def _ignoreoverride(self):
                    return False

                m = copy.copy(match)
                m._files = tostandins(m._files)

                # Get ignored files here even if we weren't asked for them; we
                # must use the result here for filtering later
                result = super(lfilesrepo, self).status(node1, node2, m,
                    True, clean, unknown, listsubrepos)
                if working:
                    try:
                        # Any non-largefiles that were explicitly listed must be
                        # taken out or lfdirstate.status will report an error.
                        # The status of these files was already computed using
                        # super's status.
                        # Override lfdirstate's ignore matcher to not do
                        # anything
                        origignore = lfdirstate._ignore
                        lfdirstate._ignore = _ignoreoverride

                        def sfindirstate(f):
                            sf = lfutil.standin(f)
                            dirstate = repo.dirstate
                            return sf in dirstate or sf in dirstate.dirs()
                        match._files = [f for f in match._files
                                        if sfindirstate(f)]
                        # Don't waste time getting the ignored and unknown
                        # files again; we already have them
                        s = lfdirstate.status(match, [], False,
                                listclean, False)
                        (unsure, modified, added, removed, missing, unknown,
                                ignored, clean) = s
                        # Replace the list of ignored and unknown files with
                        # the previously calculated lists, and strip out the
                        # largefiles
                        lfiles = set(lfdirstate._map)
                        ignored = set(result[5]).difference(lfiles)
                        unknown = set(result[4]).difference(lfiles)
                        if parentworking:
                            for lfile in unsure:
                                standin = lfutil.standin(lfile)
                                if standin not in ctx1:
                                    # from second parent
                                    modified.append(lfile)
                                elif ctx1[standin].data().strip() \
                                        != lfutil.hashfile(self.wjoin(lfile)):
                                    modified.append(lfile)
                                else:
                                    clean.append(lfile)
                                    lfdirstate.normal(lfile)
                        else:
                            tocheck = unsure + modified + added + clean
                            modified, added, clean = [], [], []

                            for lfile in tocheck:
                                standin = lfutil.standin(lfile)
                                if inctx(standin, ctx1):
                                    if ctx1[standin].data().strip() != \
                                            lfutil.hashfile(self.wjoin(lfile)):
                                        modified.append(lfile)
                                    else:
                                        clean.append(lfile)
                                else:
                                    added.append(lfile)
                    finally:
                        # Replace the original ignore function
                        lfdirstate._ignore = origignore

                    for standin in ctx1.manifest():
                        if not lfutil.isstandin(standin):
                            continue
                        lfile = lfutil.splitstandin(standin)
                        if not match(lfile):
                            continue
                        if lfile not in lfdirstate:
                            removed.append(lfile)

                    # Filter result lists
                    result = list(result)

                    # Largefiles are not really removed when they're
                    # still in the normal dirstate. Likewise, normal
                    # files are not really removed if they are still in
                    # lfdirstate. This happens in merges where files
                    # change type.
                    removed = [f for f in removed if f not in repo.dirstate]
                    result[2] = [f for f in result[2] if f not in lfdirstate]

                    # Unknown files
                    unknown = set(unknown).difference(ignored)
                    result[4] = [f for f in unknown
                                 if (repo.dirstate[f] == '?' and
                                     not lfutil.isstandin(f))]
                    # Ignored files were calculated earlier by the dirstate,
                    # and we already stripped out the largefiles from the list
                    result[5] = ignored
                    # combine normal files and largefiles
                    normals = [[fn for fn in filelist
                                if not lfutil.isstandin(fn)]
                               for filelist in result]
                    lfiles = (modified, added, removed, missing, [], [], clean)
                    result = [sorted(list1 + list2)
                              for (list1, list2) in zip(normals, lfiles)]
                else:
                    def toname(f):
                        if lfutil.isstandin(f):
                            return lfutil.splitstandin(f)
                        return f
                    result = [[toname(f) for f in items] for items in result]

                if not listunknown:
                    result[4] = []
                if not listignored:
                    result[5] = []
                if not listclean:
                    result[6] = []
                self.lfstatus = True
                return result
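
# The override above ends by zipping the non-largefile status lists with
# the largefile status lists position by position. A minimal standalone
# sketch of that combine step, using hypothetical file names (both sides
# are 7-tuples ordered modified, added, removed, deleted/missing,
# unknown, ignored, clean):
normals = [['a.txt'], [], [], [], [], [], ['b.txt']]
lfiles = (['big.bin'], [], [], [], [], [], [])
combined = [sorted(list1 + list2)
            for (list1, list2) in zip(normals, lfiles)]
assert combined[0] == ['a.txt', 'big.bin']   # merged "modified" list
assert combined[6] == ['b.txt']              # clean list is untouched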
Example #41
0
        def commit(self, text="", user=None, date=None, match=None,
                force=False, editor=False, extra={}):
            orig = super(lfilesrepo, self).commit

            wlock = repo.wlock()
            try:
                # Case 0: Rebase or Transplant
                # We have to take the time to pull down the new largefiles now.
                # Otherwise, any largefiles that were modified in the
                # destination changesets get overwritten, either by the rebase
                # or in the first commit after the rebase or transplant.
                # updatelfiles will update the dirstate to mark any pulled
                # largefiles as modified
                if getattr(repo, "_isrebasing", False) or \
                        getattr(repo, "_istransplanting", False):
                    lfcommands.updatelfiles(repo.ui, repo, filelist=None,
                                            printmessage=False)
                    result = orig(text=text, user=user, date=date, match=match,
                                    force=force, editor=editor, extra=extra)
                    return result
                # Case 1: user calls commit with no specific files or
                # include/exclude patterns: refresh and commit all files that
                # are "dirty".
                if ((match is None) or
                    (not match.anypats() and not match.files())):
                    # Spend a bit of time here to get a list of files we know
                    # are modified so we can compare only against those.
                    # It can cost a lot of time (several seconds)
                    # otherwise to update all standins if the largefiles are
                    # large.
                    lfdirstate = lfutil.openlfdirstate(ui, self)
                    dirtymatch = match_.always(repo.root, repo.getcwd())
                    s = lfdirstate.status(dirtymatch, [], False, False, False)
                    modifiedfiles = []
                    for i in s:
                        modifiedfiles.extend(i)
                    lfiles = lfutil.listlfiles(self)
                    # this only loops through largefiles that exist (not
                    # removed/renamed)
                    for lfile in lfiles:
                        if lfile in modifiedfiles:
                            if os.path.exists(
                                    self.wjoin(lfutil.standin(lfile))):
                                # this handles the case where a rebase is being
                                # performed and the working copy is not updated
                                # yet.
                                if os.path.exists(self.wjoin(lfile)):
                                    lfutil.updatestandin(self,
                                        lfutil.standin(lfile))
                                    lfdirstate.normal(lfile)
                    for lfile in lfdirstate:
                        if lfile in modifiedfiles:
                            if not os.path.exists(
                                    repo.wjoin(lfutil.standin(lfile))):
                                lfdirstate.drop(lfile)

                    result = orig(text=text, user=user, date=date, match=match,
                                    force=force, editor=editor, extra=extra)
                    # This needs to be after commit; otherwise precommit hooks
                    # get the wrong status
                    lfdirstate.write()
                    return result

                for f in match.files():
                    if lfutil.isstandin(f):
                        raise util.Abort(
                            _('file "%s" is a largefile standin') % f,
                            hint=('commit the largefile itself instead'))

                # Case 2: user calls commit with specified patterns: refresh
                # any matching big files.
                smatcher = lfutil.composestandinmatcher(self, match)
                standins = lfutil.dirstatewalk(self.dirstate, smatcher)

                # No matching big files: get out of the way and pass control to
                # the usual commit() method.
                if not standins:
                    return orig(text=text, user=user, date=date, match=match,
                                    force=force, editor=editor, extra=extra)

                # Refresh all matching big files.  It's possible that the
                # commit will end up failing, in which case the big files will
                # stay refreshed.  No harm done: the user modified them and
                # asked to commit them, so sooner or later we're going to
                # refresh the standins.  Might as well leave them refreshed.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                for standin in standins:
                    lfile = lfutil.splitstandin(standin)
                    if lfdirstate[lfile] != 'r':
                        lfutil.updatestandin(self, standin)
                        lfdirstate.normal(lfile)
                    else:
                        lfdirstate.drop(lfile)

                # Cook up a new matcher that only matches regular files or
                # standins corresponding to the big files requested by the
                # user.  Have to modify _files to prevent commit() from
                # complaining "not tracked" for big files.
                lfiles = lfutil.listlfiles(repo)
                match = copy.copy(match)
                origmatchfn = match.matchfn

                # Check both the list of largefiles and the list of
                # standins because if a largefile was removed, it
                # won't be in the list of largefiles at this point
                match._files += sorted(standins)

                actualfiles = []
                for f in match._files:
                    fstandin = lfutil.standin(f)

                    # ignore known largefiles and standins
                    if f in lfiles or fstandin in standins:
                        continue

                    # append directory separator to avoid collisions
                    if not fstandin.endswith(os.sep):
                        fstandin += os.sep

                    actualfiles.append(f)
                match._files = actualfiles

                def matchfn(f):
                    if origmatchfn(f):
                        return f not in lfiles
                    else:
                        return f in standins

                match.matchfn = matchfn
                result = orig(text=text, user=user, date=date, match=match,
                                force=force, editor=editor, extra=extra)
                # This needs to be after commit; otherwise precommit hooks
                # get the wrong status
                lfdirstate.write()
                return result
            finally:
                wlock.release()
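
# A minimal sketch of the matchfn swap performed at the end of Case 2
# above, with hypothetical names: the rewritten matcher suppresses
# largefiles the original matcher accepted and accepts their standins
# instead, so commit() only sees standins for big files.
lfiles = {'big.bin'}
standins = {'.hglf/big.bin'}
origmatchfn = lambda f: f in ('big.bin', 'normal.txt')

def matchfn(f):
    if origmatchfn(f):
        return f not in lfiles
    return f in standins

assert matchfn('normal.txt')        # plain files pass through
assert not matchfn('big.bin')       # the largefile itself is suppressed
assert matchfn('.hglf/big.bin')     # ...its standin matches instead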
Example #42
0
        def status(self, node1='.', node2=None, match=None, ignored=False,
                clean=False, unknown=False, listsubrepos=False):
            listignored, listclean, listunknown = ignored, clean, unknown
            if not self.lfstatus:
                try:
                    return super(lfiles_repo, self).status(node1, node2, match,
                        listignored, listclean, listunknown, listsubrepos)
                except TypeError:
                    return super(lfiles_repo, self).status(node1, node2, match,
                        listignored, listclean, listunknown)
            else:
                # some calls in this function rely on the old version of status
                self.lfstatus = False
                if isinstance(node1, context.changectx):
                    ctx1 = node1
                else:
                    ctx1 = repo[node1]
                if isinstance(node2, context.changectx):
                    ctx2 = node2
                else:
                    ctx2 = repo[node2]
                working = ctx2.rev() is None
                parentworking = working and ctx1 == self['.']

                def inctx(file, ctx):
                    try:
                        if ctx.rev() is None:
                            return file in ctx.manifest()
                        ctx[file]
                        return True
                    except KeyError:
                        return False

                if match is None:
                    match = match_.always(self.root, self.getcwd())

                # Create a copy of match that matches standins instead
                # of largefiles.
                def tostandin(file):
                    if inctx(lfutil.standin(file), ctx2):
                        return lfutil.standin(file)
                    return file

                m = copy.copy(match)
                m._files = [tostandin(f) for f in m._files]

                # get ignored, clean, and unknown but remove them
                # later if they were not asked for
                try:
                    result = super(lfiles_repo, self).status(node1, node2, m,
                        True, True, True, listsubrepos)
                except TypeError:
                    result = super(lfiles_repo, self).status(node1, node2, m,
                        True, True, True)
                if working:
                    # hold the wlock while we read largefiles and
                    # update the lfdirstate
                    wlock = repo.wlock()
                    try:
                        # Any non-largefiles that were explicitly listed must be
                        # taken out or lfdirstate.status will report an error.
                        # The status of these files was already computed using
                        # super's status.
                        lfdirstate = lfutil.openlfdirstate(ui, self)
                        match._files = [f for f in match._files if f in
                            lfdirstate]
                        s = lfdirstate.status(match, [], listignored,
                                listclean, listunknown)
                        (unsure, modified, added, removed, missing, unknown,
                                ignored, clean) = s
                        if parentworking:
                            for lfile in unsure:
                                if ctx1[lfutil.standin(lfile)].data().strip() \
                                        != lfutil.hashfile(self.wjoin(lfile)):
                                    modified.append(lfile)
                                else:
                                    clean.append(lfile)
                                    lfdirstate.normal(lfile)
                            lfdirstate.write()
                        else:
                            tocheck = unsure + modified + added + clean
                            modified, added, clean = [], [], []

                            for lfile in tocheck:
                                standin = lfutil.standin(lfile)
                                if inctx(standin, ctx1):
                                    if ctx1[standin].data().strip() != \
                                            lfutil.hashfile(self.wjoin(lfile)):
                                        modified.append(lfile)
                                    else:
                                        clean.append(lfile)
                                else:
                                    added.append(lfile)
                    finally:
                        wlock.release()

                    for standin in ctx1.manifest():
                        if not lfutil.isstandin(standin):
                            continue
                        lfile = lfutil.splitstandin(standin)
                        if not match(lfile):
                            continue
                        if lfile not in lfdirstate:
                            removed.append(lfile)
                    # Handle unknown and ignored differently
                    lfiles = (modified, added, removed, missing, [], [], clean)
                    result = list(result)
                    # Unknown files
                    result[4] = [f for f in unknown
                                 if (repo.dirstate[f] == '?' and
                                     not lfutil.isstandin(f))]
                    # Ignored files must be ignored by both the dirstate and
                    # lfdirstate
                    result[5] = set(ignored).intersection(set(result[5]))
                    # combine normal files and largefiles
                    normals = [[fn for fn in filelist
                                if not lfutil.isstandin(fn)]
                               for filelist in result]
                    result = [sorted(list1 + list2)
                              for (list1, list2) in zip(normals, lfiles)]
                else:
                    def toname(f):
                        if lfutil.isstandin(f):
                            return lfutil.splitstandin(f)
                        return f
                    result = [[toname(f) for f in items] for items in result]

                if not listunknown:
                    result[4] = []
                if not listignored:
                    result[5] = []
                if not listclean:
                    result[6] = []
                self.lfstatus = True
                return result
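
# The inctx() helper used above distinguishes a working context
# (ctx.rev() is None) from a committed one. A sketch of the same
# try/KeyError membership idiom against a hypothetical dict-backed
# context object, just to isolate the control flow:
class fakectx(object):
    def __init__(self, files, rev):
        self._files, self._rev = files, rev
    def rev(self):
        return self._rev
    def manifest(self):
        return self._files
    def __getitem__(self, f):
        return self._files[f]

def inctx(file, ctx):
    try:
        if ctx.rev() is None:     # working ctx: cheap manifest lookup
            return file in ctx.manifest()
        ctx[file]                 # committed ctx: KeyError if absent
        return True
    except KeyError:
        return False

assert inctx('a', fakectx({'a': 1}, rev=5))
assert not inctx('b', fakectx({'a': 1}, rev=None))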
Example #43
0
        def status(self, node1='.', node2=None, match=None, ignored=False,
                clean=False, unknown=False, listsubrepos=False):
            listignored, listclean, listunknown = ignored, clean, unknown
            if not self.lfstatus:
                return super(lfilesrepo, self).status(node1, node2, match,
                    listignored, listclean, listunknown, listsubrepos)
            else:
                # some calls in this function rely on the old version of status
                self.lfstatus = False
                if isinstance(node1, context.changectx):
                    ctx1 = node1
                else:
                    ctx1 = self[node1]
                if isinstance(node2, context.changectx):
                    ctx2 = node2
                else:
                    ctx2 = self[node2]
                working = ctx2.rev() is None
                parentworking = working and ctx1 == self['.']

                def inctx(file, ctx):
                    try:
                        if ctx.rev() is None:
                            return file in ctx.manifest()
                        ctx[file]
                        return True
                    except KeyError:
                        return False

                if match is None:
                    match = match_.always(self.root, self.getcwd())

                wlock = None
                try:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                    except error.LockError:
                        pass

                    # First check if there were files specified on the
                    # command line.  If there were, and none of them were
                    # largefiles, we should just bail here and let super
                    # handle it -- thus gaining a big performance boost.
                    lfdirstate = lfutil.openlfdirstate(ui, self)
                    if match.files() and not match.anypats():
                        for f in lfdirstate:
                            if match(f):
                                break
                        else:
                            return super(lfilesrepo, self).status(node1, node2,
                                    match, listignored, listclean,
                                    listunknown, listsubrepos)

                    # Create a copy of match that matches standins instead
                    # of largefiles.
                    def tostandins(files):
                        if not working:
                            return files
                        newfiles = []
                        dirstate = self.dirstate
                        for f in files:
                            sf = lfutil.standin(f)
                            if sf in dirstate:
                                newfiles.append(sf)
                            elif sf in dirstate.dirs():
                                # Directory entries could be regular or
                                # standin, check both
                                newfiles.extend((f, sf))
                            else:
                                newfiles.append(f)
                        return newfiles

                    m = copy.copy(match)
                    m._files = tostandins(m._files)

                    result = super(lfilesrepo, self).status(node1, node2, m,
                        ignored, clean, unknown, listsubrepos)
                    if working:

                        def sfindirstate(f):
                            sf = lfutil.standin(f)
                            dirstate = self.dirstate
                            return sf in dirstate or sf in dirstate.dirs()

                        match._files = [f for f in match._files
                                        if sfindirstate(f)]
                        # Don't waste time getting the ignored and unknown
                        # files from lfdirstate
                        s = lfdirstate.status(match, [], False,
                                listclean, False)
                        (unsure, modified, added, removed, missing, _unknown,
                                _ignored, clean) = s
                        if parentworking:
                            for lfile in unsure:
                                standin = lfutil.standin(lfile)
                                if standin not in ctx1:
                                    # from second parent
                                    modified.append(lfile)
                                elif ctx1[standin].data().strip() \
                                        != lfutil.hashfile(self.wjoin(lfile)):
                                    modified.append(lfile)
                                else:
                                    clean.append(lfile)
                                    lfdirstate.normal(lfile)
                        else:
                            tocheck = unsure + modified + added + clean
                            modified, added, clean = [], [], []

                            for lfile in tocheck:
                                standin = lfutil.standin(lfile)
                                if inctx(standin, ctx1):
                                    if ctx1[standin].data().strip() != \
                                            lfutil.hashfile(self.wjoin(lfile)):
                                        modified.append(lfile)
                                    else:
                                        clean.append(lfile)
                                else:
                                    added.append(lfile)

                        # Standins no longer found in lfdirstate have been
                        # removed
                        for standin in ctx1.manifest():
                            if not lfutil.isstandin(standin):
                                continue
                            lfile = lfutil.splitstandin(standin)
                            if not match(lfile):
                                continue
                            if lfile not in lfdirstate:
                                removed.append(lfile)

                        # Filter result lists
                        result = list(result)

                        # Largefiles are not really removed when they're
                        # still in the normal dirstate. Likewise, normal
                        # files are not really removed if they are still in
                        # lfdirstate. This happens in merges where files
                        # change type.
                        removed = [f for f in removed
                                   if f not in self.dirstate]
                        result[2] = [f for f in result[2]
                                     if f not in lfdirstate]

                        lfiles = set(lfdirstate._map)
                        # Unknown files
                        result[4] = set(result[4]).difference(lfiles)
                        # Ignored files
                        result[5] = set(result[5]).difference(lfiles)
                        # combine normal files and largefiles
                        normals = [[fn for fn in filelist
                                    if not lfutil.isstandin(fn)]
                                   for filelist in result]
                        lfiles = (modified, added, removed, missing, [], [],
                                  clean)
                        result = [sorted(list1 + list2)
                                  for (list1, list2) in zip(normals, lfiles)]
                    else:
                        def toname(f):
                            if lfutil.isstandin(f):
                                return lfutil.splitstandin(f)
                            return f
                        result = [[toname(f) for f in items]
                                  for items in result]

                    if wlock:
                        lfdirstate.write()

                finally:
                    if wlock:
                        wlock.release()

                if not listunknown:
                    result[4] = []
                if not listignored:
                    result[5] = []
                if not listclean:
                    result[6] = []
                self.lfstatus = True
                return result
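
# tostandins() above consults both the dirstate and dirstate.dirs()
# because a command-line path may name a largefile, a directory, or a
# plain file. A hypothetical illustration of the three cases, with
# plain sets standing in for the real dirstate:
tracked = {'.hglf/assets/big.bin', 'assets/readme.txt'}
trackeddirs = {'.hglf', '.hglf/assets', 'assets'}  # what dirs() reports

def tostandin_case(f):
    sf = '.hglf/' + f
    if sf in tracked:
        return [sf]        # f is a largefile: look for its standin
    elif sf in trackeddirs:
        return [f, sf]     # f is a directory: may hold both kinds
    return [f]             # plain file: match as-is

assert tostandin_case('assets/big.bin') == ['.hglf/assets/big.bin']
assert tostandin_case('assets') == ['assets', '.hglf/assets']
assert tostandin_case('assets/readme.txt') == ['assets/readme.txt']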
Example #44
0
        def status(self,
                   node1='.',
                   node2=None,
                   match=None,
                   ignored=False,
                   clean=False,
                   unknown=False,
                   listsubrepos=False):
            listignored, listclean, listunknown = ignored, clean, unknown
            orig = super(lfilesrepo, self).status
            if not self.lfstatus:
                return orig(node1, node2, match, listignored, listclean,
                            listunknown, listsubrepos)

            # some calls in this function rely on the old version of status
            self.lfstatus = False
            ctx1 = self[node1]
            ctx2 = self[node2]
            working = ctx2.rev() is None
            parentworking = working and ctx1 == self['.']

            if match is None:
                match = matchmod.always(self.root, self.getcwd())

            wlock = None
            try:
                try:
                    # updating the dirstate is optional
                    # so we don't wait on the lock
                    wlock = self.wlock(False)
                except error.LockError:
                    pass

                # First check if paths or patterns were specified on the
                # command line.  If there were, and they don't match any
                # largefiles, we should just bail here and let super
                # handle it -- thus gaining a big performance boost.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                if not match.always():
                    for f in lfdirstate:
                        if match(f):
                            break
                    else:
                        return orig(node1, node2, match, listignored,
                                    listclean, listunknown, listsubrepos)

                # Create a copy of match that matches standins instead
                # of largefiles.
                def tostandins(files):
                    if not working:
                        return files
                    newfiles = []
                    dirstate = self.dirstate
                    for f in files:
                        sf = lfutil.standin(f)
                        if sf in dirstate:
                            newfiles.append(sf)
                        elif sf in dirstate.dirs():
                            # Directory entries could be regular or
                            # standin, check both
                            newfiles.extend((f, sf))
                        else:
                            newfiles.append(f)
                    return newfiles

                m = copy.copy(match)
                m._files = tostandins(m._files)

                result = orig(node1, node2, m, ignored, clean, unknown,
                              listsubrepos)
                if working:

                    def sfindirstate(f):
                        sf = lfutil.standin(f)
                        dirstate = self.dirstate
                        return sf in dirstate or sf in dirstate.dirs()

                    match._files = [f for f in match._files if sfindirstate(f)]
                    # Don't waste time getting the ignored and unknown
                    # files from lfdirstate
                    unsure, s = lfdirstate.status(match, [], False, listclean,
                                                  False)
                    (modified, added, removed, deleted,
                     clean) = (s.modified, s.added, s.removed, s.deleted,
                               s.clean)
                    if parentworking:
                        for lfile in unsure:
                            standin = lfutil.standin(lfile)
                            if standin not in ctx1:
                                # from second parent
                                modified.append(lfile)
                            elif ctx1[standin].data().strip() \
                                    != lfutil.hashfile(self.wjoin(lfile)):
                                modified.append(lfile)
                            else:
                                if listclean:
                                    clean.append(lfile)
                                lfdirstate.normal(lfile)
                    else:
                        tocheck = unsure + modified + added + clean
                        modified, added, clean = [], [], []
                        checkexec = self.dirstate._checkexec

                        for lfile in tocheck:
                            standin = lfutil.standin(lfile)
                            if standin in ctx1:
                                abslfile = self.wjoin(lfile)
                                if ((ctx1[standin].data().strip() !=
                                     lfutil.hashfile(abslfile)) or
                                    (checkexec and ('x' in ctx1.flags(standin))
                                     != bool(lfutil.getexecutable(abslfile)))):
                                    modified.append(lfile)
                                elif listclean:
                                    clean.append(lfile)
                            else:
                                added.append(lfile)

                        # at this point, 'removed' contains largefiles
                        # marked as 'R' in the working context. Largefiles
                        # that are not managed in the target context must
                        # be excluded from 'removed' as well.
                        removed = [
                            lfile for lfile in removed
                            if lfutil.standin(lfile) in ctx1
                        ]

                    # Standins no longer found in lfdirstate have been deleted
                    for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
                        lfile = lfutil.splitstandin(standin)
                        if not match(lfile):
                            continue
                        if lfile not in lfdirstate:
                            deleted.append(lfile)
                            # Sync "largefile has been removed" back to the
                            # standin. Removing a file as a side effect of
                            # running status is gross, but the alternatives (if
                            # any) are worse.
                            self.wvfs.unlinkpath(standin, ignoremissing=True)

                    # Filter result lists
                    result = list(result)

                    # Largefiles are not really removed when they're
                    # still in the normal dirstate. Likewise, normal
                    # files are not really removed if they are still in
                    # lfdirstate. This happens in merges where files
                    # change type.
                    removed = [f for f in removed if f not in self.dirstate]
                    result[2] = [f for f in result[2] if f not in lfdirstate]

                    lfiles = set(lfdirstate._map)
                    # Unknown files
                    result[4] = set(result[4]).difference(lfiles)
                    # Ignored files
                    result[5] = set(result[5]).difference(lfiles)
                    # combine normal files and largefiles
                    normals = [[
                        fn for fn in filelist if not lfutil.isstandin(fn)
                    ] for filelist in result]
                    lfstatus = (modified, added, removed, deleted, [], [],
                                clean)
                    result = [
                        sorted(list1 + list2)
                        for (list1, list2) in zip(normals, lfstatus)
                    ]
                else:  # not against working directory
                    result = [[lfutil.splitstandin(f) or f for f in items]
                              for items in result]

                if wlock:
                    lfdirstate.write()

            finally:
                if wlock:
                    wlock.release()

            self.lfstatus = True
            return scmutil.status(*result)
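
# This revision also counts a changed executable bit as a modification
# (guarded by checkexec for filesystems that cannot record the bit).
# A hedged sketch of that half of the test; getexecutable() here is a
# hypothetical stand-in for lfutil's helper:
import os
import stat

def getexecutable(path):
    # report whether the owner execute bit is set on the on-disk file
    return bool(os.stat(path).st_mode & stat.S_IXUSR)

def execbitchanged(ctx1, fstandin, abslfile):
    # committed 'x' flag on the standin vs. the current on-disk bit
    return ('x' in ctx1.flags(fstandin)) != getexecutable(abslfile)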
Example #45
0
                seriespath = os.path.join(self.path, 'series')
                if os.path.exists(seriespath):
                    os.unlink(seriespath)
                p1 = repo.dirstate.parents()[0]
                p2 = node
                self.log(user, date, message, p1, p2, merge=merge)
                self.ui.write(str(inst) + '\n')
                raise util.Abort(
                    _('fix up the merge and run '
                      'hg transplant --continue'))
        else:
            files = None
        if merge:
            p1, p2 = repo.dirstate.parents()
            repo.dirstate.setparents(p1, node)
            m = match.always(repo.root, '')
        else:
            m = match.exact(repo.root, '', files)

        n = repo.commit(message, user, date, extra=extra, match=m)
        if not merge:
            self.transplants.set(n, node)

        return n

    def resume(self, repo, source, opts=None):
        '''recover last transaction and apply remaining changesets'''
        if os.path.exists(os.path.join(self.path, 'journal')):
            n, node = self.recover(repo)
            self.ui.status(
                _('%s transplanted as %s\n') %
Example #46
0
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s = lfdirstate.status(dirtymatch, subrepos=[], ignored=False,
                                      clean=False, unknown=False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, subrepos=[], unknown=False,
                                  ignored=False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files.  It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed.  No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins.  Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user.  Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove).  In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
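
# standin(), isstandin() and splitstandin(), used throughout, map a
# largefile name to its standin under the .hglf/ directory and back.
# A simplified sketch consistent with how they are used here (the real
# lfutil helpers also deal with platform path separators):
shortname = '.hglf'

def standin(filename):
    # the standin for foo/big.bin lives at .hglf/foo/big.bin
    return shortname + '/' + filename

def isstandin(filename):
    return filename.startswith(shortname + '/')

def splitstandin(filename):
    # inverse of standin(); None if the name is not a standin
    if isstandin(filename):
        return filename[len(shortname) + 1:]
    return None

assert standin('foo/big.bin') == '.hglf/foo/big.bin'
assert splitstandin('.hglf/foo/big.bin') == 'foo/big.bin'
assert splitstandin('foo/big.bin') is None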
Example #47
0
def wraprepo(repo):
    class shallowrepository(repo.__class__):
        @util.propertycache
        def name(self):
            return self.ui.config('remotefilelog', 'reponame', '')

        @util.propertycache
        def fallbackpath(self):
            path = repo.ui.config("remotefilelog", "fallbackpath",
                     # fallbackrepo is the old, deprecated name
                     repo.ui.config("remotefilelog", "fallbackrepo",
                       repo.ui.config("paths", "default")))
            if not path:
                raise error.Abort("no remotefilelog server "
                    "configured - is your .hg/hgrc trusted?")

            return path

        def maybesparsematch(self, *revs, **kwargs):
            '''
            A wrapper that allows the remotefilelog to invoke sparsematch() if
            this is a sparse repository, or returns None if this is not a
            sparse repository.
            '''
            if util.safehasattr(self, 'sparsematch'):
                return self.sparsematch(*revs, **kwargs)

            return None

        def file(self, f):
            if f[0] == '/':
                f = f[1:]

            if self.shallowmatch(f):
                return remotefilelog.remotefilelog(self.svfs, f, self)
            else:
                return super(shallowrepository, self).file(f)

        def filectx(self, path, *args, **kwargs):
            if self.shallowmatch(path):
                return remotefilectx.remotefilectx(self, path, *args, **kwargs)
            else:
                return super(shallowrepository, self).filectx(path, *args,
                                                              **kwargs)

        @localrepo.unfilteredmethod
        def commitctx(self, ctx, error=False):
            """Add a new revision to current repository.
            Revision information is passed via the context argument.
            """

            # some contexts already have manifest nodes, they don't need any
            # prefetching (for example if we're just editing a commit
            # message we can reuse the manifest)
            if not ctx.manifestnode():
                # prefetch files that will likely be compared
                m1 = ctx.p1().manifest()
                files = []
                for f in ctx.modified() + ctx.added():
                    fparent1 = m1.get(f, nullid)
                    if fparent1 != nullid:
                        files.append((f, hex(fparent1)))
                self.fileservice.prefetch(files)
            return super(shallowrepository, self).commitctx(ctx,
                                                            error=error)

        def backgroundprefetch(self, revs, base=None, repack=False, pats=None,
                               opts=None):
            """Runs prefetch in background with optional repack
            """
            cmd = [_hgexecutable(), '-R', repo.origroot, 'prefetch']
            if repack:
                cmd.append('--repack')
            if revs:
                cmd += ['-r', revs]
            cmd = ' '.join(map(procutil.shellquote, cmd))

            runshellcommand(cmd, os.environ)

        def prefetch(self, revs, base=None, pats=None, opts=None):
            """Prefetches all the necessary file revisions for the given revs
            Optionally runs repack in background
            """
            with repo._lock(repo.svfs, 'prefetchlock', True, None, None,
                            _('prefetching in %s') % repo.origroot):
                self._prefetch(revs, base, pats, opts)

        def _prefetch(self, revs, base=None, pats=None, opts=None):
            fallbackpath = self.fallbackpath
            if fallbackpath:
                # If we know a rev is on the server, we should fetch the server
                # version of those files, since our local file versions might
                # become obsolete if the local commits are stripped.
                localrevs = repo.revs('outgoing(%s)', fallbackpath)
                if base is not None and base != nullrev:
                    serverbase = list(repo.revs('first(reverse(::%s) - %ld)',
                                                base, localrevs))
                    if serverbase:
                        base = serverbase[0]
            else:
                localrevs = repo

            mfl = repo.manifestlog
            mfrevlog = mfl._revlog
            if base is not None:
                mfdict = mfl[repo[base].manifestnode()].read()
                skip = set(mfdict.iteritems())
            else:
                skip = set()

            # Copy the skip set to start large and avoid constant resizing;
            # it's likely to be very similar to the prefetch set anyway.
            files = skip.copy()
            serverfiles = skip.copy()
            visited = set()
            visited.add(nullrev)
            revnum = 0
            revcount = len(revs)
            self.ui.progress(_prefetching, revnum, total=revcount)
            for rev in sorted(revs):
                ctx = repo[rev]
                if pats:
                    m = scmutil.match(ctx, pats, opts)
                sparsematch = repo.maybesparsematch(rev)

                mfnode = ctx.manifestnode()
                mfrev = mfrevlog.rev(mfnode)

                # Decompressing manifests is expensive.
                # When possible, only read the deltas.
                p1, p2 = mfrevlog.parentrevs(mfrev)
                if p1 in visited and p2 in visited:
                    mfdict = mfl[mfnode].readfast()
                else:
                    mfdict = mfl[mfnode].read()

                diff = mfdict.iteritems()
                if pats:
                    diff = (pf for pf in diff if m(pf[0]))
                if sparsematch:
                    diff = (pf for pf in diff if sparsematch(pf[0]))
                if rev not in localrevs:
                    serverfiles.update(diff)
                else:
                    files.update(diff)

                visited.add(mfrev)
                revnum += 1
                self.ui.progress(_prefetching, revnum, total=revcount)

            files.difference_update(skip)
            serverfiles.difference_update(skip)
            self.ui.progress(_prefetching, None)

            # Fetch files known to be on the server
            if serverfiles:
                results = [(path, hex(fnode)) for (path, fnode) in serverfiles]
                repo.fileservice.prefetch(results, force=True)

            # Fetch files that may or may not be on the server
            if files:
                results = [(path, hex(fnode)) for (path, fnode) in files]
                repo.fileservice.prefetch(results)

        def close(self):
            super(shallowrepository, self).close()
            self.connectionpool.close()

    repo.__class__ = shallowrepository

    repo.shallowmatch = match.always(repo.root, '')

    makeunionstores(repo)

    repo.includepattern = repo.ui.configlist("remotefilelog", "includepattern",
                                             None)
    repo.excludepattern = repo.ui.configlist("remotefilelog", "excludepattern",
                                             None)
    if not util.safehasattr(repo, 'connectionpool'):
        repo.connectionpool = connectionpool.connectionpool(repo)

    if repo.includepattern or repo.excludepattern:
        repo.shallowmatch = match.match(repo.root, '', None,
            repo.includepattern, repo.excludepattern)
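
# wraprepo() does not subclass ahead of time; it rebinds the instance's
# __class__ to a subclass built from whatever class the repo already
# has, so wrappers from several extensions compose. A minimal sketch of
# the idiom with hypothetical names:
class base(object):
    def file(self, f):
        return 'filelog:%s' % f

def wrap(obj):
    class wrapped(obj.__class__):
        def file(self, f):
            if f.startswith('/'):
                f = f[1:]          # normalize, like file() above
            return super(wrapped, self).file(f)
    obj.__class__ = wrapped

repo = base()
wrap(repo)
assert repo.file('/x') == 'filelog:x'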
Example #48
0
def _docheckout(ui,
                url,
                dest,
                upstream,
                revision,
                branch,
                purge,
                sharebase,
                networkattemptlimit,
                networkattempts=None,
                sparse_profile=None):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(ui,
                           url,
                           dest,
                           upstream,
                           revision,
                           branch,
                           purge,
                           sharebase,
                           networkattemptlimit,
                           networkattempts=networkattempts,
                           sparse_profile=sparse_profile)

    ui.write('ensuring %s@%s is available at %s\n' %
             (url, revision or branch, dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = getvfs()(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        storepath = path or destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = getvfs()(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Refuse to enable sparse checkouts on existing checkouts. The reasoning
    # here is that another consumer of this repo may not be sparse aware. If we
    # enabled sparse, we would lock them out.
    if (destvfs.exists() and sparse_profile
            and not destvfs.exists('.hg/sparse')):
        raise error.Abort(
            'cannot enable sparse profile on existing '
            'non-sparse checkout',
            hint='use a separate working directory to use sparse')

    # And the other direction for symmetry.
    if not sparse_profile and destvfs.exists('.hg/sparse'):
        raise error.Abort(
            'cannot use non-sparse checkout on existing sparse '
            'checkout',
            hint='use a separate working directory to use sparse')

    # Require checkouts to be tied to shared storage for efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting destination)\n')
            destvfs.rmtree(forcibly=True)
        elif not re.search(r'[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting destination to improve efficiency)\n')
            destvfs.rmtree(forcibly=True)

    if destvfs.isfileorlink('.hg/wlock'):
        ui.warn('(dest has an active working directory lock; assuming it is '
                'left over from a previous process and that the destination '
                'is corrupt; deleting it just to be sure)\n')
        destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort('reached maximum number of network attempts; '
                              'giving up\n')

        ui.warn('(retrying after network failure on attempt %d of %d)\n' %
                (networkattempts[0], networkattemptlimit))

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2**networkattempts[0] - 1) * 1.5
        jittermin = ui.configint('robustcheckout', 'retryjittermin', 1000)
        jittermax = ui.configint('robustcheckout', 'retryjittermax', 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn('(waiting %.2fs before retry)\n' % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _('repository is unrelated'):
                ui.warn('(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_('stream ended unexpectedly')):
                ui.warn('%s\n' % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn('ssl error: %s\n' % e)
            handlenetworkfailure()
            return True
        elif isinstance(e, urllib2.URLError):
            if isinstance(e.reason, socket.error):
                ui.warn('socket error: %s\n' % e.reason)
                handlenetworkfailure()
                return True
            else:
                ui.warn('unhandled URLError; reason type: %s; value: %s\n' %
                        (e.reason.__class__.__name__, e.reason))
        else:
            ui.warn('unhandled exception during network operation; type: %s; '
                    'value: %s\n' % (e.__class__.__name__, e))

        return False

    # Perform sanity checking of store. We may or may not know the path to the
    # local store. It depends if we have an existing destvfs pointing to a
    # share. To ensure we always find a local store, perform the same logic
    # that Mercurial's pooled storage does to resolve the local store path.
    cloneurl = upstream or url

    try:
        clonepeer = hg.peer(ui, {}, cloneurl)
        rootnode = clonepeer.lookup('0')
    except error.RepoLookupError:
        raise error.Abort('unable to resolve root revision from clone '
                          'source')
    except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
        if handlepullerror(e):
            return callself()
        raise

    if rootnode == nullid:
        raise error.Abort('source repo appears to be empty')

    storepath = os.path.join(sharebase, hex(rootnode))
    storevfs = getvfs()(storepath, audit=False)

    if storevfs.isfileorlink('.hg/store/lock'):
        ui.warn('(shared store has an active lock; assuming it is left '
                'over from a previous process and that the store is '
                'corrupt; deleting store and destination just to be '
                'sure)\n')
        if destvfs.exists():
            destvfs.rmtree(forcibly=True)
        storevfs.rmtree(forcibly=True)

    if storevfs.exists() and not storevfs.exists('.hg/requires'):
        ui.warn('(shared store missing requires file; this is a really '
                'odd failure; deleting store and destination)\n')
        if destvfs.exists():
            destvfs.rmtree(forcibly=True)
        storevfs.rmtree(forcibly=True)

    if storevfs.exists('.hg/requires'):
        requires = set(storevfs.read('.hg/requires').splitlines())
        # FUTURE when we require generaldelta, this is where we can check
        # for that.
        required = {'dotencode', 'fncache'}

        missing = required - requires
        if missing:
            ui.warn('(shared store missing requirements: %s; deleting '
                    'store and destination to ensure optimal behavior)\n' %
                    ', '.join(sorted(missing)))
            if destvfs.exists():
                destvfs.rmtree(forcibly=True)
            storevfs.rmtree(forcibly=True)

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)

        try:
            res = hg.clone(ui, {},
                           clonepeer,
                           dest=dest,
                           update=False,
                           shareopts={
                               'pool': sharebase,
                               'mode': 'identity'
                           })
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False
    if revision and revision in repo:
        ctx = repo[revision]

        if not ctx.hex().startswith(revision):
            raise error.Abort('--revision argument is ambiguous',
                              hint='must be the first 12+ characters of a '
                              'SHA-1 fragment')

        checkoutrevision = ctx.hex()
        havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch, ))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [remote.lookup(revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn('(remote resolved %s to %s; '
                        'result is not deterministic)\n' %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn('(revision already present locally; not pulling)\n')
            else:
                pullop = exchange.pull(repo, remote, heads=pullrevs)
                if not pullop.rheads:
                    raise error.Abort('unable to pull requested revision')
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        # Mercurial 4.3 doesn't purge files outside the sparse checkout.
        # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
        # purging by monkeypatching the sparse matcher.
        try:
            old_sparse_fn = getattr(repo.dirstate, '_sparsematchfn', None)
            if old_sparse_fn is not None:
                assert util.versiontuple(n=2) in ((4, 3), (4, 4), (4, 5))
                repo.dirstate._sparsematchfn = lambda: matchmod.always(
                    repo.root, '')

            if purgeext.purge(
                    ui,
                    repo,
                    all=True,
                    abort_on_err=True,
                    # The function expects all arguments to be
                    # defined.
                    **{
                        'print': None,
                        'print0': None,
                        'dirs': None,
                        'files': None
                    }):
                raise error.Abort('error purging')
        finally:
            if old_sparse_fn is not None:
                repo.dirstate._sparsematchfn = old_sparse_fn

    # Update the working directory.

    if sparse_profile:
        sparsemod = getsparse()

        # By default, Mercurial will ignore unknown sparse profiles. This could
        # lead to a full checkout. Be more strict.
        try:
            repo.filectx(sparse_profile, changeid=checkoutrevision).data()
        except error.ManifestLookupError:
            raise error.Abort('sparse profile %s does not exist at revision '
                              '%s' % (sparse_profile, checkoutrevision))

        old_config = sparsemod.parseconfig(repo.ui, repo.vfs.tryread('sparse'))
        old_includes, old_excludes, old_profiles = old_config

        if old_profiles == {sparse_profile} and not old_includes and not \
                old_excludes:
            ui.write('(sparse profile %s already set; no need to update '
                     'sparse config)\n' % sparse_profile)
        else:
            if old_includes or old_excludes or old_profiles:
                ui.write('(replacing existing sparse config with profile '
                         '%s)\n' % sparse_profile)
            else:
                ui.write('(setting sparse config to profile %s)\n' %
                         sparse_profile)

            # If doing an incremental update, this will perform two updates:
            # one to change the sparse profile and another to update to the new
            # revision. This is not desired. But there's not a good API in
            # Mercurial to do this as one operation.
            with repo.wlock():
                fcounts = map(
                    len,
                    sparsemod._updateconfigandrefreshwdir(repo, [], [],
                                                          [sparse_profile],
                                                          force=True))

                repo.ui.status('%d files added, %d files dropped, '
                               '%d files conflicting\n' % tuple(fcounts))

            ui.write('(sparse refresh complete)\n')

    if commands.update(ui, repo, rev=checkoutrevision, clean=True):
        raise error.Abort('error updating')

    ui.write('updated to %s\n' % checkoutrevision)
    return None
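The recovery logic above leans on recursion: when a transient network error occurs, handlepullerror() decides whether the attempt budget allows another try, and callself() re-enters the checkout function with the same arguments. A minimal, self-contained sketch of that shape (all names here are illustrative, not part of the extension):

def with_retries(operation, limit=3):
    attempts = [1]  # mutable counter shared across recursive calls

    def callself():
        return run()

    def run():
        try:
            return operation()
        except IOError:
            if attempts[0] >= limit:
                raise  # budget exhausted; propagate the failure
            attempts[0] += 1
            return callself()

    return run()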
Example #49
        def status(self,
                   node1='.',
                   node2=None,
                   match=None,
                   ignored=False,
                   clean=False,
                   unknown=False,
                   listsubrepos=False):
            listignored, listclean, listunknown = ignored, clean, unknown
            if not self.lfstatus:
                return super(lfilesrepo,
                             self).status(node1, node2, match, listignored,
                                          listclean, listunknown, listsubrepos)
            else:
                # some calls in this function rely on the old version of status
                self.lfstatus = False
                ctx1 = self[node1]
                ctx2 = self[node2]
                working = ctx2.rev() is None
                parentworking = working and ctx1 == self['.']

                def inctx(file, ctx):
                    try:
                        if ctx.rev() is None:
                            return file in ctx.manifest()
                        ctx[file]
                        return True
                    except KeyError:
                        return False

                if match is None:
                    match = match_.always(self.root, self.getcwd())

                wlock = None
                try:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                    except error.LockError:
                        pass

                    # First check if there were files specified on the
                    # command line.  If there were, and none of them were
                    # largefiles, we should just bail here and let super
                    # handle it -- thus gaining a big performance boost.
                    lfdirstate = lfutil.openlfdirstate(ui, self)
                    if match.files() and not match.anypats():
                        for f in lfdirstate:
                            if match(f):
                                break
                        else:
                            return super(lfilesrepo,
                                         self).status(node1, node2, match,
                                                      listignored, listclean,
                                                      listunknown,
                                                      listsubrepos)

                    # Create a copy of match that matches standins instead
                    # of largefiles.
                    def tostandins(files):
                        if not working:
                            return files
                        newfiles = []
                        dirstate = self.dirstate
                        for f in files:
                            sf = lfutil.standin(f)
                            if sf in dirstate:
                                newfiles.append(sf)
                            elif sf in dirstate.dirs():
                                # Directory entries could be regular or
                                # standin, check both
                                newfiles.extend((f, sf))
                            else:
                                newfiles.append(f)
                        return newfiles

                    m = copy.copy(match)
                    m._files = tostandins(m._files)

                    result = super(lfilesrepo,
                                   self).status(node1, node2, m, ignored,
                                                clean, unknown, listsubrepos)
                    if working:

                        def sfindirstate(f):
                            sf = lfutil.standin(f)
                            dirstate = self.dirstate
                            return sf in dirstate or sf in dirstate.dirs()

                        match._files = [
                            f for f in match._files if sfindirstate(f)
                        ]
                        # Don't waste time getting the ignored and unknown
                        # files from lfdirstate
                        s = lfdirstate.status(match, [], False, listclean,
                                              False)
                        (unsure, modified, added, removed, missing, _unknown,
                         _ignored, clean) = s
                        if parentworking:
                            for lfile in unsure:
                                standin = lfutil.standin(lfile)
                                if standin not in ctx1:
                                    # from second parent
                                    modified.append(lfile)
                                elif ctx1[standin].data().strip() \
                                        != lfutil.hashfile(self.wjoin(lfile)):
                                    modified.append(lfile)
                                else:
                                    clean.append(lfile)
                                    lfdirstate.normal(lfile)
                        else:
                            tocheck = unsure + modified + added + clean
                            modified, added, clean = [], [], []

                            for lfile in tocheck:
                                standin = lfutil.standin(lfile)
                                if inctx(standin, ctx1):
                                    if ctx1[standin].data().strip() != \
                                            lfutil.hashfile(self.wjoin(lfile)):
                                        modified.append(lfile)
                                    else:
                                        clean.append(lfile)
                                else:
                                    added.append(lfile)

                        # Standins no longer found in lfdirstate have been
                        # removed
                        for standin in ctx1.manifest():
                            if not lfutil.isstandin(standin):
                                continue
                            lfile = lfutil.splitstandin(standin)
                            if not match(lfile):
                                continue
                            if lfile not in lfdirstate:
                                removed.append(lfile)

                        # Filter result lists
                        result = list(result)

                        # Largefiles are not really removed when they're
                        # still in the normal dirstate. Likewise, normal
                        # files are not really removed if they are still in
                        # lfdirstate. This happens in merges where files
                        # change type.
                        removed = [
                            f for f in removed if f not in self.dirstate
                        ]
                        result[2] = [
                            f for f in result[2] if f not in lfdirstate
                        ]

                        lfiles = set(lfdirstate._map)
                        # Unknown files
                        result[4] = set(result[4]).difference(lfiles)
                        # Ignored files
                        result[5] = set(result[5]).difference(lfiles)
                        # combine normal files and largefiles
                        normals = [[
                            fn for fn in filelist if not lfutil.isstandin(fn)
                        ] for filelist in result]
                        lfiles = (modified, added, removed, missing, [], [],
                                  clean)
                        result = [
                            sorted(list1 + list2)
                            for (list1, list2) in zip(normals, lfiles)
                        ]
                    else:

                        def toname(f):
                            if lfutil.isstandin(f):
                                return lfutil.splitstandin(f)
                            return f

                        result = [[toname(f) for f in items]
                                  for items in result]

                    if wlock:
                        lfdirstate.write()

                finally:
                    if wlock:
                        wlock.release()

                if not listunknown:
                    result[4] = []
                if not listignored:
                    result[5] = []
                if not listclean:
                    result[6] = []
                self.lfstatus = True
                return result
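For readers unfamiliar with largefiles internals: each big file is represented in history by a small "standin" under the .hglf/ directory whose content is the big file's hash. A simplified version of the path mapping that tostandins() relies on might look like this (the real lfutil.standin also normalizes paths through the repo):

import posixpath

SHORTNAME = '.hglf'

def standin(filename):
    # '.hglf/sub/big.bin' stands in for 'sub/big.bin'
    return posixpath.join(SHORTNAME, filename)

def splitstandin(filename):
    # inverse mapping; returns None for paths that are not standins
    if filename.startswith(SHORTNAME + '/'):
        return filename[len(SHORTNAME) + 1:]
    return None

assert splitstandin(standin('sub/big.bin')) == 'sub/big.bin'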
Example #50
def wraprepo(repo):
    class shallowrepository(repo.__class__):
        @util.propertycache
        def name(self):
            return self.ui.config(b'remotefilelog', b'reponame')

        @util.propertycache
        def fallbackpath(self):
            path = repo.ui.config(
                b"remotefilelog",
                b"fallbackpath",
                repo.ui.config(b'paths', b'default'),
            )
            if not path:
                raise error.Abort(
                    b"no remotefilelog server "
                    b"configured - is your .hg/hgrc trusted?"
                )

            return path

        def maybesparsematch(self, *revs, **kwargs):
            """
            A wrapper that allows the remotefilelog to invoke sparsematch() if
            this is a sparse repository, or returns None if this is not a
            sparse repository.
            """
            if revs:
                ret = sparse.matcher(repo, revs=revs)
            else:
                ret = sparse.matcher(repo)

            if ret.always():
                return None
            return ret

        def file(self, f):
            if f[0] == b'/':
                f = f[1:]

            if self.shallowmatch(f):
                return remotefilelog.remotefilelog(self.svfs, f, self)
            else:
                return super(shallowrepository, self).file(f)

        def filectx(self, path, *args, **kwargs):
            if self.shallowmatch(path):
                return remotefilectx.remotefilectx(self, path, *args, **kwargs)
            else:
                return super(shallowrepository, self).filectx(
                    path, *args, **kwargs
                )

        @localrepo.unfilteredmethod
        def commitctx(self, ctx, error=False, origctx=None):
            """Add a new revision to current repository.
            Revision information is passed via the context argument.
            """

            # Some contexts already have manifest nodes; they don't need any
            # prefetching (for example, if we're just editing a commit message
            # we can reuse the manifest).
            if not ctx.manifestnode():
                # prefetch files that will likely be compared
                m1 = ctx.p1().manifest()
                files = []
                for f in ctx.modified() + ctx.added():
                    fparent1 = m1.get(f, nullid)
                    if fparent1 != nullid:
                        files.append((f, hex(fparent1)))
                self.fileservice.prefetch(files)
            return super(shallowrepository, self).commitctx(
                ctx, error=error, origctx=origctx
            )

        def backgroundprefetch(
            self, revs, base=None, repack=False, pats=None, opts=None
        ):
            """Runs prefetch in background with optional repack"""
            cmd = [procutil.hgexecutable(), b'-R', repo.origroot, b'prefetch']
            if repack:
                cmd.append(b'--repack')
            if revs:
                cmd += [b'-r', revs]
            # We know this command will find a binary, so don't block
            # on it starting.
            kwargs = {}
            if repo.ui.configbool(b'devel', b'remotefilelog.bg-wait'):
                kwargs['record_wait'] = repo.ui.atexit

            procutil.runbgcommand(
                cmd, encoding.environ, ensurestart=False, **kwargs
            )

        def prefetch(self, revs, base=None, pats=None, opts=None):
            """Prefetches all the necessary file revisions for the given revs
            Optionally runs repack in background
            """
            with repo._lock(
                repo.svfs,
                b'prefetchlock',
                True,
                None,
                None,
                _(b'prefetching in %s') % repo.origroot,
            ):
                self._prefetch(revs, base, pats, opts)

        def _prefetch(self, revs, base=None, pats=None, opts=None):
            fallbackpath = self.fallbackpath
            if fallbackpath:
                # If we know a rev is on the server, we should fetch the server
                # version of those files, since our local file versions might
                # become obsolete if the local commits are stripped.
                localrevs = repo.revs(b'outgoing(%s)', fallbackpath)
                if base is not None and base != nullrev:
                    serverbase = list(
                        repo.revs(
                            b'first(reverse(::%s) - %ld)', base, localrevs
                        )
                    )
                    if serverbase:
                        base = serverbase[0]
            else:
                localrevs = repo

            mfl = repo.manifestlog
            mfrevlog = mfl.getstorage(b'')
            if base is not None:
                mfdict = mfl[repo[base].manifestnode()].read()
                skip = set(pycompat.iteritems(mfdict))
            else:
                skip = set()

            # Copy the skip set to start large and avoid constant resizing,
            # and since it's likely to be very similar to the prefetch set.
            files = skip.copy()
            serverfiles = skip.copy()
            visited = set()
            visited.add(nullrev)
            revcount = len(revs)
            progress = self.ui.makeprogress(_(b'prefetching'), total=revcount)
            progress.update(0)
            for rev in sorted(revs):
                ctx = repo[rev]
                if pats:
                    m = scmutil.match(ctx, pats, opts)
                sparsematch = repo.maybesparsematch(rev)

                mfnode = ctx.manifestnode()
                mfrev = mfrevlog.rev(mfnode)

                # Decompressing manifests is expensive.
                # When possible, only read the deltas.
                p1, p2 = mfrevlog.parentrevs(mfrev)
                if p1 in visited and p2 in visited:
                    mfdict = mfl[mfnode].readfast()
                else:
                    mfdict = mfl[mfnode].read()

                diff = pycompat.iteritems(mfdict)
                if pats:
                    diff = (pf for pf in diff if m(pf[0]))
                if sparsematch:
                    diff = (pf for pf in diff if sparsematch(pf[0]))
                if rev not in localrevs:
                    serverfiles.update(diff)
                else:
                    files.update(diff)

                visited.add(mfrev)
                progress.increment()

            files.difference_update(skip)
            serverfiles.difference_update(skip)
            progress.complete()

            # Fetch files known to be on the server
            if serverfiles:
                results = [(path, hex(fnode)) for (path, fnode) in serverfiles]
                repo.fileservice.prefetch(results, force=True)

            # Fetch files that may or may not be on the server
            if files:
                results = [(path, hex(fnode)) for (path, fnode) in files]
                repo.fileservice.prefetch(results)

        def close(self):
            super(shallowrepository, self).close()
            self.connectionpool.close()

    repo.__class__ = shallowrepository

    repo.shallowmatch = match.always()

    makeunionstores(repo)

    repo.includepattern = repo.ui.configlist(
        b"remotefilelog", b"includepattern", None
    )
    repo.excludepattern = repo.ui.configlist(
        b"remotefilelog", b"excludepattern", None
    )
    if not util.safehasattr(repo, 'connectionpool'):
        repo.connectionpool = connectionpool.connectionpool(repo)

    if repo.includepattern or repo.excludepattern:
        repo.shallowmatch = match.match(
            repo.root, b'', None, repo.includepattern, repo.excludepattern
        )
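wraprepo() reads its shallow-match patterns from the remotefilelog section of the repository configuration, falling back to paths.default for the server path. A hypothetical .hg/hgrc illustrating the options consulted above (values are made up; configlist splits the pattern lists on commas):

[paths]
default = https://hg.example.com/myrepo

[remotefilelog]
reponame = myrepo
includepattern = path:src, path:docs
excludepattern = path:src/generated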
Example #51
        def commit(self, text="", user=None, date=None, match=None,
                force=False, editor=False, extra={}):
            orig = super(lfiles_repo, self).commit

            wlock = repo.wlock()
            try:
                if getattr(repo, "_isrebasing", False):
                    # We have to take the time to pull down the new
                    # largefiles now. Otherwise if we are rebasing,
                    # any largefiles that were modified in the
                    # destination changesets get overwritten, either
                    # by the rebase or in the first commit after the
                    # rebase.
                    lfcommands.updatelfiles(repo.ui, repo)
                # Case 1: user calls commit with no specific files or
                # include/exclude patterns: refresh and commit all files that
                # are "dirty".
                if ((match is None) or
                    (not match.anypats() and not match.files())):
                    # Spend a bit of time here to get a list of files we know
                    # are modified so we can compare only against those.
                    # It can cost a lot of time (several seconds)
                    # otherwise to update all standins if the largefiles are
                    # large.
                    lfdirstate = lfutil.openlfdirstate(ui, self)
                    dirtymatch = match_.always(repo.root, repo.getcwd())
                    s = lfdirstate.status(dirtymatch, [], False, False, False)
                    modifiedfiles = []
                    for i in s:
                        modifiedfiles.extend(i)
                    lfiles = lfutil.listlfiles(self)
                    # this only loops through largefiles that exist (not
                    # removed/renamed)
                    for lfile in lfiles:
                        if lfile in modifiedfiles:
                            if os.path.exists(self.wjoin(lfutil.standin(lfile))):
                                # this handles the case where a rebase is being
                                # performed and the working copy is not updated
                                # yet.
                                if os.path.exists(self.wjoin(lfile)):
                                    lfutil.updatestandin(self,
                                        lfutil.standin(lfile))
                                    lfdirstate.normal(lfile)
                    for lfile in lfdirstate:
                        if lfile in modifiedfiles:
                            if not os.path.exists(
                                    repo.wjoin(lfutil.standin(lfile))):
                                lfdirstate.drop(lfile)
                    lfdirstate.write()

                    return orig(text=text, user=user, date=date, match=match,
                                    force=force, editor=editor, extra=extra)

                for f in match.files():
                    if lfutil.isstandin(f):
                        raise util.Abort(
                            _('file "%s" is a largefile standin') % f,
                            hint=('commit the largefile itself instead'))

                # Case 2: user calls commit with specified patterns: refresh
                # any matching big files.
                smatcher = lfutil.composestandinmatcher(self, match)
                standins = lfutil.dirstate_walk(self.dirstate, smatcher)

                # No matching big files: get out of the way and pass control to
                # the usual commit() method.
                if not standins:
                    return orig(text=text, user=user, date=date, match=match,
                                    force=force, editor=editor, extra=extra)

                # Refresh all matching big files.  It's possible that the
                # commit will end up failing, in which case the big files will
                # stay refreshed.  No harm done: the user modified them and
                # asked to commit them, so sooner or later we're going to
                # refresh the standins.  Might as well leave them refreshed.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                for standin in standins:
                    lfile = lfutil.splitstandin(standin)
                    if lfdirstate[lfile] != 'r':
                        lfutil.updatestandin(self, standin)
                        lfdirstate.normal(lfile)
                    else:
                        lfdirstate.drop(lfile)
                lfdirstate.write()

                # Cook up a new matcher that only matches regular files or
                # standins corresponding to the big files requested by the
                # user.  Have to modify _files to prevent commit() from
                # complaining "not tracked" for big files.
                lfiles = lfutil.listlfiles(repo)
                match = copy.copy(match)
                orig_matchfn = match.matchfn

                # Check both the list of largefiles and the list of
                # standins because if a largefile was removed, it
                # won't be in the list of largefiles at this point
                match._files += sorted(standins)

                actualfiles = []
                for f in match._files:
                    fstandin = lfutil.standin(f)

                    # ignore known largefiles and standins
                    if f in lfiles or fstandin in standins:
                        continue

                    # append directory separator to avoid collisions
                    if not fstandin.endswith(os.sep):
                        fstandin += os.sep

                    # prevalidate matching standin directories
                    if util.any(st for st in match._files
                                   if st.startswith(fstandin)):
                        continue
                    actualfiles.append(f)
                match._files = actualfiles

                def matchfn(f):
                    if orig_matchfn(f):
                        return f not in lfiles
                    else:
                        return f in standins

                match.matchfn = matchfn
                return orig(text=text, user=user, date=date, match=match,
                                force=force, editor=editor, extra=extra)
            finally:
                wlock.release()
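The closing matchfn trick above is worth isolating: the narrowed matcher admits regular files that the original matcher accepted (unless they are largefiles, which are represented by their standins) plus the standins of the requested big files. A standalone sketch with plain sets standing in for the real state:

def narrowmatchfn(origmatchfn, lfiles, standins):
    # lfiles: set of largefile names; standins: set of standin paths
    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        return f in standins
    return matchfn

m = narrowmatchfn(lambda f: f.endswith('.py'), {'big.bin'},
                  {'.hglf/big.bin'})
assert m('setup.py') and m('.hglf/big.bin') and not m('big.bin')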
Example #52
            except Exception as inst:
                seriespath = os.path.join(self.path, 'series')
                if os.path.exists(seriespath):
                    os.unlink(seriespath)
                p1 = repo.dirstate.p1()
                p2 = node
                self.log(user, date, message, p1, p2, merge=merge)
                self.ui.write(str(inst) + '\n')
                raise TransplantError(_('fix up the merge and run '
                                        'hg transplant --continue'))
        else:
            files = None
        if merge:
            p1, p2 = repo.dirstate.parents()
            repo.setparents(p1, node)
            m = match.always(repo.root, '')
        else:
            m = match.exact(repo.root, '', files)

        n = repo.commit(message, user, date, extra=extra, match=m,
                        editor=self.editor)
        if not n:
            self.ui.warn(_('skipping emptied changeset %s\n') % short(node))
            return None
        if not merge:
            self.transplants.set(n, node)

        return n

    def resume(self, repo, source, opts):
        '''recover last transaction and apply remaining changesets'''
Example #53
        def status(self, node1='.', node2=None, match=None, ignored=False,
                clean=False, unknown=False, listsubrepos=False):
            listignored, listclean, listunknown = ignored, clean, unknown
            orig = super(lfilesrepo, self).status
            if not self.lfstatus:
                return orig(node1, node2, match, listignored, listclean,
                            listunknown, listsubrepos)

            # some calls in this function rely on the old version of status
            self.lfstatus = False
            ctx1 = self[node1]
            ctx2 = self[node2]
            working = ctx2.rev() is None
            parentworking = working and ctx1 == self['.']

            if match is None:
                match = match_.always(self.root, self.getcwd())

            wlock = None
            try:
                try:
                    # updating the dirstate is optional
                    # so we don't wait on the lock
                    wlock = self.wlock(False)
                except error.LockError:
                    pass

                # First check if paths or patterns were specified on the
                # command line.  If there were, and they don't match any
                # largefiles, we should just bail here and let super
                # handle it -- thus gaining a big performance boost.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                if not match.always():
                    for f in lfdirstate:
                        if match(f):
                            break
                    else:
                        return orig(node1, node2, match, listignored, listclean,
                                    listunknown, listsubrepos)

                # Create a copy of match that matches standins instead
                # of largefiles.
                def tostandins(files):
                    if not working:
                        return files
                    newfiles = []
                    dirstate = self.dirstate
                    for f in files:
                        sf = lfutil.standin(f)
                        if sf in dirstate:
                            newfiles.append(sf)
                        elif sf in dirstate.dirs():
                            # Directory entries could be regular or
                            # standin, check both
                            newfiles.extend((f, sf))
                        else:
                            newfiles.append(f)
                    return newfiles

                m = copy.copy(match)
                m._files = tostandins(m._files)

                result = orig(node1, node2, m, ignored, clean, unknown,
                              listsubrepos)
                if working:

                    def sfindirstate(f):
                        sf = lfutil.standin(f)
                        dirstate = self.dirstate
                        return sf in dirstate or sf in dirstate.dirs()

                    match._files = [f for f in match._files
                                    if sfindirstate(f)]
                    # Don't waste time getting the ignored and unknown
                    # files from lfdirstate
                    unsure, s = lfdirstate.status(match, [], False, listclean,
                                                  False)
                    (modified, added, removed, clean) = (s.modified, s.added,
                                                         s.removed, s.clean)
                    if parentworking:
                        for lfile in unsure:
                            standin = lfutil.standin(lfile)
                            if standin not in ctx1:
                                # from second parent
                                modified.append(lfile)
                            elif ctx1[standin].data().strip() \
                                    != lfutil.hashfile(self.wjoin(lfile)):
                                modified.append(lfile)
                            else:
                                if listclean:
                                    clean.append(lfile)
                                lfdirstate.normal(lfile)
                    else:
                        tocheck = unsure + modified + added + clean
                        modified, added, clean = [], [], []
                        checkexec = self.dirstate._checkexec

                        for lfile in tocheck:
                            standin = lfutil.standin(lfile)
                            if standin in ctx1:
                                abslfile = self.wjoin(lfile)
                                if ((ctx1[standin].data().strip() !=
                                     lfutil.hashfile(abslfile)) or
                                    (checkexec and
                                     ('x' in ctx1.flags(standin)) !=
                                     bool(lfutil.getexecutable(abslfile)))):
                                    modified.append(lfile)
                                elif listclean:
                                    clean.append(lfile)
                            else:
                                added.append(lfile)

                        # At this point, 'removed' contains largefiles
                        # marked as 'R' in the working context. Largefiles
                        # that are not tracked in the target context either
                        # should be excluded from 'removed'.
                        removed = [lfile for lfile in removed
                                   if lfutil.standin(lfile) in ctx1]

                    # Standins no longer found in lfdirstate have been
                    # removed
                    for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
                        lfile = lfutil.splitstandin(standin)
                        if not match(lfile):
                            continue
                        if lfile not in lfdirstate:
                            removed.append(lfile)

                    # Filter result lists
                    result = list(result)

                    # Largefiles are not really removed when they're
                    # still in the normal dirstate. Likewise, normal
                    # files are not really removed if they are still in
                    # lfdirstate. This happens in merges where files
                    # change type.
                    removed = [f for f in removed
                               if f not in self.dirstate]
                    result[2] = [f for f in result[2]
                                 if f not in lfdirstate]

                    lfiles = set(lfdirstate._map)
                    # Unknown files
                    result[4] = set(result[4]).difference(lfiles)
                    # Ignored files
                    result[5] = set(result[5]).difference(lfiles)
                    # combine normal files and largefiles
                    normals = [[fn for fn in filelist
                                if not lfutil.isstandin(fn)]
                               for filelist in result]
                    lfstatus = (modified, added, removed, s.deleted, [], [],
                                clean)
                    result = [sorted(list1 + list2)
                              for (list1, list2) in zip(normals, lfstatus)]
                else: # not against working directory
                    result = [[lfutil.splitstandin(f) or f for f in items]
                              for items in result]

                if wlock:
                    lfdirstate.write()

            finally:
                if wlock:
                    wlock.release()

            self.lfstatus = True
            return scmutil.status(*result)
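The index arithmetic in the two status overrides above (result[2], result[4], and so on) follows Mercurial's fixed status ordering, which scmutil.status preserves as a named tuple. For reference:

# (modified, added, removed, deleted, unknown, ignored, clean)
MODIFIED, ADDED, REMOVED, DELETED, UNKNOWN, IGNORED, CLEAN = range(7)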
Example #54
        def commit(self, text="", user=None, date=None, match=None,
                force=False, editor=False, extra={}):
            orig = super(lfilesrepo, self).commit

            wlock = self.wlock()
            try:
                # Case 0: Rebase or Transplant
                # We have to take the time to pull down the new largefiles now.
                # Otherwise, any largefiles that were modified in the
                # destination changesets get overwritten, either by the rebase
                # or in the first commit after the rebase or transplant.
                # updatelfiles will update the dirstate to mark any pulled
                # largefiles as modified
                if getattr(self, "_isrebasing", False) or \
                        getattr(self, "_istransplanting", False):
                    lfcommands.updatelfiles(self.ui, self, filelist=None,
                                            printmessage=False)
                    result = orig(text=text, user=user, date=date, match=match,
                                    force=force, editor=editor, extra=extra)
                    return result
                # Case 1: user calls commit with no specific files or
                # include/exclude patterns: refresh and commit all files that
                # are "dirty".
                if ((match is None) or
                    (not match.anypats() and not match.files())):
                    # Spend a bit of time here to get a list of files we know
                    # are modified so we can compare only against those.
                    # It can cost a lot of time (several seconds)
                    # otherwise to update all standins if the largefiles are
                    # large.
                    lfdirstate = lfutil.openlfdirstate(ui, self)
                    dirtymatch = match_.always(self.root, self.getcwd())
                    s = lfdirstate.status(dirtymatch, [], False, False, False)
                    (unsure, modified, added, removed, _missing, _unknown,
                            _ignored, _clean) = s
                    modifiedfiles = unsure + modified + added + removed
                    lfiles = lfutil.listlfiles(self)
                    # this only loops through largefiles that exist (not
                    # removed/renamed)
                    for lfile in lfiles:
                        if lfile in modifiedfiles:
                            if os.path.exists(
                                    self.wjoin(lfutil.standin(lfile))):
                                # this handles the case where a rebase is being
                                # performed and the working copy is not updated
                                # yet.
                                if os.path.exists(self.wjoin(lfile)):
                                    lfutil.updatestandin(self,
                                        lfutil.standin(lfile))
                                    lfdirstate.normal(lfile)

                    result = orig(text=text, user=user, date=date, match=match,
                                    force=force, editor=editor, extra=extra)

                    if result is not None:
                        for lfile in lfdirstate:
                            if lfile in modifiedfiles:
                                if (not os.path.exists(self.wjoin(
                                   lfutil.standin(lfile)))) or \
                                   (not os.path.exists(self.wjoin(lfile))):
                                    lfdirstate.drop(lfile)

                    # This needs to be after commit; otherwise precommit hooks
                    # get the wrong status
                    lfdirstate.write()
                    return result

                lfiles = lfutil.listlfiles(self)
                match._files = self._subdirlfs(match.files(), lfiles)

                # Case 2: user calls commit with specified patterns: refresh
                # any matching big files.
                smatcher = lfutil.composestandinmatcher(self, match)
                standins = self.dirstate.walk(smatcher, [], False, False)

                # No matching big files: get out of the way and pass control to
                # the usual commit() method.
                if not standins:
                    return orig(text=text, user=user, date=date, match=match,
                                    force=force, editor=editor, extra=extra)

                # Refresh all matching big files.  It's possible that the
                # commit will end up failing, in which case the big files will
                # stay refreshed.  No harm done: the user modified them and
                # asked to commit them, so sooner or later we're going to
                # refresh the standins.  Might as well leave them refreshed.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                for standin in standins:
                    lfile = lfutil.splitstandin(standin)
                    if lfdirstate[lfile] != 'r':
                        lfutil.updatestandin(self, standin)
                        lfdirstate.normal(lfile)
                    else:
                        lfdirstate.drop(lfile)

                # Cook up a new matcher that only matches regular files or
                # standins corresponding to the big files requested by the
                # user.  Have to modify _files to prevent commit() from
                # complaining "not tracked" for big files.
                match = copy.copy(match)
                origmatchfn = match.matchfn

                # Check both the list of largefiles and the list of
                # standins because if a largefile was removed, it
                # won't be in the list of largefiles at this point
                match._files += sorted(standins)

                actualfiles = []
                for f in match._files:
                    fstandin = lfutil.standin(f)

                    # ignore known largefiles and standins
                    if f in lfiles or fstandin in standins:
                        continue

                    # append directory separator to avoid collisions
                    if not fstandin.endswith(os.sep):
                        fstandin += os.sep

                    actualfiles.append(f)
                match._files = actualfiles

                def matchfn(f):
                    if origmatchfn(f):
                        return f not in lfiles
                    else:
                        return f in standins

                match.matchfn = matchfn
                result = orig(text=text, user=user, date=date, match=match,
                                force=force, editor=editor, extra=extra)
                # This needs to be after commit; otherwise precommit hooks
                # get the wrong status
                lfdirstate.write()
                return result
            finally:
                wlock.release()
Example #55
def revert_bfiles(ui, repo):
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
                              False, False, False)
        (unsure, modified, added, removed, missing, unknown, ignored,
         clean) = s

        bfiles = bfutil.list_bfiles(repo)
        toget = []
        at = 0
        updated = 0
        for bfile in bfiles:
            if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                bfdirstate.remove(bfile)
                continue
            if os.path.exists(
                    repo.wjoin(bfutil.standin(bfile + '.orig'))):
                shutil.copyfile(repo.wjoin(bfile), repo.wjoin(bfile + '.orig'))
            at += 1
            expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
            mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
            if (not os.path.exists(repo.wjoin(bfile))
                    or expectedhash != bfutil.hashfile(repo.wjoin(bfile))):
                path = bfutil.find_file(repo, expectedhash)
                if path is None:
                    toget.append((bfile, expectedhash))
                else:
                    util.makedirs(os.path.dirname(repo.wjoin(bfile)))
                    shutil.copy(path, repo.wjoin(bfile))
                    os.chmod(repo.wjoin(bfile), mode)
                    updated += 1
                    if bfutil.standin(bfile) not in repo['.']:
                        bfdirstate.add(bfutil.unixpath(bfile))
                    elif expectedhash == repo['.'][bfutil.standin(
                            bfile)].data().strip():
                        bfdirstate.normal(bfutil.unixpath(bfile))
                    else:
                        bfutil.dirstate_normaldirty(bfdirstate,
                                                    bfutil.unixpath(bfile))
            elif os.path.exists(repo.wjoin(bfile)) and mode != os.stat(
                    repo.wjoin(bfile)).st_mode:
                os.chmod(repo.wjoin(bfile), mode)
                updated += 1
                if bfutil.standin(bfile) not in repo['.']:
                    bfdirstate.add(bfutil.unixpath(bfile))
                elif expectedhash == repo['.'][bfutil.standin(
                        bfile)].data().strip():
                    bfdirstate.normal(bfutil.unixpath(bfile))
                else:
                    bfutil.dirstate_normaldirty(bfdirstate,
                                                bfutil.unixpath(bfile))

        if toget:
            store = basestore._open_store(repo)
            (success, missing) = store.get(toget)
        else:
            success, missing = [], []

        for (filename, hash) in success:
            mode = os.stat(repo.wjoin(bfutil.standin(filename))).st_mode
            os.chmod(repo.wjoin(filename), mode)
            updated += 1
            if bfutil.standin(filename) not in repo['.']:
                bfdirstate.add(bfutil.unixpath(filename))
            elif hash == repo['.'][bfutil.standin(filename)].data().strip():
                bfdirstate.normal(bfutil.unixpath(filename))
            else:
                bfutil.dirstate_normaldirty(bfdirstate,
                                            bfutil.unixpath(filename))

        removed = 0
        for bfile in bfdirstate:
            if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                if os.path.exists(repo.wjoin(bfile)):
                    os.unlink(repo.wjoin(bfile))
                    removed += 1
                    if bfutil.standin(bfile) in repo['.']:
                        bfdirstate.remove(bfutil.unixpath(bfile))
                    else:
                        bfdirstate.forget(bfutil.unixpath(bfile))
            else:
                state = repo.dirstate[bfutil.standin(bfile)]
                if state == 'n':
                    bfdirstate.normal(bfile)
                elif state == 'r':
                    bfdirstate.remove(bfile)
                elif state == 'a':
                    bfdirstate.add(bfile)
                elif state == '?':
                    bfdirstate.forget(bfile)
        bfdirstate.write()
    finally:
        wlock.release()
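The single-character states consulted in the final loop above are stock dirstate codes: 'n' for a tracked (normal) file, 'a' for added, 'r' for removed, 'm' for merged, and '?' for untracked. The same dispatch can be written as a table; a hedged sketch:

def syncstate(bfdirstate, bfile, state):
    # mirror the working dirstate's view of the standin into bfdirstate
    actions = {
        'n': bfdirstate.normal,
        'r': bfdirstate.remove,
        'a': bfdirstate.add,
        '?': bfdirstate.forget,
    }
    if state in actions:
        actions[state](bfile)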
Example #56
def update_bfiles(ui, repo):
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
                              False, False, False)
        (unsure, modified, added, removed, missing, unknown, ignored,
         clean) = s

        bfiles = bfutil.list_bfiles(repo)
        toget = []
        at = 0
        updated = 0
        removed = 0
        printed = False
        if bfiles:
            ui.status(_('Getting changed bfiles\n'))
            printed = True

        for bfile in bfiles:
            at += 1
            if os.path.exists(repo.wjoin(bfile)) and not os.path.exists(
                    repo.wjoin(bfutil.standin(bfile))):
                os.unlink(repo.wjoin(bfile))
                removed += 1
                bfdirstate.forget(bfutil.unixpath(bfile))
                continue
            expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
            mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
            if (not os.path.exists(repo.wjoin(bfile))
                    or expectedhash != bfutil.hashfile(repo.wjoin(bfile))):
                path = bfutil.find_file(repo, expectedhash)
                if not path:
                    toget.append((bfile, expectedhash))
                else:
                    util.makedirs(os.path.dirname(repo.wjoin(bfile)))
                    shutil.copy(path, repo.wjoin(bfile))
                    os.chmod(repo.wjoin(bfile), mode)
                    updated += 1
                    bfdirstate.normal(bfutil.unixpath(bfile))
            elif os.path.exists(repo.wjoin(bfile)) and mode != os.stat(
                    repo.wjoin(bfile)).st_mode:
                os.chmod(repo.wjoin(bfile), mode)
                updated += 1
                bfdirstate.normal(bfutil.unixpath(bfile))

        if toget:
            store = basestore._open_store(repo)
            (success, missing) = store.get(toget)
        else:
            success, missing = [], []

        for (filename, hash) in success:
            mode = os.stat(repo.wjoin(bfutil.standin(filename))).st_mode
            os.chmod(repo.wjoin(filename), mode)
            updated += 1
            bfdirstate.normal(bfutil.unixpath(filename))

        for bfile in bfdirstate:
            if bfile not in bfiles:
                if os.path.exists(repo.wjoin(bfile)):
                    if not printed:
                        ui.status(_('Getting changed bfiles\n'))
                        printed = True
                    os.unlink(repo.wjoin(bfile))
                    removed += 1
                    bfdirstate.forget(bfutil.unixpath(bfile))

        bfdirstate.write()
        if printed:
            ui.status(
                _('%d big files updated, %d removed\n') % (updated, removed))
    finally:
        wlock.release()
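Both bfutil.hashfile and its largefiles counterpart compute the hex SHA-1 digest of a file's contents; that digest is what the standin stores and what expectedhash is compared against above. An equivalent sketch in plain Python:

import hashlib

def hashfile(path, blocksize=1 << 20):
    h = hashlib.sha1()
    with open(path, 'rb') as fh:
        for chunk in iter(lambda: fh.read(blocksize), b''):
            h.update(chunk)
    return h.hexdigest()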
Example #57
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to the specified match

    This returns the (possibly modified) ``match`` object to be used for
    the subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = match_.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                if os.path.exists(
                        repo.wjoin(standin(lfile))):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if os.path.exists(repo.wjoin(lfile)):
                        updatestandin(repo,
                            standin(lfile))

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files.  It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed.  No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins.  Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user.  Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # ignore known largefiles and standins
        if f in lfiles or fstandin in standins:
            continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
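A typical caller is a commit wrapper: it narrows the matcher once, then hands the result to the stock commit path. A hypothetical sketch of such a call site (the real largefiles extension wires this through its repo.commit override):

def commitlfiles(repo, orig_commit, text='', user=None, date=None,
                 match=None, **kwargs):
    # refresh standins and narrow the matcher before committing
    match = updatestandinsbymatch(repo, match)
    return orig_commit(text=text, user=user, date=date, match=match,
                       **kwargs)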
Example #58
def _docheckout(
    ui,
    url,
    dest,
    upstream,
    revision,
    branch,
    purge,
    sharebase,
    optimes,
    behaviors,
    networkattemptlimit,
    networkattempts=None,
    sparse_profile=None,
    noupdate=False,
):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(
            ui,
            url,
            dest,
            upstream,
            revision,
            branch,
            purge,
            sharebase,
            optimes,
            behaviors,
            networkattemptlimit,
            networkattempts=networkattempts,
            sparse_profile=sparse_profile,
            noupdate=noupdate,
        )

    @contextlib.contextmanager
    def timeit(op, behavior):
        behaviors.add(behavior)
        errored = False
        try:
            start = time.time()
            yield
        except Exception:
            errored = True
            raise
        finally:
            elapsed = time.time() - start

            if errored:
                op += "_errored"

            optimes.append((op, elapsed))

    ui.write(b"ensuring %s@%s is available at %s\n" % (url, revision or branch, dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = vfs.vfs(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        storepath = path or destvfs.read(b".hg/sharedpath").strip()
        if storepath.endswith(b".hg"):
            storepath = os.path.dirname(storepath)

        storevfs = vfs.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists(b".hg"):
        raise error.Abort(b"destination exists but no .hg directory")

    # Refuse to enable sparse checkouts on existing checkouts. The reasoning
    # here is that another consumer of this repo may not be sparse aware. If we
    # enabled sparse, we would lock them out.
    if destvfs.exists() and sparse_profile and not destvfs.exists(b".hg/sparse"):
        raise error.Abort(
            b"cannot enable sparse profile on existing " b"non-sparse checkout",
            hint=b"use a separate working directory to use sparse",
        )

    # And the other direction for symmetry.
    if not sparse_profile and destvfs.exists(b".hg/sparse"):
        raise error.Abort(
            b"cannot use non-sparse checkout on existing sparse " b"checkout",
            hint=b"use a separate working directory to use sparse",
        )

    # Require checkouts to be tied to shared storage, for efficiency.
    if destvfs.exists(b".hg") and not destvfs.exists(b".hg/sharedpath"):
        ui.warn(b"(destination is not shared; deleting)\n")
        with timeit("remove_unshared_dest", "remove-wdir"):
            destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists(b".hg/sharedpath"):
        storepath = destvfs.read(b".hg/sharedpath").strip()

        ui.write(b"(existing repository shared store: %s)\n" % storepath)

        if not os.path.exists(storepath):
            ui.warn(b"(shared store does not exist; deleting destination)\n")
            with timeit("removed_missing_shared_store", "remove-wdir"):
                destvfs.rmtree(forcibly=True)
        elif not re.search(br"[a-f0-9]{40}/\.hg$", storepath.replace(b"\\", b"/")):
            ui.warn(
                b"(shared store does not belong to pooled storage; "
                b"deleting destination to improve efficiency)\n"
            )
            with timeit("remove_unpooled_store", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

    if destvfs.isfileorlink(b".hg/wlock"):
        ui.warn(
            b"(dest has an active working directory lock; assuming it is "
            b"left over from a previous process and that the destination "
            b"is corrupt; deleting it just to be sure)\n"
        )
        with timeit("remove_locked_wdir", "remove-wdir"):
            destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if pycompat.bytestr(e) == _(b"abandoned transaction found"):
            ui.warn(b"(abandoned transaction found; trying to recover)\n")
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn(b"(could not recover repo state; " b"deleting shared store)\n")
                with timeit("remove_unrecovered_shared_store", "remove-store"):
                    deletesharedstore()

            ui.warn(b"(attempting checkout from beginning)\n")
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort(
                b"reached maximum number of network attempts; " b"giving up\n"
            )

        ui.warn(
            b"(retrying after network failure on attempt %d of %d)\n"
            % (networkattempts[0], networkattemptlimit)
        )

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2 ** networkattempts[0] - 1) * 1.5
        jittermin = ui.configint(b"robustcheckout", b"retryjittermin", 1000)
        jittermax = ui.configint(b"robustcheckout", b"retryjittermax", 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn(b"(waiting %.2fs before retry)\n" % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _(b"repository is unrelated"):
                ui.warn(b"(repository is unrelated; deleting)\n")
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_(b"stream ended unexpectedly")):
                ui.warn(b"%s\n" % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        # TODO test this branch
        elif isinstance(e, error.ResponseError):
            if e.args[0].startswith(_(b"unexpected response from remote server:")):
                ui.warn(b"(unexpected response from remote server; retrying)\n")
                destvfs.rmtree(forcibly=True)
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn(b"ssl error: %s\n" % e)
            handlenetworkfailure()
            return True
        elif isinstance(e, urllibcompat.urlerr.urlerror):
            if isinstance(e.reason, socket.error):
                ui.warn(b"socket error: %s\n" % pycompat.bytestr(e.reason))
                handlenetworkfailure()
                return True
            else:
                ui.warn(
                    b"unhandled URLError; reason type: %s; value: %s\n"
                    % (e.reason.__class__.__name__, e.reason)
                )
        else:
            ui.warn(
                b"unhandled exception during network operation; type: %s; "
                b"value: %s\n" % (e.__class__.__name__, e)
            )

        return False

    # Perform sanity checking of store. We may or may not know the path to the
    # local store. It depends if we have an existing destvfs pointing to a
    # share. To ensure we always find a local store, perform the same logic
    # that Mercurial's pooled storage does to resolve the local store path.
    cloneurl = upstream or url

    try:
        clonepeer = hg.peer(ui, {}, cloneurl)
        rootnode = peerlookup(clonepeer, b"0")
    except error.RepoLookupError:
        raise error.Abort(b"unable to resolve root revision from clone " b"source")
    except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
        if handlepullerror(e):
            return callself()
        raise

    if rootnode == nullid:
        raise error.Abort(b"source repo appears to be empty")

    storepath = os.path.join(sharebase, hex(rootnode))
    storevfs = vfs.vfs(storepath, audit=False)

    if storevfs.isfileorlink(b".hg/store/lock"):
        ui.warn(
            b"(shared store has an active lock; assuming it is left "
            b"over from a previous process and that the store is "
            b"corrupt; deleting store and destination just to be "
            b"sure)\n"
        )
        if destvfs.exists():
            with timeit("remove_dest_active_lock", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

        with timeit("remove_shared_store_active_lock", "remove-store"):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists() and not storevfs.exists(b".hg/requires"):
        ui.warn(
            b"(shared store missing requires file; this is a really "
            b"odd failure; deleting store and destination)\n"
        )
        if destvfs.exists():
            with timeit("remove_dest_no_requires", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

        with timeit("remove_shared_store_no_requires", "remove-store"):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists(b".hg/requires"):
        requires = set(storevfs.read(b".hg/requires").splitlines())
        # FUTURE when we require generaldelta, this is where we can check
        # for that.
        required = {b"dotencode", b"fncache"}

        missing = required - requires
        if missing:
            ui.warn(
                b"(shared store missing requirements: %s; deleting "
                b"store and destination to ensure optimal behavior)\n"
                % b", ".join(sorted(missing))
            )
            if destvfs.exists():
                with timeit("remove_dest_missing_requires", "remove-wdir"):
                    destvfs.rmtree(forcibly=True)

            with timeit("remove_shared_store_missing_requires", "remove-store"):
                storevfs.rmtree(forcibly=True)
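
    # For reference (illustrative, not exhaustive), a modern pooled store's
    # .hg/requires typically contains entries such as:
    #
    #     dotencode
    #     fncache
    #     generaldelta
    #     revlogv1
    #     store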

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, "ensuredirs"):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write(b"(cloning from upstream repo %s)\n" % upstream)

        if not storevfs.exists():
            behaviors.add(b"create-store")

        try:
            with timeit("clone", "clone"):
                shareopts = {b"pool": sharebase, b"mode": b"identity"}
                res = hg.clone(
                    ui,
                    {},
                    clonepeer,
                    dest=dest,
                    update=False,
                    shareopts=shareopts,
                    stream=True,
                )
        except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
            with timeit("remove_shared_store_revlogerror", "remove-store"):
                deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort(b"clone failed")

        # Verify it is using shared pool storage.
        if not destvfs.exists(b".hg/sharedpath"):
            raise error.Abort(b"clone did not create a shared repo")

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False

    if revision:
        try:
            ctx = scmutil.revsingle(repo, revision)
        except error.RepoLookupError:
            ctx = None

        if ctx:
            if not ctx.hex().startswith(revision):
                raise error.Abort(
                    b"--revision argument is ambiguous",
                    hint=b"must be the first 12+ characters of a " b"SHA-1 fragment",
                )

            checkoutrevision = ctx.hex()
            havewantedrev = True

    if not havewantedrev:
        ui.write(b"(pulling to obtain %s)\n" % (revision or branch,))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [peerlookup(remote, revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn(
                    b"(remote resolved %s to %s; "
                    b"result is not deterministic)\n" % (branch, checkoutrevision)
                )

            if checkoutrevision in repo:
                ui.warn(b"(revision already present locally; not pulling)\n")
            else:
                with timeit("pull", "pull"):
                    pullop = exchange.pull(repo, remote, heads=pullrevs)
                    if not pullop.rheads:
                        raise error.Abort(b"unable to pull requested revision")
        except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Avoid any working directory manipulations if `-U`/`--noupdate` was passed
    if noupdate:
        ui.write(b"(skipping update since `-U` was passed)\n")
        return None

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write(b"(purging working directory)\n")
        purgeext = extensions.find(b"purge")

        # Mercurial 4.3 doesn't purge files outside the sparse checkout.
        # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
        # purging by monkeypatching the sparse matcher.
        try:
            old_sparse_fn = getattr(repo.dirstate, "_sparsematchfn", None)
            if old_sparse_fn is not None:
                # TRACKING hg50
                # Arguments passed to `matchmod.always` were unused and have been removed
                if util.versiontuple(n=2) >= (5, 0):
                    repo.dirstate._sparsematchfn = lambda: matchmod.always()
                else:
                    repo.dirstate._sparsematchfn = lambda: matchmod.always(
                        repo.root, ""
                    )

            with timeit("purge", "purge"):
                if purgeext.purge(
                    ui,
                    repo,
                    all=True,
                    abort_on_err=True,
                    # The function expects all arguments to be
                    # defined.
                    **{"print": None, "print0": None, "dirs": None, "files": None}
                ):
                    raise error.Abort(b"error purging")
        finally:
            if old_sparse_fn is not None:
                repo.dirstate._sparsematchfn = old_sparse_fn

    # Update the working directory.

    if repo[b"."].node() == nullid:
        behaviors.add("empty-wdir")
    else:
        behaviors.add("populated-wdir")

    if sparse_profile:
        sparsemod = getsparse()

        # By default, Mercurial will ignore unknown sparse profiles. This could
        # lead to a full checkout. Be more strict.
        try:
            repo.filectx(sparse_profile, changeid=checkoutrevision).data()
        except error.ManifestLookupError:
            raise error.Abort(
                b"sparse profile %s does not exist at revision "
                b"%s" % (sparse_profile, checkoutrevision)
            )

        # TRACKING hg48 - parseconfig takes `action` param
        if util.versiontuple(n=2) >= (4, 8):
            old_config = sparsemod.parseconfig(
                repo.ui, repo.vfs.tryread(b"sparse"), b"sparse"
            )
        else:
            old_config = sparsemod.parseconfig(repo.ui, repo.vfs.tryread(b"sparse"))

        old_includes, old_excludes, old_profiles = old_config

        if old_profiles == {sparse_profile} and not old_includes and not old_excludes:
            ui.write(
                b"(sparse profile %s already set; no need to update "
                b"sparse config)\n" % sparse_profile
            )
        else:
            if old_includes or old_excludes or old_profiles:
                ui.write(
                    b"(replacing existing sparse config with profile "
                    b"%s)\n" % sparse_profile
                )
            else:
                ui.write(b"(setting sparse config to profile %s)\n" % sparse_profile)

            # If doing an incremental update, this will perform two updates:
            # one to change the sparse profile and another to update to the new
            # revision. This is not desired. But there's not a good API in
            # Mercurial to do this as one operation.
            with repo.wlock(), timeit("sparse_update_config", "sparse-update-config"):
                # pylint --py3k: W1636
                fcounts = list(
                    map(
                        len,
                        sparsemod._updateconfigandrefreshwdir(
                            repo, [], [], [sparse_profile], force=True
                        ),
                    )
                )

                repo.ui.status(
                    b"%d files added, %d files dropped, "
                    b"%d files conflicting\n" % tuple(fcounts)
                )

            ui.write(b"(sparse refresh complete)\n")

    op = "update_sparse" if sparse_profile else "update"
    behavior = "update-sparse" if sparse_profile else "update"

    with timeit(op, behavior):
        if commands.update(ui, repo, rev=checkoutrevision, clean=True):
            raise error.Abort(b"error updating")

    ui.write(b"updated to %s\n" % checkoutrevision)

    return None
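
The exponential backoff in handlenetworkfailure() can be reproduced in
isolation; a small sketch (the 1000-5000 ms jitter defaults are assumed from
the config reads above) that matches the ranges listed in the comment:

import random

def backoff_seconds(attempt, jittermin=1000, jittermax=5000):
    # (2 ** n - 1) * 1.5 plus uniform jitter, as in handlenetworkfailure().
    return (2 ** attempt - 1) * 1.5 + random.randint(jittermin, jittermax) / 1000.0

# attempt 1 -> [2.5, 6.5], attempt 2 -> [5.5, 9.5], attempt 3 -> [11.5, 15.5]
for attempt in (1, 2, 3):
    print("attempt %d: %.2fs" % (attempt, backoff_seconds(attempt)))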
Example #59
0
def walk(repo, revs):
    '''Returns revisions in repo specified by the revset string revs'''
    return cmdutil.walkchangerevs(
        repo,
        match.always(repo.root, None),
        {'rev': [revs.encode('ascii', 'ignore')]},
        lambda *args: None,
    )
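
A hedged usage sketch for walk() (the repository path and revset string are
assumptions; ui and hg.repository() are standard Mercurial API on the hg
version this snippet targets, which predates the py3-only bytes paths):

from mercurial import hg, ui as uimod

# Illustrative: print the last five changesets of a local repository.
repo = hg.repository(uimod.ui.load(), '.')
for ctx in walk(repo, 'last(all(), 5)'):
    print('%d:%s' % (ctx.rev(), ctx.hex()[:12]))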