示例#1
0
def _hg_repository_sync(name, url, submits, do_update=True):
    """Ensure a local clone of *url* exists under REPOSITORY_BASE/*name*
    and is up to date; return the mercurial repository object.

    On first sync the repository is cloned and a ``default-push`` ssh
    path is appended to its hgrc.  On later syncs we pull (and, when
    *do_update* is true, update the working copy) only if the newest
    submitted changeset is not already known locally.
    """
    ui_ = ui()
    repopath = os.path.join(settings.REPOSITORY_BASE, name)
    configpath = os.path.join(repopath, '.hg', 'hgrc')
    if not os.path.isfile(configpath):
        # No hgrc yet: treat as a fresh clone.
        if not os.path.isdir(os.path.dirname(repopath)):
            os.makedirs(os.path.dirname(repopath))
        clone(ui_,
              str(url),
              str(repopath),
              pull=False,
              uncompressed=False,
              rev=[],
              noupdate=False)
        cfg = open(configpath, 'a')
        # Derive the push URL by replacing the scheme prefix with "ssh"
        # (assumes *url* starts with "http" -- TODO confirm at call sites).
        cfg.write('default-push = ssh%s\n' % str(url)[4:])
        cfg.close()
        ui_.readconfig(configpath)
        hgrepo = repository(ui_, repopath)
    else:
        ui_.readconfig(configpath)
        hgrepo = repository(ui_, repopath)
        # Probe for the last submitted changeset; RepoError means we
        # don't have it yet and must pull.
        cs = submits[-1].changesets[-1]
        try:
            hgrepo.changectx(cs)
        except RepoError:
            pull(ui_,
                 hgrepo,
                 source=str(url),
                 force=False,
                 update=False,
                 rev=[])
            if do_update:
                update(ui_, hgrepo)
    return hgrepo
示例#2
0
 def _createBranch(self, branch_name, message, from_branch = None):
     """Create the named branch *branch_name* and commit it with *message*.

     If *from_branch* is given, the working copy is first updated to that
     branch so the new branch forks from it.
     """
     # Idiom fix: "x is not None" instead of "not x is None".
     if from_branch is not None:
         # First update to from_branch so the new branch is based on it.
         commands.update(self.ui, self.repo, from_branch)

     commands.branch(self.ui, self.repo, branch_name)
     commands.commit(self.ui, self.repo, message = message)
示例#3
0
File: hgflow.py  Project: djm/dotfiles
 def _createBranch(self, branch_name, message, from_branch = None):
     """Create the named branch *branch_name* and commit it with *message*.

     If *from_branch* is given, the working copy is first updated to that
     branch so the new branch forks from it.
     """
     # Idiom fix: "x is not None" instead of "not x is None".
     if from_branch is not None:
         # First update to from_branch so the new branch is based on it.
         commands.update(self.ui, self.repo, from_branch)

     commands.branch(self.ui, self.repo, branch_name)
     commands.commit(self.ui, self.repo, message = message)
示例#4
0
    def test_updatehgsub(self):
        """Updating across revisions must add/remove svn-external subrepo
        checkouts so the working copy matches each revision's externals."""
        def checkdeps(ui, repo, rev, deps, nodeps):
            # Update to *rev*; every path in *deps* must exist as a
            # directory under the repo root, every path in *nodeps* must
            # be absent.
            commands.update(ui, repo, node=str(rev))
            for d in deps:
                p = os.path.join(repo.root, d)
                self.assertTrue(os.path.isdir(p),
                                'missing: %s@%r' % (d, repo[None].rev()))
            for d in nodeps:
                p = os.path.join(repo.root, d)
                self.assertTrue(not os.path.isdir(p),
                                'unexpected: %s@%r' % (d, repo[None].rev()))

        # Subrepo support not available in this mercurial: nothing to test.
        if subrepo is None:
            return

        ui = self.ui()
        repo = self._load_fixture_and_fetch('externals.svndump',
                                            stupid=0, externals='subrepos')
        checkdeps(ui, repo, 0, ['deps/project1'], [])
        checkdeps(ui, repo, 1, ['deps/project1', 'deps/project2'], [])
        checkdeps(ui, repo, 2, ['subdir/deps/project1', 'subdir2/deps/project1',
                   'deps/project2'],
                  ['deps/project1'])
        checkdeps(ui, repo, 3, ['subdir/deps/project1', 'deps/project2'],
                  ['subdir2/deps/project1'])
        checkdeps(ui, repo, 4, ['subdir/deps/project1'], ['deps/project2'])

        # Test update --clean, used to crash
        repo.wwrite('subdir/deps/project1/a', 'foobar', '')
        commands.update(ui, repo, node='4', clean=True)
示例#5
0
    def test_updatehgsub(self):
        """Updating across revisions must add/remove svn-external subrepo
        checkouts so the working copy matches each revision's externals
        (variant with subrepo-enabled ui)."""
        def checkdeps(ui, repo, rev, deps, nodeps):
            # Update to *rev*; every path in *deps* must exist as a
            # directory under the repo root, every path in *nodeps* must
            # be absent.
            commands.update(ui, repo, node=str(rev))
            for d in deps:
                p = os.path.join(repo.root, d)
                self.assertTrue(os.path.isdir(p),
                                'missing: %s@%r' % (d, repo[None].rev()))
            for d in nodeps:
                p = os.path.join(repo.root, d)
                self.assertTrue(not os.path.isdir(p),
                                'unexpected: %s@%r' % (d, repo[None].rev()))

        # Subrepo support not available in this mercurial: nothing to test.
        if subrepo is None:
            return

        ui = self.ui(subrepo=True)
        repo = self._load_fixture_and_fetch('externals.svndump',
                                            externals='subrepos')
        checkdeps(ui, repo, 0, ['deps/project1'], [])
        checkdeps(ui, repo, 1, ['deps/project1', 'deps/project2'], [])
        checkdeps(
            ui, repo, 2,
            ['subdir/deps/project1', 'subdir2/deps/project1', 'deps/project2'],
            ['deps/project1'])
        checkdeps(ui, repo, 3, ['subdir/deps/project1', 'deps/project2'],
                  ['subdir2/deps/project1'])
        checkdeps(ui, repo, 4, ['subdir/deps/project1'], ['deps/project2'])

        # Test update --clean, used to crash
        repo.wwrite('subdir/deps/project1/a', 'foobar', '')
        commands.update(ui, repo, node='4', clean=True)
示例#6
0
File: vcs.py  Project: diasks2/pontoon
    def pull(self, source=None, target=None):
        """Clone *source* into *target*, or pull+update when a repository
        already exists there.

        Falls back to a fresh clone when *target* is not (or is no longer)
        a valid mercurial repository.  Raises PullFromRepositoryException
        when even the clone fails.
        """
        from mercurial import commands, hg, ui, error
        log.debug("Clone or update HG repository.")

        source = source or self.source
        target = target or self.target

        # Folders need to be manually created
        if not os.path.exists(target):
            os.makedirs(target)

        # Doesn't work with unicode type
        url = str(source)
        path = str(target)

        try:
            repo = hg.repository(ui.ui(), path)
            commands.pull(ui.ui(), repo, source=url)
            commands.update(ui.ui(), repo)
            log.debug("Mercurial: repository at " + url + " updated.")
        except error.RepoError, e:
            # Target is not a repository yet: clone from scratch.
            log.debug("Mercurial: " + str(e))
            try:
                commands.clone(ui.ui(), url, path)
                log.debug("Mercurial: repository at " + url + " cloned.")
            except Exception, e:
                log.debug("Mercurial: " + str(e))
                raise PullFromRepositoryException(unicode(e))
示例#7
0
File: hg.py  Project: alfredodeza/pacha
def hg_push_update(repo):
    """Update the working copy of *repo* (a path) after a push, logging
    the first line of Mercurial's buffered output."""
    base_ui = ui.ui()
    repo = hg.repository(base_ui, repo)
    # Capture hg's output so we can log it instead of printing it.
    repo.ui.pushbuffer()
    commands.update(ui.ui(), repo)
    hg_log.debug("updating repo: %s" % repo)
    first_line = repo.ui.popbuffer().split('\n')[0]
    hg_log.debug(first_line)
示例#8
0
    def hgflow_func_feature_finish(self, target_branch, name):
        '''Finish the feature *name* living on *target_branch*.

        1. Verify the branch exists for this feature.
        2. Close the feature branch (commit with close_branch).
        3. Merge it into the develop branch.
        4. Commit the merge on the develop branch.
        '''

        if not self._findBranch(target_branch, name):
            return

        commands.update(self.ui, self.repo, target_branch)
        commands.commit(self.ui,
                        self.repo,
                        close_branch=True,
                        message='hg flow, close feature %s' % (name, ))

        commands.update(self.ui, self.repo, self.developBranch)
        commands.merge(self.ui, self.repo, target_branch)
        # BUG FIX: the merge-commit message previously said "release" --
        # this is the feature-finish path, so label it as a feature merge
        # (matching the wording the removed commented-out line intended).
        commands.commit(
            self.ui,
            self.repo,
            message='hg flow, merge feature `%s` to develop branch `%s`' %
            (name, self.developBranch))
示例#9
0
    def update(self, branch=None):
        """ Update the local repository for recent changes.

        Pulls from ``self.url`` and updates the working copy to *branch*
        (defaulting to ``self.branch``), printing a progress line first.
        """
        if branch is None:
            branch = self.branch

        print "*** Updating to branch '%s'" % branch
        commands.pull(ui.ui(), self._repository, self.url)
        # Positional args presumably map to node=None, rev=branch,
        # clean=True -- TODO confirm against mercurial.commands.update.
        commands.update(ui.ui(), self._repository, None, branch, True)
    def update(self, branch=None):
        """ Update the local repository for recent changes.

        Pulls from ``self.url`` and updates the working copy to *branch*
        (defaulting to ``self.branch``).
        """

        if branch is None:
            branch = self.branch

        commands.pull(ui.ui(), self._repository, self.url)
        # Positional args presumably map to node=None, rev=branch,
        # clean=True -- TODO confirm against mercurial.commands.update.
        commands.update(ui.ui(), self._repository, None, branch, True)
    def clean_working_copy(self):
        """Restore a pristine working copy: strip outgoing changesets,
        then clean-update and revert everything (no .orig backups)."""
        self.strip_outgoing()

        # Suppress mercurial's console output while cleaning.
        self.ui.pushbuffer()
        try:
            commands.update(self.ui, self.repo, clean=True)
            commands.revert(self.ui, self.repo, all=True, no_backup=True)
        finally:
            self.ui.popbuffer()
示例#12
0
File: hg.py  Project: dahool/vertaal
 def update(self):
     """Pull remote changes and update the working copy to this browser's
     branch, notifying the UI callback first.

     Mercurial's RepoError is re-raised as BrowserException.
     """
     self._send_callback(self.callback_on_action_notify,_('Updating repository %s') % self._remote_path)
     try:
         self.cleanup()
         commands.pull(self.repo.ui, self.repo, rev=None, force=False, update=True)
         commands.update(self.repo.ui, self.repo, self.branch)
         self._process_files()
     except RepoError, e:
         raise BrowserException, e
示例#13
0
File: __init__.py  Project: fu2re/jsondb
 def update(self, stdout=None):
     """Pull every remote change and update the working copy, discarding
     any local modifications (clean update)."""
     hg_ui, hg_repo = self._hg(stdout)
     commands.pull(hg_ui, hg_repo)
     commands.update(hg_ui, hg_repo, clean=True)
     # Drop the handles once done, mirroring the original cleanup.
     del hg_ui, hg_repo
示例#14
0
File: utils.py  Project: stasm/elmo
def handlePushes(repo_id, submits, do_update=True):
    """Sync the repository identified by *repo_id* and record each push
    in *submits* as a Push row with its changesets.

    The repository is cloned on first contact (adding a ``default-push``
    ssh entry to its hgrc); otherwise it is pulled only when the newest
    submitted changeset is missing locally, updating the working copy
    when *do_update* is true.  Each push commits its own transaction.

    Cleanups vs. the previous version: the unused ``revisions``
    aggregation was removed, and an unreachable debug ``print`` that sat
    after ``raise`` was deleted.
    """
    if not submits:
        return
    repo = Repository.objects.get(id=repo_id)
    ui = _ui()
    repopath = os.path.join(settings.REPOSITORY_BASE, repo.name)
    configpath = os.path.join(repopath, '.hg', 'hgrc')
    if not os.path.isfile(configpath):
        # No hgrc yet: fresh clone.
        if not os.path.isdir(os.path.dirname(repopath)):
            os.makedirs(os.path.dirname(repopath))
        clone(ui,
              str(repo.url),
              str(repopath),
              pull=False,
              uncompressed=False,
              rev=[],
              noupdate=False)
        cfg = open(configpath, 'a')
        # Derive the push URL by swapping the scheme prefix for ssh.
        cfg.write('default-push = ssh%s\n' % str(repo.url)[4:])
        cfg.close()
        ui.readconfig(configpath)
        hgrepo = repository(ui, repopath)
    else:
        ui.readconfig(configpath)
        hgrepo = repository(ui, repopath)
        # Pull only if the newest submitted changeset is unknown locally.
        cs = submits[-1].changesets[-1]
        try:
            hgrepo.changectx(cs)
        except RepoError:
            pull(ui,
                 hgrepo,
                 source=str(repo.url),
                 force=False,
                 update=False,
                 rev=[])
            if do_update:
                update(ui, hgrepo)
    for data in submits:
        changesets = []
        for revision in data.changesets:
            try:
                cs = getChangeset(repo, hgrepo, revision)
                transaction.commit()
                changesets.append(cs)
            except Exception:
                # Roll back the partial DB work and propagate the error.
                transaction.rollback()
                raise
        p = Push.objects.create(repository=repo,
                                push_id=data.id,
                                user=data.user,
                                push_date=datetime.utcfromtimestamp(data.date))
        p.changesets = changesets
        p.save()
        transaction.commit()
示例#15
0
 def checkdeps(ui, repo, rev, deps, nodeps):
     """Update the working copy to *rev*, then assert that every path in
     *deps* exists as a directory under the repo root and that every path
     in *nodeps* does not.

     NOTE(review): ``self`` is a free variable here and must be supplied
     by the enclosing scope.
     """
     commands.update(ui, repo, node=str(rev))
     for wanted in deps:
         location = os.path.join(repo.root, wanted)
         self.assertTrue(os.path.isdir(location),
                         'missing: %s@%r' % (wanted, repo[None].rev()))
     for unwanted in nodeps:
         location = os.path.join(repo.root, unwanted)
         self.assertTrue(not os.path.isdir(location),
                         'unexpected: %s@%r' % (unwanted, repo[None].rev()))
示例#16
0
 def checkdeps(ui, repo, rev, deps, nodeps):
     """After updating to *rev*, check presence/absence of checkout dirs.

     NOTE(review): ``self`` is a free variable from the enclosing scope.
     """
     commands.update(ui, repo, node=str(rev))

     def present(rel):
         # True when *rel* is a checked-out directory under the repo root.
         return os.path.isdir(os.path.join(repo.root, rel))

     for rel in deps:
         self.assertTrue(present(rel),
                         'missing: %s@%r' % (rel, repo[None].rev()))
     for rel in nodeps:
         self.assertTrue(not present(rel),
                         'unexpected: %s@%r' % (rel, repo[None].rev()))
示例#17
0
File: hgflow.py  Project: djm/dotfiles
    def hgflow_func_short(self, cmd, name, args, opts):
        """Shared driver for the short-form flow commands (``feature``,
        ``hotfix``, ``release``).

        When the branch for *name* does not exist yet, it is started from
        the appropriate base (an explicit --rev, the publish branch for
        hotfixes, or the develop branch for release/feature).  When it
        does exist, --finish closes/merges it, --close is a no-op here,
        and otherwise we simply switch to it.
        """
        if not self._checkInited():
            return

        # Refuse to act on a dirty working copy.
        if self._hasUncommit():
            return

        # Map the command to its branch-name prefix.
        prefix = self.featurePrefix
        if cmd == 'hotfix':
            prefix = self.hotfixPrefix
        elif cmd == 'release':
            prefix = self.releasePrefix

        target_branch = '%s%s' % (prefix, name)
        tag_name = opts['rev']

        if not self._findBranch(target_branch, name):
            # Branch absent: only start it when no finish/close/switch
            # flag was given.
            if not (opts['finish'] or opts['close'] or opts['switch']):
                if tag_name:
                    self.outputln(_('Start a new `%s` branch `%s` based on reversion `%s`' % (cmd, name, tag_name)))
                    self._startBranch(target_branch, tag_name)
                else:
                    '''
                    find the suit based branch
                    '''
                    if cmd == 'hotfix':
                        self.outputln(_('Start a new `%s` branch `%s` based on PUBLISH branch' % (cmd, name)))
                        self._startBranch(target_branch, self.publishBranch)
                    elif cmd == 'release' or cmd == 'feature':
                        self.outputln(_('Start a new `%s` branch `%s` based on DEVELOP branch' % (cmd, name)))
                        self._startBranch(target_branch, self.developBranch)
        else:
            if opts['finish']:
                '''
                find the suit based branch
                check the branches is already closed
                '''
                if cmd == 'hotfix':
                    self.hgflow_func_hotfix_finish(target_branch, name, tag_name)
                elif cmd == 'release':
                    self.hgflow_func_release_finish(target_branch, name, tag_name)
                elif cmd == 'feature':
                    self.hgflow_func_feature_finish(target_branch, name)

            elif opts['close']:
                return
            else:
                #switch
                current_branch = hgflow_current_branch(self, prefix)
                if current_branch == name:
                    self.outputln(_('Already in `%s` branch `%s`, nothing happens.' % (cmd, name)))
                else:
                    self.outputln(_('Switch to `%s` branch `%s`.' % (cmd, name)))
                    commands.update(self.ui, self.repo, target_branch)
示例#18
0
def update_repos(rev):
	"""Update the PORTAL_HOME checkout to revision *rev* (checked update).

	Progress is written to OUTPUT_FILE; on any failure the error is
	written to ERROR_FILE and the process exits with status 1.
	"""
	try:
		print >> OUTPUT_FILE, 'accessing repository: %s' % PORTAL_HOME
		repos = hg.repository(ui.ui(), PORTAL_HOME)
		print >> OUTPUT_FILE, 'updating to revision: %s' % rev
		commands.update(ui.ui(), repos, rev=rev, check=True)
	except Exception, e:
		print >> ERROR_FILE, "Error: %s" % e
		print >> ERROR_FILE, "Aborting."
		sys.exit(1)
    def test_get_revision_before_date_time_stays_on_branch(self):
        """get_revision_before_date must only consider commits on the
        current branch: a better-matching date on another branch loses."""
        hgrepo = MercurialRepository(self.directory, init=True)
        dates = ('2011-01-01 01:01:01', '2011-03-03 03:03:03')
        self.create_test_changesets(hgrepo, 2, dates=dates)

        # Commit with an in-between date on a branch we must NOT match.
        hgrepo.create_and_switch_to_branch('avoidbranch')
        self.create_test_changesets(hgrepo, 1, dates=('2011-02-02 02:02:02',))

        commands.update(hgrepo.get_ui(), hgrepo.get_repo(), rev='default')

        #should match commit on 2011-01-01, not 2011-02-02
        eq_('f957b16de26a4879c255762cee97797a64e28f28', hgrepo.get_revision_before_date(datetime(2011, 2, 2, 4, 4, 4)))
示例#20
0
def merge_into(ui, repo, rev, **opts):
    """Merge the current working-copy parent into *rev* and commit the
    merge with a ticket-referencing message."""
    dest_ctx = repo.changectx(rev)
    source_ctx = repo.changectx(".")
    # Jump to the destination, then merge the original parent into it.
    commands.update(ui, repo, rev=dest_ctx.rev())
    commands.merge(ui, repo, rev=source_ctx.rev())
    fields = {
        "ticket": get_ticket_id(source_ctx),
        "target": source_ctx.branch(),
        "branch": dest_ctx.branch(),
        }
    message = "refs #%(ticket)s merged %(target)s -> %(branch)s" % fields
    commands.commit(ui, repo, message=message)
示例#21
0
File: utils.py  Project: lauraxt/elmo
def handlePushes(repo_id, submits, do_update=True):
    """Sync the repository identified by *repo_id* and record each push
    in *submits* as a Push row with its changesets.

    The repository is cloned on first contact (adding a ``default-push``
    ssh entry to its hgrc); otherwise it is pulled only when the newest
    submitted changeset is missing locally, updating the working copy
    when *do_update* is true.  Each push commits its own transaction.

    Cleanups vs. the previous version: the unused ``revisions``
    aggregation was removed, and an unreachable debug ``print`` that sat
    after ``raise`` was deleted.
    """
    if not submits:
        return
    repo = Repository.objects.get(id=repo_id)
    ui = _ui()
    repopath = os.path.join(settings.REPOSITORY_BASE, repo.name)
    configpath = os.path.join(repopath, '.hg', 'hgrc')
    if not os.path.isfile(configpath):
        # No hgrc yet: fresh clone.
        if not os.path.isdir(os.path.dirname(repopath)):
            os.makedirs(os.path.dirname(repopath))
        clone(ui, str(repo.url), str(repopath),
              pull=False, uncompressed=False, rev=[],
              noupdate=False)
        cfg = open(configpath, 'a')
        # Derive the push URL by swapping the scheme prefix for ssh.
        cfg.write('default-push = ssh%s\n' % str(repo.url)[4:])
        cfg.close()
        ui.readconfig(configpath)
        hgrepo = repository(ui, repopath)
    else:
        ui.readconfig(configpath)
        hgrepo = repository(ui, repopath)
        # Pull only if the newest submitted changeset is unknown locally.
        cs = submits[-1].changesets[-1]
        try:
            hgrepo.changectx(cs)
        except RepoError:
            pull(ui, hgrepo, source=str(repo.url),
                 force=False, update=False,
                 rev=[])
            if do_update:
                update(ui, hgrepo)
    for data in submits:
        changesets = []
        for revision in data.changesets:
            try:
                cs = getChangeset(repo, hgrepo, revision)
                transaction.commit()
                changesets.append(cs)
            except Exception:
                # Roll back the partial DB work and propagate the error.
                transaction.rollback()
                raise
        p = Push.objects.create(repository=repo,
                                push_id=data.id, user=data.user,
                                push_date=datetime.utcfromtimestamp(data.date))
        p.changesets = changesets
        p.save()
        transaction.commit()
示例#22
0
File: hg.py  Project: dahool/vertaal
    def init_repo(self):
        """Clone the remote repository into ``self.location`` and update
        the checkout to the configured branch, notifying the UI callback.

        Mercurial's RepoError is re-raised as BrowserException.
        """
        logger.debug("init")
        self._send_callback(self.callback_on_action_notify,_('Initializing repository %s') % self._remote_path)

        try:
            logger.debug("Checkout %s on %s" % (self._remote_path, self.location))

            remote_repo, repo = hg.clone(self.ui, self._remote_path, self.location)
            commands.update(repo.ui, repo, self.branch)
            self._process_files()
            logger.debug("end")
        except RepoError, e:
            raise BrowserException, e
示例#23
0
 def function(tree, ignore, opts):
     rev = opts['rev']
     if type(rev) is str:
         rev = rev
     elif rev:
         rev = rev[0]
     else:
         rev = None
     try:
         commands.update(ui, tree.getrepo(ui),
                         rev=rev, clean=opts['clean'], date=opts['date'])
     except Exception, err:
         ui.warn(_("skipped: %s\n") % err)
         tree.repo.transaction().__del__()
示例#24
0
 def pullAndMerge(self):
     """Run an hg pull and update.
     Overwrite all local changes by default.
     If anything goes wrong with the pull or update, clone instead.
     """
     try:
         # chmod around each repo operation -- presumably fixes file
         # permissions so hg can operate; confirm against self.chmod().
         self.chmod()
         commands.pull(self.ui, self.repo, source=self.url)
         self.chmod()
         commands.update(self.ui, self.repo, clean=True)
     except error.RepoError:
         # Local repo unusable: remove it and re-clone.
         # NOTE(review): clone() is only reached when REPO_DIR exists;
         # if the directory is already gone no clone happens -- confirm
         # that is intended.
         if os.path.exists(REPO_DIR):
             shutil.rmtree(REPO_DIR)
             self.clone()
     return
 def checkout_hg(self):
     """Return the hg status command tuple ('hg', 'st').

     NOTE(review): the immediate ``return`` below makes everything after
     it dead code -- the pull/update logic appears deliberately disabled.
     Either re-enable it or delete the unreachable body.
     """
     return 'hg', 'st'
     # pull new version of project from perository
     if not self.repo_path:
         # may be need to find repo recursively from this dir to up, but it's only may be.
         self.repo_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',))
     repo = hg.repository(
         ui.ui(),
         self.repo_path
     )
     url = dict(repo.ui.configitems('paths', 'default'))['default']
     commands.pull(ui.ui(), repo, url)
     # and update it
     commands.update(ui.ui(), repo)
     return
示例#26
0
    def test_push_single_dir_branch(self):
        # Tests local branches pushing to a single dir repo. Creates a fork at
        # tip. The default branch adds a file called default, while branch foo
        # adds a file called foo, then tries to push the foo branch and default
        # branch in that order.
        repo, repo_path = self.load_and_fetch('branch_from_tag.svndump',
                                              layout='single',
                                              subdir='')

        def file_callback(data):
            # memctx file callback: only the file named *data* exists.
            def cb(repo, memctx, path):
                if path == data:
                    return compathacks.makememfilectx(repo,
                                                      memctx=memctx,
                                                      path=path,
                                                      data=data,
                                                      islink=False,
                                                      isexec=False,
                                                      copied=False)
                raise IOError(errno.EINVAL, 'Invalid operation: ' + path)

            return cb

        def commit_to_branch(name, parent):
            # Commit one file named *name* on branch *name*, atop *parent*.
            repo.commitctx(
                context.memctx(repo, (parent, node.nullid),
                               'automated test (%s)' % name, [name],
                               file_callback(name), 'an_author',
                               '2009-10-19 18:49:30 -0500', {
                                   'branch': name,
                               }))

        parent = repo['tip'].node()
        commit_to_branch('default', parent)
        commit_to_branch('foo', parent)
        hg.update(repo, revsymbol(repo, 'foo').node())
        self.pushrevisions()
        repo = self.repo  # repo is outdated after the rebase happens, refresh
        self.assertTrue('foo' in test_util.svnls(repo_path, ''))
        self.assertEqual(set(repo.branchmap()), set(['default']))
        # Have to cross to another branch head, so hg.update doesn't work
        commands.update(self.ui(),
                        self.repo,
                        node.hex(self.repo.branchheads('default')[1]),
                        clean=True)
        self.pushrevisions()
        self.assertTrue('default' in test_util.svnls(repo_path, ''))
        self.assertEquals(len(self.repo.branchheads('default')), 1)
示例#27
0
def _upgrade(ui, repo):
    """Self-update the Kiln extensions checkout this extension runs from
    by pulling from the Kiln-Extensions repository.

    Initialises a repository in the extension directory on first run.
    Failures are reported at debug level only (best-effort upgrade).
    """
    ext_dir = os.path.dirname(os.path.abspath(__file__))
    ui.debug('kiln: checking for extensions upgrade for %s\n' % ext_dir)

    try:
        r = localrepo.localrepository(hgui.ui(), ext_dir)
    except RepoError:
        # Extension dir is not a repo yet: initialise it in place.
        commands.init(hgui.ui(), dest=ext_dir)
        r = localrepo.localrepository(hgui.ui(), ext_dir)

    r.ui.setconfig('kiln', 'autoupdate', False)
    # NOTE(review): this buffer is pushed but never popped on any path --
    # confirm whether the captured output is intentionally discarded.
    r.ui.pushbuffer()
    try:
        source = 'https://developers.kilnhg.com/Repo/Kiln/Group/Kiln-Extensions'
        if commands.incoming(r.ui, r, bundle=None, force=False, source=source) != 0:
            # no incoming changesets, or an error. Don't try to upgrade.
            ui.debug('kiln: no extensions upgrade available\n')
            return
        ui.write(_('updating Kiln Extensions at %s... ') % ext_dir)
        # pull and update return falsy values on success
        if commands.pull(r.ui, r, source=source) or commands.update(r.ui, r, clean=True):
            url = urljoin(repo.url()[:repo.url().lower().index('/repo')], 'Tools')
            ui.write(_('unable to update\nvisit %s to download the newest extensions\n') % url)
        else:
            ui.write(_('complete\n'))
    except Exception, e:
        ui.debug(_('kiln: error updating Kiln Extensions: %s\n') % e)
示例#28
0
File: kiln.py  Project: szechyjs/dotfiles
def _upgrade(ui, repo):
    """Self-update the Kiln extensions checkout this extension runs from
    by pulling from the Kiln-Extensions repository.

    Initialises a repository in the extension directory on first run.
    Failures are reported at debug level only, including a traceback.
    """
    ext_dir = os.path.dirname(os.path.abspath(__file__))
    ui.debug(_('kiln: checking for extensions upgrade for %s\n') % ext_dir)

    try:
        r = localrepo.localrepository(hgui.ui(), ext_dir)
    except RepoError:
        # Extension dir is not a repo yet: initialise it in place.
        commands.init(hgui.ui(), dest=ext_dir)
        r = localrepo.localrepository(hgui.ui(), ext_dir)

    r.ui.setconfig('kiln', 'autoupdate', False)
    # NOTE(review): this buffer is pushed but never popped on any path --
    # confirm whether the captured output is intentionally discarded.
    r.ui.pushbuffer()
    try:
        source = 'https://developers.kilnhg.com/Repo/Kiln/Group/Kiln-Extensions'
        if commands.incoming(r.ui, r, bundle=None, force=False, source=source) != 0:
            # no incoming changesets, or an error. Don't try to upgrade.
            ui.debug('kiln: no extensions upgrade available\n')
            return
        ui.write(_('updating Kiln Extensions at %s... ') % ext_dir)
        # pull and update return falsy values on success
        if commands.pull(r.ui, r, source=source) or commands.update(r.ui, r, clean=True):
            url = urljoin(repo.url()[:repo.url().lower().index('/repo')], 'Tools')
            ui.write(_('unable to update\nvisit %s to download the newest extensions\n') % url)
        else:
            ui.write(_('complete\n'))
    except Exception, e:
        ui.debug(_('kiln: error updating extensions: %s\n') % e)
        ui.debug(_('kiln: traceback: %s\n') % traceback.format_exc())
示例#29
0
def pull(orig, ui, repo, *args, **opts):
    """pull --rebase/--update are problematic without an explicit destination

    Wraps the original ``pull`` command: validates the --rebase/--update/
    --dest combination (aborting with configured hints when a destination
    is required but missing), strips the extra options before delegating
    to *orig*, then performs the requested rebase or update afterwards.
    """
    try:
        rebasemodule = extensions.find('rebase')
    except KeyError:
        # Rebase extension not loaded.
        # NOTE(review): rebasemodule stays None and is dereferenced below
        # when a rebase is requested -- confirm the extension is
        # guaranteed present in that path.
        rebasemodule = None

    rebase = opts.get('rebase')
    update = opts.get('update')
    isrebase = rebase or rebaseflag
    # Only use from the global rebasedest if _getrebasedest was called.  If the
    # user isn't using remotenames, then rebasedest isn't set.
    if rebaseflag:
        dest = rebasedest
    else:
        dest = opts.get('dest')

    if (isrebase or update) and not dest:
        dest = ui.config('tweakdefaults', 'defaultdest')

    if isrebase and update:
        mess = _('specify either rebase or update, not both')
        raise error.Abort(mess)

    if dest and not (isrebase or update):
        mess = _('only specify a destination if rebasing or updating')
        raise error.Abort(mess)

    if (isrebase or update) and not dest:
        if isrebase and bmactive(repo):
            mess = ui.config('tweakdefaults', 'bmnodestmsg')
            hint = ui.config('tweakdefaults', 'bmnodesthint')
        elif isrebase:
            mess = ui.config('tweakdefaults', 'nodestmsg')
            hint = ui.config('tweakdefaults', 'nodesthint')
        else: # update
            mess = _('you must specify a destination for the update')
            hint = _('use `hg pull --update --dest <destination>`')
        raise error.Abort(mess, hint=hint)

    # Remove our extra options so the wrapped command doesn't see them.
    if 'rebase' in opts:
        del opts['rebase']
        tool = opts.pop('tool', '')
    if 'update' in opts:
        del opts['update']
    if 'dest' in opts:
        del opts['dest']

    ret = orig(ui, repo, *args, **opts)

    # NB: we use rebase and not isrebase on the next line because
    # remotenames may have already handled the rebase.
    if dest and rebase:
        ret = ret or rebaseorfastforward(rebasemodule.rebase, ui, repo,
                                         dest=dest, tool=tool)
    if dest and update:
        ret = ret or commands.update(ui, repo, node=dest, check=True)

    return ret
示例#30
0
def update_repository(repo, ctx_rev=None):
    """
    Update the cloned repository to changeset_revision.  It is critical that the installed repository is updated to the desired
    changeset_revision before metadata is set because the process for setting metadata uses the repository files on disk.

    :param repo: the mercurial repository object to update
    :param ctx_rev: changeset revision to update to; None lets hg pick
        its default update destination
    """
    # TODO: We may have files on disk in the repo directory that aren't being tracked, so they must be removed.
    # The codes used to show the status of files are as follows.
    # M = modified
    # A = added
    # R = removed
    # C = clean
    # ! = deleted, but still tracked
    # ? = not tracked
    # I = ignored
    # It would be nice if we could use mercurial's purge extension to remove untracked files.  The problem is that
    # purging is not supported by the mercurial API.
    commands.update(get_configured_ui(), repo, rev=ctx_rev)
示例#31
0
def _hg_repository_sync(name, url, submits, do_update=True):
    """Ensure a local clone of *url* exists under REPOSITORY_BASE/*name*
    and is up to date; return the mercurial repository object.

    On first sync the repository is cloned and a ``default-push`` ssh
    path is appended to its hgrc.  On later syncs we pull (and, when
    *do_update* is true, update the working copy) only if the newest
    submitted changeset is not already known locally.
    """
    ui_ = ui()
    repopath = os.path.join(settings.REPOSITORY_BASE, name)
    configpath = os.path.join(repopath, '.hg', 'hgrc')
    if not os.path.isfile(configpath):
        # No hgrc yet: treat as a fresh clone.
        if not os.path.isdir(os.path.dirname(repopath)):
            os.makedirs(os.path.dirname(repopath))
        clone(ui_,
              str(url),
              str(repopath),
              pull=False,
              uncompressed=False,
              rev=[],
              noupdate=False)
        cfg = open(configpath, 'a')
        # Derive the push URL by replacing the scheme prefix with "ssh"
        # (assumes *url* starts with "http" -- TODO confirm at call sites).
        cfg.write('default-push = ssh%s\n' % str(url)[4:])
        cfg.close()
        ui_.readconfig(configpath)
        hgrepo = repository(ui_, repopath)
    else:
        ui_.readconfig(configpath)
        hgrepo = repository(ui_, repopath)
        # Probe for the last submitted changeset; RepoError means we
        # don't have it yet and must pull.
        cs = submits[-1].changesets[-1]
        try:
            hgrepo.changectx(cs)
        except RepoError:
            pull(ui_,
                 hgrepo,
                 source=str(url),
                 force=False,
                 update=False,
                 rev=[])
            if do_update:
                # Make sure that we're not triggering workers in post 2.6
                # hg. That's not stable, at least as we do it.
                # Monkey patch time
                try:
                    from mercurial import worker
                    if hasattr(worker, '_startupcost'):
                        # use same value as hg for non-posix
                        worker._startupcost = 1e30
                except ImportError:
                    # no worker, no problem
                    pass
                update(ui_, hgrepo)
    return hgrepo
    def test_push_single_dir_branch(self):
        # Tests local branches pushing to a single dir repo. Creates a fork at
        # tip. The default branch adds a file called default, while branch foo
        # adds a file called foo, then tries to push the foo branch and default
        # branch in that order.
        repo = self._load_fixture_and_fetch('branch_from_tag.svndump',
                                            stupid=False,
                                            layout='single',
                                            subdir='')
        def file_callback(data):
            # memctx file callback: only the file named *data* exists.
            def cb(repo, memctx, path):
                if path == data:
                    return context.memfilectx(path=path,
                                              data=data,
                                              islink=False,
                                              isexec=False,
                                              copied=False)
                raise IOError(errno.EINVAL, 'Invalid operation: ' + path)
            return cb

        def commit_to_branch(name, parent):
            # Commit one file named *name* on branch *name*, atop *parent*.
            repo.commitctx(context.memctx(repo,
                                          (parent, node.nullid),
                                          'automated test (%s)' % name,
                                          [name],
                                          file_callback(name),
                                          'an_author',
                                          '2009-10-19 18:49:30 -0500',
                                          {'branch': name,}))

        parent = repo['tip'].node()
        commit_to_branch('default', parent)
        commit_to_branch('foo', parent)
        hg.update(repo, repo['foo'].node())
        self.pushrevisions()
        repo = self.repo # repo is outdated after the rebase happens, refresh
        self.assertTrue('foo' in self.svnls(''))
        self.assertEqual(repo.branchtags().keys(), ['default'])
        # Have to cross to another branch head, so hg.update doesn't work
        commands.update(ui.ui(),
                        self.repo,
                        self.repo.branchheads('default')[1],
                        clean=True)
        self.pushrevisions()
        self.assertTrue('default' in self.svnls(''))
        self.assertEquals(len(self.repo.branchheads('default')), 1)
示例#33
0
def update_repository( repo, ctx_rev=None ):
    """
    Update the working copy of ``repo`` to the changeset ``ctx_rev``.

    It is critical that the installed repository is updated to the desired
    changeset revision before metadata is set, because the process for
    setting metadata uses the repository files on disk.
    """
    # TODO: We may have files on disk in the repo directory that aren't being
    # tracked, so they must be removed.  Mercurial status codes, for reference:
    #   M = modified, A = added, R = removed, C = clean,
    #   ! = deleted but still tracked, ? = not tracked, I = ignored
    # It would be nice to use mercurial's purge extension to remove untracked
    # files, but purging is not supported by the mercurial API.
    hg_ui = get_configured_ui()
    commands.update( hg_ui, repo, rev=ctx_rev )
示例#34
0
def tbranch(ui, repo, *args, **opts):

  """ show current branch status or switch to a different branch """

  mustBeTopicRepo(repo)

  # If called from the menu, allow user to choose a branch by number.
  if 'tmenu' in opts:
    branches = topicBranchNames(repo)
    if not branches:
      ui.warn("There are no branches to switch to.\n")
      return 1

    ui.status("Branches you can switch to:\n")
    for i, b in enumerate(branches):
      ui.status("  %4d. %s\n" % (i+1, b))

    resp = ui.prompt("Which one (1-%d): " % len(branches), '')
    # Match the response against the 1-based menu numbers shown above.
    # (Replaces a manual C-style while loop; also compares against None so
    # an empty branch name could never be mistaken for "not found".)
    found = None
    for i, b in enumerate(branches):
      if str(i+1) == resp:
        found = b
        break
    if found is None:
      ui.warn("Unknown number '%s'\n" % resp)
      return 1

    ui.status("Switching to branch: %s\n" % found)
    args = [found]

  # If no branch specified, show the status of the current branch.
  if len(args) < 1:
    ui.status("Current branch: %s\n" % repo.dirstate.branch())
    ui.status("        Status: %s\n" % calcBranchState(repo, repo.dirstate.branch(), repo.dirstate.parents()[0]))
    return

  # Otherwise, switch to a different (existing) branch
  target = args[0]
  if target == repo.dirstate.branch():
    ui.status("You're already on that branch.\n")
    return

  # Allow a unique case-insensitive substring to select an open branch.
  if target not in topicBranchNames(repo, closed=True) + [repo.topicProdBranch]:
    maybes = topicBranchNames(repo) + [repo.topicProdBranch] # don't check closed branches for maybes
    matches = [b for b in maybes if target.lower() in b.lower()]
    if len(matches) != 1:
      ui.warn("Error: branch '%s' does not exist.\n" % target)
      return 1
    ui.status("(branch %s matches '%s')\n" % (matches[0], target))
    target = matches[0]

  # 'check' refuses to clobber uncommitted changes unless --clean was given.
  return commands.update(ui, repo,
                         node = target,
                         check = not opts.get('clean', False),
                         clean = opts.get('clean', False))
示例#35
0
文件: hgflow.py 项目: djm/dotfiles
    def hgflow_func_feature_finish(self, target_branch, name):
        '''Finish the feature branch ``name`` (branch ``target_branch``):

        1. Check develop branch version and current feature
        2. Close this branch
        3. Merge it into develop
        4. Commit develop branch
        '''

        if not self._findBranch(target_branch, name):
            return

        # Close the feature branch on its own head first.
        commands.update(self.ui, self.repo, target_branch)
        commands.commit(self.ui, self.repo, close_branch=True, message='hg flow, close feature %s' % (name,))

        # Merge the finished feature back into develop and commit the merge.
        commands.update(self.ui, self.repo, self.developBranch)
        commands.merge(self.ui, self.repo, target_branch)
        # BUG FIX: this merges a *feature* branch, so the commit message now
        # says "merge feature" instead of the copy-pasted "merge release".
        commands.commit(self.ui, self.repo, message='hg flow, merge feature `%s` to develop branch `%s`' % (name, self.developBranch))
示例#36
0
    def install(self):
        """
        Does the actual installation of this part.

        Be aware, that if the part was previously installed, it will
        get removed.

        Returns the destination path so the caller can track it.
        """
        self.log.info("Cloning repository %s to %s" % (
            self.source, self.destination
        ))
        # A previous install may have left a working copy behind; start clean.
        shutil.rmtree(self.destination, ignore_errors = True)
        commands.clone(ui.ui(), get_repository(self.source), self.destination)
        if self.rev is not None:
            # Log only when an update actually happens (previously this
            # logged "Updating to revision None" even with no pinned rev).
            self.log.info("Updating to revision %s" % self.rev)
            commands.update(ui.ui(), get_repository(self.destination), rev=self.rev)
            if self.as_egg:
                # NOTE(review): egg installation only runs when a revision is
                # pinned -- confirm this coupling is intended.
                self._install_as_egg()
        return self.destination
示例#37
0
def restack(ui, repo, rebaseopts=None):
    """Repair a situation in which one or more changesets in a stack
       have been obsoleted (thereby leaving their descendants in the stack
       unstable) by finding any such changesets and rebasing their descendants
       onto the latest version of each respective changeset.

       Returns 1 when there is nothing to restack; otherwise falls through
       with the side effects of the rebase (and a possible working-copy
       update).
    """
    # Copy so the 'rev'/'dest' keys added below never leak into the
    # caller's dict.
    rebaseopts = (rebaseopts or {}).copy()

    # TODO: Remove config override after https://phab.mercurial-scm.org/D1063
    config = {('experimental', 'rebase.multidest'): True}

    # Hold both locks for the whole operation so the repository cannot
    # change between computing the revsets and running the rebase.
    with ui.configoverride(config), repo.wlock(), repo.lock():
        # Find drafts connected to the current stack via either changelog or
        # obsolete graph. Note: "draft() & ::." is optimized by D441.

        # 1. Connect drafts via changelog
        revs = list(repo.revs('(draft() & ::.)::'))
        if not revs:
            # "." is probably public. Check its direct children.
            revs = repo.revs('draft() & children(.)')
            if not revs:
                ui.status(_('nothing to restack\n'))
                return 1
        # 2. Connect revs via obsolete graph
        revs = list(repo.revs('successors(%ld)+allpredecessors(%ld)',
                              revs, revs))
        # 3. Connect revs via changelog again to cover missing revs
        revs = list(repo.revs('(draft() & ::%ld)::', revs))

        # Rebase every collected rev; the destination is computed per-source
        # by the _destrestack revset (latest successor of each base).
        rebaseopts['rev'] = [revsetlang.formatspec('%ld', revs)]
        rebaseopts['dest'] = '_destrestack(SRC)'

        rebase.rebase(ui, repo, **rebaseopts)

        # Ensure that we always end up on the latest version of the
        # current changeset. Usually, this will be taken care of
        # by the rebase operation. However, in some cases (such as
        # if we are on the precursor of the base changeset) the
        # rebase will not update to the latest version, so we need
        # to do this manually.
        successor = repo.revs('allsuccessors(.)').last()
        if successor is not None:
            commands.update(ui, repo, rev=successor)
示例#38
0
def autoUpdate(ui, repo, hooktype, **opts):
  """ changegroup hook: if a push arrives with changes to the current branch,
      update it automatically. """

  # Silently skip this for non-topic repos
  if not isTopicRepo(repo):
    return 0

  # See if any of the incoming changesets affects our current branch.
  # opts['node'] is the first new changeset of the changegroup; everything
  # from there to tip arrived in this push.  Use any() over a generator so
  # we stop at the first match instead of materializing and scanning the
  # entire list of changesets.
  thisBranch = repo.dirstate.branch()
  needUpdate = any(repo[n].branch() == thisBranch
                   for n in range(repo[opts['node']].rev(), len(repo)))

  # If changes to our branch were found, do an update.
  if needUpdate:
    ui.status("Auto-update on branch %s\n" % thisBranch)
    commands.update(ui, repo, node = thisBranch)
    ui.status("Done with auto-update on branch %s\n" % thisBranch)
示例#39
0
        def _feature_func(action, name, target_branch):
            # Dispatch a "feature" sub-command: start, finish or change.
            if 'start' == action:
                self._startBranch(target_branch, 'feature')

            elif 'finish' == action:
                # Finish this feature:
                #   1. check develop branch version and current feature
                #   2. close this branch
                #   3. merge it into develop
                #   4. commit develop branch
                if not self._findBranch(target_branch, name):
                    return

                commands.update(self.ui, self.repo, target_branch)
                commands.commit(self.ui, self.repo, close_branch=True, message='hg flow, close feature %s' % (name,))

                commands.update(self.ui, self.repo, self.developBranch)
                commands.merge(self.ui, self.repo, target_branch)
                # BUG FIX: this merges a *feature* branch, so the commit
                # message now says "merge feature" instead of "merge release".
                commands.commit(self.ui, self.repo, message='hg flow, merge feature `%s` to develop branch `%s`' % (name, self.developBranch))

            elif 'change' == action:
                commands.update(self.ui, self.repo, target_branch)

            # The 'fetchdev' action was disabled in the original with a
            # dangling string literal; preserved here as a real comment:
            # elif 'fetchdev' == action:
            #     pass
            else:
                self.outputln(_('Please give a valid action.'))
示例#40
0
    def _mergeIntoPublishBranch(self, target_branch, name):
        """Close the release branch ``target_branch``, merge it into the
        publish branch, tag the release, then merge publish back into
        develop.

        ``name`` is the release name used for commit messages and the tag.
        """
        # Close the release branch on its own head first.
        commands.update(self.ui, self.repo, target_branch)
        commands.commit(self.ui,
                        self.repo,
                        close_branch=True,
                        message='hg flow, close release %s' %
                        (target_branch, ))
        commands.update(self.ui, self.repo, self.publishBranch)

        commands.merge(self.ui, self.repo, target_branch)
        # NOTE(review): close_branch=True here closes the *publish* branch as
        # part of the merge commit -- confirm that is intended; it looks like
        # a copy-paste from the close-release commit above.
        commands.commit(
            self.ui,
            self.repo,
            close_branch=True,
            message='hg flow, merge release `%s` to publish branch `%s`' %
            (name, self.publishBranch))

        # Tag the published release as "<versionTagPrefix><name>".
        tag_name = '%s%s' % (self.versionTagPrefix, name)
        commands.tag(self.ui, self.repo, tag_name)

        #merge it into develop branch, there should be many confilct code
        commands.update(self.ui, self.repo, self.developBranch)
        commands.merge(self.ui, self.repo, self.publishBranch)

        commands.commit(
            self.ui,
            self.repo,
            message='hg flow, merge release `%s` to develop branch `%s`' %
            (name, self.developBranch))
示例#41
0
    def _mergeIntoPublishBranch(self, target_branch, name, source_tag_name = None):
        """Tag and close the release branch ``target_branch``, merge it (or
        the existing tag ``source_tag_name``, when given) into the publish
        branch, then merge the branch back into develop.

        ``name`` is the release name used in the tag and commit messages.
        """
        commands.update(self.ui, self.repo, target_branch)

        #tag for publish
        tag_name = '%s%s' % (self.versionTagPrefix, name)
        if source_tag_name:
            # Pin the release tag at an explicit source tag, not the head.
            commands.tag(self.ui, self.repo, tag_name, rev=source_tag_name)
        else:
            commands.tag(self.ui, self.repo, tag_name)

        commands.commit(self.ui, self.repo, close_branch=True, message='hg flow, close release %s' % (target_branch,))
        commands.update(self.ui, self.repo, self.publishBranch)

        self.outputln('close target_branch')


        # Merge either the pinned tag or the branch head into publish.
        if source_tag_name:
            commands.merge(self.ui, self.repo, source_tag_name)
        else:
            commands.merge(self.ui, self.repo, target_branch)
        commands.commit(self.ui, self.repo, message='hg flow, merge release `%s` to publish branch `%s`' % (name, self.publishBranch))
        self.outputln('merge source_tag_name into publish branch')

        '''
        tag_name = '%s%s' % (self.versionTagPrefix, name)
        commands.tag(self.ui, self.repo, tag_name)
        '''
        self.outputln('merge target_branch into develop branch')

        #merge it into develop branch, there should be many confilct code
        commands.update(self.ui, self.repo, self.developBranch)
        commands.merge(self.ui, self.repo, target_branch)
        commands.commit(self.ui, self.repo, message='hg flow, merge release `%s` to develop branch `%s`' % (name, self.developBranch))
示例#42
0
    def _mergeIntoPublishBranch(self, target_branch, name, source_tag_name = None):
        """Close the release branch ``target_branch``, merge it (or the tag
        ``source_tag_name``) into the publish branch, tag the result there,
        then merge the branch back into develop.
        """
        commands.update(self.ui, self.repo, target_branch)

        #tag for publish
        # Computed here; only applied after the merge into publish below, so
        # the tag lands on the publish branch.
        tag_name = '%s%s' % (self.versionTagPrefix, name)

        commands.commit(self.ui, self.repo, close_branch=True, message='hg flow, close release %s' % (target_branch,))
        commands.update(self.ui, self.repo, self.publishBranch)

        self.outputln('Close branch `%s`' % (target_branch))

        #TODO: source_tag_name should in source_branch
        if source_tag_name:
            commands.merge(self.ui, self.repo, source_tag_name)
            self.outputln('Merge TAG `%s` into PRODCTION branch.' % (source_tag_name, ))
        else:
            commands.merge(self.ui, self.repo, target_branch)
            self.outputln('Merge BRANCH `%s` into PRODCTION branch.' % (target_branch, ))

        commands.commit(self.ui, self.repo, message='hg flow, merge release `%s` to publish branch `%s`' % (name, self.publishBranch))
        commands.tag(self.ui, self.repo, tag_name)

        self.outputln('Merge BRANCH `%s` into DEVELOP branch.' % (target_branch, ))

        #merge it into develop branch, there should be many confilct code
        commands.update(self.ui, self.repo, self.developBranch)
        commands.merge(self.ui, self.repo, target_branch)
        commands.commit(self.ui, self.repo, message='hg flow, merge release `%s` to develop branch `%s`' % (name, self.developBranch))
示例#43
0
文件: hgflow.py 项目: djm/dotfiles
    def _mergeIntoPublishBranch(self, target_branch, name, source_tag_name = None):
        """Tag and close the release branch ``target_branch``, merge it (or
        the tag ``source_tag_name``) into the publish branch, then merge the
        branch back into develop.

        Note: here the release tag is applied *before* the branch is closed,
        so the tag commit lands on the release branch itself.
        """
        commands.update(self.ui, self.repo, target_branch)

        #tag for publish
        tag_name = '%s%s' % (self.versionTagPrefix, name)
        commands.tag(self.ui, self.repo, tag_name)

        commands.commit(self.ui, self.repo, close_branch=True, message='hg flow, close release %s' % (target_branch,))
        commands.update(self.ui, self.repo, self.publishBranch)

        self.outputln('Close branch `%s`' % (target_branch))

        #TODO: source_tag_name should in source_branch
        if source_tag_name:
            commands.merge(self.ui, self.repo, source_tag_name)
            self.outputln('Merge TAG `%s` into PRODCTION branch.' % (source_tag_name, ))
        else:
            commands.merge(self.ui, self.repo, target_branch)
            self.outputln('Merge BRANCH `%s` into PRODCTION branch.' % (target_branch, ))

        commands.commit(self.ui, self.repo, message='hg flow, merge release `%s` to publish branch `%s`' % (name, self.publishBranch))

        self.outputln('Merge BRANCH `%s` into DEVELOP branch.' % (target_branch, ))

        #merge it into develop branch, there should be many confilct code
        commands.update(self.ui, self.repo, self.developBranch)
        commands.merge(self.ui, self.repo, target_branch)
        commands.commit(self.ui, self.repo, message='hg flow, merge release `%s` to develop branch `%s`' % (name, self.developBranch))
示例#44
0
文件: utils.py 项目: flodolo/elmo
def _hg_repository_sync(name, url, submits, do_update=True):
    """Clone or refresh the local mercurial repository ``name`` from ``url``.

    On first sync, clone the repository and record a ``default-push`` ssh
    path in its hgrc.  On later syncs, pull only when the newest submitted
    changeset is not yet known locally, updating the working copy when
    ``do_update`` is True.  Returns the mercurial repository object.
    """
    ui_ = ui()
    repopath = os.path.join(settings.REPOSITORY_BASE, name)
    configpath = os.path.join(repopath, '.hg', 'hgrc')
    if not os.path.isfile(configpath):
        if not os.path.isdir(os.path.dirname(repopath)):
            os.makedirs(os.path.dirname(repopath))
        clone(ui_, str(url), str(repopath),
              pull=False, uncompressed=False, rev=[],
              noupdate=False)
        # Use a context manager so the hgrc handle is closed even if the
        # write fails (it was previously left open in that case).
        # "ssh" replaces the first four characters of the URL -- presumably
        # an "http" scheme; TODO confirm behavior for https URLs.
        with open(configpath, 'a') as cfg:
            cfg.write('default-push = ssh%s\n' % str(url)[4:])
        ui_.readconfig(configpath)
        hgrepo = repository(ui_, repopath)
    else:
        ui_.readconfig(configpath)
        hgrepo = repository(ui_, repopath)
        # Only pull when the latest submitted changeset is unknown locally.
        cs = submits[-1].changesets[-1]
        try:
            hgrepo.changectx(cs)
        except RepoError:
            pull(ui_, hgrepo, source=str(url),
                 force=False, update=False,
                 rev=[])
            if do_update:
                # Make sure that we're not triggering workers in post 2.6
                # hg. That's not stable, at least as we do it.
                # Monkey patch time
                try:
                    from mercurial import worker
                    if hasattr(worker, '_startupcost'):
                        # use same value as hg for non-posix
                        worker._startupcost = 1e30
                except ImportError:
                    # no worker, no problem
                    pass
                update(ui_, hgrepo)
    return hgrepo
示例#45
0
    def test_updateexternals(self):
        """Check that svn externals appear and disappear as revisions change."""
        def assert_externals(present, absent, repo, rev=None):
            # Materialize externals for ``rev``, then verify which dependency
            # directories exist and which do not.
            svnexternals.updateexternals(ui, [rev], repo)
            for rel in present:
                full = os.path.join(repo.root, rel)
                self.assertTrue(os.path.isdir(full), 'missing: %s@%r' % (rel, rev))
            for rel in absent:
                full = os.path.join(repo.root, rel)
                self.assertTrue(not os.path.isdir(full),
                                'unexpected: %s@%r' % (rel, rev))

        ui = self.ui(subrepo=True)
        repo = self._load_fixture_and_fetch('externals.svndump')
        commands.update(ui, repo)
        assert_externals(['deps/project1'], [], repo, 0)
        assert_externals(['deps/project1', 'deps/project2'], [], repo, 1)
        assert_externals(
            ['subdir/deps/project1', 'subdir2/deps/project1', 'deps/project2'],
            ['deps/project1'], repo, 2)
        assert_externals(['subdir/deps/project1', 'deps/project2'],
                         ['subdir2/deps/project1'], repo, 3)
        assert_externals(['subdir/deps/project1'], ['deps/project2'], repo, 4)
示例#46
0
def qrevert(ui, repo, rev, **opts):
    '''
    Revert to a past mq state. This updates both the main checkout as well as
    the patch directory, and leaves either or both at a non-head revision.

    ``rev`` is a revision of the *patch queue* repository.  Raises
    error.Abort when no versioned patch queue exists, when the mq commit
    lacks the required metadata, or when the working copy is dirty.
    '''
    q = repo.mq
    if not q or not q.qrepo():
        raise error.Abort(_("No revisioned patch queue found"))
    # Target changeset of the patch-queue repository.
    p = q.qrepo()[q.qrepo().lookup(rev)]

    # The mq commit's description records the main checkout's parent
    # ("qparent") and the topmost applied patch ("top") as metadata.
    desc = p.description()
    m = qparent_re.search(desc)
    if not m:
        raise error.Abort(_("mq commit is missing needed metadata in comment"))
    qparent = m.group(1)
    m = top_re.search(desc)
    if not m:
        raise error.Abort(_("mq commit is missing needed metadata in comment"))
    top = m.group(1)

    # Check the main checkout before updating the mq checkout
    if repo[None].dirty(merge=False, branch=False):
        raise error.Abort(_("uncommitted local changes"))

    # Pop everything first
    q.pop(repo, None, force=False, all=True, nobackup=True, keepchanges=False)

    # Update the mq checkout
    commands.update(ui, q.qrepo(), rev=rev, check=True)
    # Update the main checkout
    commands.update(ui, repo, rev=qparent, check=False)

    # Push until reaching the correct patch
    if top != "(none)":
        mq.goto(ui, repo, top)

    # Needed?
    q.savedirty()
示例#47
0
def qrevert(ui, repo, rev, **opts):
    '''
    Revert to a past mq state. This updates both the main checkout as well as
    the patch directory, and leaves either or both at a non-head revision.

    ``rev`` is a revision of the *patch queue* repository.  Raises
    util.Abort (older mercurial API than error.Abort) when no versioned
    patch queue exists, when the mq commit lacks the required metadata, or
    when the working copy is dirty.
    '''
    q = repo.mq
    if not q or not q.qrepo():
        raise util.Abort(_("No revisioned patch queue found"))
    # Target changeset of the patch-queue repository.
    p = q.qrepo()[q.qrepo().lookup(rev)]

    # The mq commit's description records the main checkout's parent
    # ("qparent") and the topmost applied patch ("top") as metadata.
    desc = p.description()
    m = qparent_re.search(desc)
    if not m:
        raise util.Abort(_("mq commit is missing needed metadata in comment"))
    qparent = m.group(1)
    m = top_re.search(desc)
    if not m:
        raise util.Abort(_("mq commit is missing needed metadata in comment"))
    top = m.group(1)

    # Check the main checkout before updating the mq checkout
    if repo[None].dirty(merge=False, branch=False):
        raise util.Abort(_("uncommitted local changes"))

    # Pop everything first
    q.pop(repo, None, force=False, all=True, nobackup=True, keepchanges=False)

    # Update the mq checkout
    commands.update(ui, q.qrepo(), rev=rev, check=True)
    # Update the main checkout
    commands.update(ui, repo, rev=qparent, check=False)

    # Push until reaching the correct patch
    if top != "(none)":
        mq.goto(ui, repo, top)

    # Needed?
    q.savedirty()
    def merge_heads(self, branch, heads, requestid):
        """Merge the two heads left on ``branch`` by a review-request bundle
        import, committing the result.

        ``heads`` are the branch head changesets; ``requestid`` is used only
        in the merge commit message.  Aborts when more than two heads exist.
        """
        if len(heads) == 1:
            return  # nothing to merge

        if len(heads) > 2:
            # NOTE(review): this status message lacks a trailing newline
            # (unlike the one below) and duplicates the Abort text -- confirm
            # both are intended.
            self.ui.status(
                _("Review request bundle import resulted in more than two heads on branch %s"
                  ) % branch)
            raise util.Abort(
                _("Review request bundle import resulted in more than two heads on branch %s"
                  ) % branch)

        self.ui.status(_("Merging heads for branch %s\n") % branch)
        # Buffer so the update/merge/commit output doesn't reach the user.
        self.ui.pushbuffer()
        try:
            commands.update(self.ui, self.repo, heads[0].rev())
            # internal:fail: let the merge abort rather than leave files
            # needing manual conflict resolution.
            commands.merge(self.ui, self.repo, tool="internal:fail")

            message = _(
                "Automatic merge after review request %s fetch") % requestid
            commands.commit(self.ui, self.repo, message=message)
        finally:
            # Always pop the buffer, even if merge/commit raised.
            self.ui.popbuffer()
示例#49
0
    def test_updateexternals(self):
        """Verify svn externals are checked out/removed across revisions."""
        def checkdeps(deps, nodeps, repo, rev=None):
            # Materialize externals at ``rev``, then check which dependency
            # directories exist (``deps``) and which do not (``nodeps``).
            svnexternals.updateexternals(ui, [rev], repo)
            for d in deps:
                p = os.path.join(repo.root, d)
                self.assertTrue(os.path.isdir(p),
                                'missing: %s@%r' % (d, rev))
            for d in nodeps:
                p = os.path.join(repo.root, d)
                self.assertTrue(not os.path.isdir(p),
                                'unexpected: %s@%r' % (d, rev))

        ui = self.ui()
        # stupid=0: exercise the non-"stupid" (delta-aware) replay mode.
        repo = self._load_fixture_and_fetch('externals.svndump', stupid=0)
        commands.update(ui, repo)
        checkdeps(['deps/project1'], [], repo, 0)
        checkdeps(['deps/project1', 'deps/project2'], [], repo, 1)
        checkdeps(['subdir/deps/project1', 'subdir2/deps/project1',
                   'deps/project2'],
                  ['deps/project1'], repo, 2)
        checkdeps(['subdir/deps/project1', 'deps/project2'],
                  ['subdir2/deps/project1'], repo, 3)
        checkdeps(['subdir/deps/project1'], ['deps/project2'], repo, 4)
示例#50
0
def update_hg(path, skip_rebuild = False):
    from mercurial import hg, ui, commands
    f = open(os.path.join(path, "yt_updater.log"), "a")
    u = ui.ui()
    u.pushbuffer()
    config_fn = os.path.join(path, ".hg", "hgrc")
    print "Reading configuration from ", config_fn
    u.readconfig(config_fn)
    repo = hg.repository(u, path)
    commands.pull(u, repo)
    f.write(u.popbuffer())
    f.write("\n\n")
    u.pushbuffer()
    commands.identify(u, repo)
    if "+" in u.popbuffer():
        print "Can't rebuild modules by myself."
        print "You will have to do this yourself.  Here's a sample commands:"
        print
        print "    $ cd %s" % (path)
        print "    $ hg up"
        print "    $ %s setup.py develop" % (sys.executable)
        return 1
    print "Updating the repository"
    f.write("Updating the repository\n\n")
    commands.update(u, repo, check=True)
    if skip_rebuild: return
    f.write("Rebuilding modules\n\n")
    p = subprocess.Popen([sys.executable, "setup.py", "build_ext", "-i"], cwd=path,
                        stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
    stdout, stderr = p.communicate()
    f.write(stdout)
    f.write("\n\n")
    if p.returncode:
        print "BROKEN: See %s" % (os.path.join(path, "yt_updater.log"))
        sys.exit(1)
    f.write("Successful!\n")
    print "Updated successfully."
示例#51
0
    def update(self, timeout=None):
        """Pull from the default remote and update the working copy.

        ``timeout`` (seconds), when given, is written to mercurial's ui
        configuration before running the commands.  Returns True when both
        pull and update report success (falsy return), False otherwise.
        """
        ok = True
        if timeout:
            self.ui.setconfig("ui", "timeout", timeout)

        try:
            # Both mercurial commands return a truthy value on failure.
            if commands.pull(self.ui, self.repo):
                ok = False
            if commands.update(self.ui, self.repo):
                ok = False
        except IndexError:
            # Re-raise, preserving the traceback.  The original fetched the
            # exception via sys.exc_info() (python 2.5 compatibility) and had
            # an unreachable ``return False`` after the raise; both removed.
            raise

        return ok
示例#52
0
def tsetup(ui, repo, *args, **kwargs):
  """ Used to set up the topic extension in a pre-configured directory.
      We look for a prod branch in the current directory, and try to find a
      .topic_hgrc_adds file there, then offer to append its contents (plus
      the extension stanza) to this repo's .hg/hgrc.
  """

  mustBeTopicRepo(repo)

  ui.status("Topic setup:\n")

  # Make sure we're at the top of the prod branch.
  # CONSISTENCY FIX: use repo.topicProdBranch in the branchheads() lookup
  # instead of the hard-coded 'prod' literal -- the very next clause (and the
  # update below) already use repo.topicProdBranch.
  if repo.dirstate.parents()[0] not in repo.branchheads(repo.topicProdBranch) or \
     repo.dirstate.branch() != repo.topicProdBranch:
    if tryCommand(ui, "update %s" % quoteBranch(repo.topicProdBranch), \
                  lambda:commands.update(ui, repo, node=repo.topicProdBranch, check=True)):
      return 1

  # See if there's a .topic_hgrc_adds file.
  addsFile = os.path.join(repo.path, "..", ".topic_hgrc_adds")
  if not os.path.exists(addsFile):
    raise util.Abort("There is no .topic_hgrc_adds file in your repository. Without that,\n" +
                     "the Topic extension cannot guess how to configure itself.")
  with open(addsFile, "r") as f:
    toAdd = f.read()

  # Stick in the extension
  topicDir = os.path.dirname(__file__)
  toAdd += "\n[extensions]\ntopic = %s\n" % os.path.join(topicDir, "topic.py")

  # Offer to add things to the .hgrc file
  ui.status("The following will be added to the .hg/hgrc file for this repo:\n\n" + toAdd)
  res = ui.prompt("\nOkay to proceed?", default="y")
  if res.lower() != "y" and res.lower() != "yes":
    raise util.Abort("Ok.")

  # Let's do it.
  hgrcFile = os.path.join(repo.path, "hgrc")
  with open(hgrcFile, "r") as f:
    existing = f.read()

  with open(hgrcFile, "w") as f:
    f.write(existing + "\n" + toAdd)

  ui.status("Done.\n")
示例#53
0
def topen(ui, repo, *args, **opts):
  """ open (create) a new topic branch """

  mustBeTopicRepo(repo)

  # Check the arguments: from the menu we prompt, otherwise require a name.
  if 'tmenu' in opts:
    resp = ui.prompt("Name for new branch:", None)
    if not resp:
      return 1
    args = [resp]

  elif len(args) < 1:
    ui.warn("Error: You must specify a name for the new branch.\n")
    return 1

  # Pull new changes from the central repo
  if not opts.get('nopull', False):
    if tryCommand(ui, "pull", lambda:commands.pull(ui, repo, **opts) >= 2):
      return 1

  # Validate the name: it must not collide with any branch, open or closed.
  target = args[0]
  if target in topicBranchNames(repo, closed=True) + [repo.topicProdBranch]:
    ui.warn("Error: a branch with that name already exists; try choosing a different name.\n")
    return 1

  # Make sure we're at the top of the prod branch.
  # CONSISTENCY FIX: use repo.topicProdBranch in the branchheads() lookup
  # instead of the hard-coded 'prod' literal -- the rest of this condition
  # already uses repo.topicProdBranch.
  if repo.dirstate.parents()[0] not in repo.branchheads(repo.topicProdBranch) or \
     repo.dirstate.branch() != repo.topicProdBranch:
    if tryCommand(ui, "update %s" % quoteBranch(repo.topicProdBranch), \
                  lambda:commands.update(ui, repo, node=repo.topicProdBranch, check=True)):
      return 1

  # Create the new branch and commit it.
  if tryCommand(ui, 'branch %s' % target, lambda:commands.branch(ui, repo, target)):
    return 1

  text = "Opening branch %s" % quoteBranch(target)
  return tryCommand(ui, "commit", lambda:repo.commit(text) is None)
示例#54
0
        def _feature_func(action, name, target_branch):
            # Dispatch a "feature" sub-command: start, finish or change.
            if 'start' == action:
                self._startBranch(target_branch, 'feature')

            elif 'finish' == action:
                # Finish this feature:
                #   1. check develop branch version and current feature
                #   2. close this branch
                #   3. merge it into develop
                #   4. commit develop branch
                if not self._findBranch(target_branch, name):
                    return

                commands.update(self.ui, self.repo, target_branch)
                commands.commit(self.ui,
                                self.repo,
                                close_branch=True,
                                message='hg flow, close feature %s' % (name, ))

                commands.update(self.ui, self.repo, self.developBranch)
                commands.merge(self.ui, self.repo, target_branch)
                # BUG FIX: this merges a *feature* branch, so the commit
                # message now says "merge feature" instead of "merge release".
                commands.commit(
                    self.ui,
                    self.repo,
                    message='hg flow, merge feature `%s` to develop branch `%s`'
                    % (name, self.developBranch))

            elif 'change' == action:
                commands.update(self.ui, self.repo, target_branch)

            # The 'fetchdev' action was disabled in the original with a
            # dangling string literal; preserved here as a real comment:
            # elif 'fetchdev' == action:
            #     pass
            else:
                self.outputln(_('Please give a valid action.'))
示例#55
0
def _docheckout(
    ui,
    url,
    dest,
    upstream,
    revision,
    branch,
    purge,
    sharebase,
    optimes,
    behaviors,
    networkattemptlimit,
    networkattempts=None,
    sparse_profile=None,
    noupdate=False,
):
    """Robustly ensure ``dest`` contains a checkout of ``url`` at the wanted
    revision, using pooled shared storage under ``sharebase``.

    Corrupt/locked/mis-configured destinations and stores are deleted and the
    whole operation is retried from scratch (``callself``). Network failures
    are retried up to ``networkattemptlimit`` times with exponential backoff.

    ``optimes`` collects ``(operation, seconds)`` timing tuples and
    ``behaviors`` collects tags describing what the run did; both are
    caller-owned accumulators. Returns ``None`` on success (or after handing
    off to a retry) and may raise ``error.Abort`` on unrecoverable problems.
    """
    if not networkattempts:
        networkattempts = [1]

    def callself():
        # Re-enter this function with identical arguments; used as the
        # universal "start over" recovery mechanism after deleting state.
        return _docheckout(
            ui,
            url,
            dest,
            upstream,
            revision,
            branch,
            purge,
            sharebase,
            optimes,
            behaviors,
            networkattemptlimit,
            networkattempts=networkattempts,
            sparse_profile=sparse_profile,
            noupdate=noupdate,
        )

    @contextlib.contextmanager
    def timeit(op, behavior):
        # Time the wrapped operation, recording `(op, elapsed)` into optimes
        # and tagging the run with `behavior`; failed ops get an "_errored"
        # suffix so timings for failures are distinguishable.
        behaviors.add(behavior)
        errored = False
        try:
            start = time.time()
            yield
        except Exception:
            errored = True
            raise
        finally:
            elapsed = time.time() - start

            if errored:
                op += "_errored"

            optimes.append((op, elapsed))

    ui.write(b"ensuring %s@%s is available at %s\n" % (url, revision or branch, dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = vfs.vfs(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        # Remove the shared store backing this checkout (or the explicit
        # `path`), resolving the store root from `.hg/sharedpath` if needed.
        storepath = path or destvfs.read(b".hg/sharedpath").strip()
        if storepath.endswith(b".hg"):
            storepath = os.path.dirname(storepath)

        storevfs = vfs.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists(b".hg"):
        raise error.Abort(b"destination exists but no .hg directory")

    # Refuse to enable sparse checkouts on existing checkouts. The reasoning
    # here is that another consumer of this repo may not be sparse aware. If we
    # enabled sparse, we would lock them out.
    if destvfs.exists() and sparse_profile and not destvfs.exists(b".hg/sparse"):
        raise error.Abort(
            b"cannot enable sparse profile on existing " b"non-sparse checkout",
            hint=b"use a separate working directory to use sparse",
        )

    # And the other direction for symmetry.
    if not sparse_profile and destvfs.exists(b".hg/sparse"):
        raise error.Abort(
            b"cannot use non-sparse checkout on existing sparse " b"checkout",
            hint=b"use a separate working directory to use sparse",
        )

    # Require checkouts to be tied to shared storage because efficiency.
    if destvfs.exists(b".hg") and not destvfs.exists(b".hg/sharedpath"):
        ui.warn(b"(destination is not shared; deleting)\n")
        with timeit("remove_unshared_dest", "remove-wdir"):
            destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists(b".hg/sharedpath"):
        storepath = destvfs.read(b".hg/sharedpath").strip()

        ui.write(b"(existing repository shared store: %s)\n" % storepath)

        if not os.path.exists(storepath):
            ui.warn(b"(shared store does not exist; deleting destination)\n")
            with timeit("removed_missing_shared_store", "remove-wdir"):
                destvfs.rmtree(forcibly=True)
        elif not re.search(b"[a-f0-9]{40}/\.hg$", storepath.replace(b"\\", b"/")):
            ui.warn(
                b"(shared store does not belong to pooled storage; "
                b"deleting destination to improve efficiency)\n"
            )
            with timeit("remove_unpooled_store", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

    if destvfs.isfileorlink(b".hg/wlock"):
        ui.warn(
            b"(dest has an active working directory lock; assuming it is "
            b"left over from a previous process and that the destination "
            b"is corrupt; deleting it just to be sure)\n"
        )
        with timeit("remove_locked_wdir", "remove-wdir"):
            destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        # Attempt recovery from an abandoned transaction; on failure delete
        # the shared store and restart the checkout from the top.
        if pycompat.bytestr(e) == _(b"abandoned transaction found"):
            ui.warn(b"(abandoned transaction found; trying to recover)\n")
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn(b"(could not recover repo state; " b"deleting shared store)\n")
                with timeit("remove_unrecovered_shared_store", "remove-store"):
                    deletesharedstore()

            ui.warn(b"(attempting checkout from beginning)\n")
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        # Count a network failure, aborting once the attempt limit is hit,
        # otherwise sleeping with exponential backoff plus jitter.
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort(
                b"reached maximum number of network attempts; " b"giving up\n"
            )

        ui.warn(
            b"(retrying after network failure on attempt %d of %d)\n"
            % (networkattempts[0], networkattemptlimit)
        )

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multipler
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2 ** networkattempts[0] - 1) * 1.5
        jittermin = ui.configint(b"robustcheckout", b"retryjittermin", 1000)
        jittermax = ui.configint(b"robustcheckout", b"retryjittermax", 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn(b"(waiting %.2fs before retry)\n" % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _(b"repository is unrelated"):
                ui.warn(b"(repository is unrelated; deleting)\n")
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_(b"stream ended unexpectedly")):
                ui.warn(b"%s\n" % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        # TODO test this branch
        elif isinstance(e, error.ResponseError):
            if e.args[0].startswith(_(b"unexpected response from remote server:")):
                ui.warn(b"(unexpected response from remote server; retrying)\n")
                destvfs.rmtree(forcibly=True)
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn(b"ssl error: %s\n" % e)
            handlenetworkfailure()
            return True
        elif isinstance(e, urllibcompat.urlerr.urlerror):
            if isinstance(e.reason, socket.error):
                ui.warn(b"socket error: %s\n" % pycompat.bytestr(e.reason))
                handlenetworkfailure()
                return True
            else:
                ui.warn(
                    b"unhandled URLError; reason type: %s; value: %s\n"
                    % (e.reason.__class__.__name__, e.reason)
                )
        else:
            ui.warn(
                b"unhandled exception during network operation; type: %s; "
                b"value: %s\n" % (e.__class__.__name__, e)
            )

        return False

    # Perform sanity checking of store. We may or may not know the path to the
    # local store. It depends if we have an existing destvfs pointing to a
    # share. To ensure we always find a local store, perform the same logic
    # that Mercurial's pooled storage does to resolve the local store path.
    cloneurl = upstream or url

    try:
        clonepeer = hg.peer(ui, {}, cloneurl)
        rootnode = peerlookup(clonepeer, b"0")
    except error.RepoLookupError:
        raise error.Abort(b"unable to resolve root revision from clone " b"source")
    except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
        if handlepullerror(e):
            return callself()
        raise

    if rootnode == nullid:
        raise error.Abort(b"source repo appears to be empty")

    storepath = os.path.join(sharebase, hex(rootnode))
    storevfs = vfs.vfs(storepath, audit=False)

    if storevfs.isfileorlink(b".hg/store/lock"):
        ui.warn(
            b"(shared store has an active lock; assuming it is left "
            b"over from a previous process and that the store is "
            b"corrupt; deleting store and destination just to be "
            b"sure)\n"
        )
        if destvfs.exists():
            with timeit("remove_dest_active_lock", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

        with timeit("remove_shared_store_active_lock", "remove-store"):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists() and not storevfs.exists(b".hg/requires"):
        ui.warn(
            b"(shared store missing requires file; this is a really "
            b"odd failure; deleting store and destination)\n"
        )
        if destvfs.exists():
            with timeit("remove_dest_no_requires", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

        with timeit("remove_shared_store_no_requires", "remove-store"):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists(b".hg/requires"):
        requires = set(storevfs.read(b".hg/requires").splitlines())
        # FUTURE when we require generaldelta, this is where we can check
        # for that.
        required = {b"dotencode", b"fncache"}

        missing = required - requires
        if missing:
            ui.warn(
                b"(shared store missing requirements: %s; deleting "
                b"store and destination to ensure optimal behavior)\n"
                % b", ".join(sorted(missing))
            )
            if destvfs.exists():
                with timeit("remove_dest_missing_requires", "remove-wdir"):
                    destvfs.rmtree(forcibly=True)

            with timeit("remove_shared_store_missing_requires", "remove-store"):
                storevfs.rmtree(forcibly=True)

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, "ensuredirs"):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write(b"(cloning from upstream repo %s)\n" % upstream)

        if not storevfs.exists():
            behaviors.add(b"create-store")

        try:
            with timeit("clone", "clone"):
                shareopts = {b"pool": sharebase, b"mode": b"identity"}
                res = hg.clone(
                    ui,
                    {},
                    clonepeer,
                    dest=dest,
                    update=False,
                    shareopts=shareopts,
                    stream=True,
                )
        except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
            # BUG FIX: behavior tag was previously the typo "remote-store";
            # every other store-removal site uses "remove-store".
            with timeit("remove_shared_store_revlogerror", "remove-store"):
                deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort(b"clone failed")

        # Verify it is using shared pool storage.
        if not destvfs.exists(b".hg/sharedpath"):
            raise error.Abort(b"clone did not create a shared repo")

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False

    if revision:
        try:
            ctx = scmutil.revsingle(repo, revision)
        except error.RepoLookupError:
            ctx = None

        if ctx:
            if not ctx.hex().startswith(revision):
                raise error.Abort(
                    b"--revision argument is ambiguous",
                    hint=b"must be the first 12+ characters of a " b"SHA-1 fragment",
                )

            checkoutrevision = ctx.hex()
            havewantedrev = True

    if not havewantedrev:
        ui.write(b"(pulling to obtain %s)\n" % (revision or branch,))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [peerlookup(remote, revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn(
                    b"(remote resolved %s to %s; "
                    b"result is not deterministic)\n" % (branch, checkoutrevision)
                )

            if checkoutrevision in repo:
                ui.warn(b"(revision already present locally; not pulling)\n")
            else:
                with timeit("pull", "pull"):
                    pullop = exchange.pull(repo, remote, heads=pullrevs)
                    if not pullop.rheads:
                        raise error.Abort(b"unable to pull requested revision")
        except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Avoid any working directory manipulations if `-U`/`--noupdate` was passed
    if noupdate:
        ui.write(b"(skipping update since `-U` was passed)\n")
        return None

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write(b"(purging working directory)\n")
        purgeext = extensions.find(b"purge")

        # Mercurial 4.3 doesn't purge files outside the sparse checkout.
        # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
        # purging by monkeypatching the sparse matcher.
        try:
            old_sparse_fn = getattr(repo.dirstate, "_sparsematchfn", None)
            if old_sparse_fn is not None:
                # TRACKING hg50
                # Arguments passed to `matchmod.always` were unused and have been removed
                if util.versiontuple(n=2) >= (5, 0):
                    repo.dirstate._sparsematchfn = lambda: matchmod.always()
                else:
                    repo.dirstate._sparsematchfn = lambda: matchmod.always(
                        repo.root, ""
                    )

            with timeit("purge", "purge"):
                if purgeext.purge(
                    ui,
                    repo,
                    all=True,
                    abort_on_err=True,
                    # The function expects all arguments to be
                    # defined.
                    **{"print": None, "print0": None, "dirs": None, "files": None}
                ):
                    raise error.Abort(b"error purging")
        finally:
            if old_sparse_fn is not None:
                repo.dirstate._sparsematchfn = old_sparse_fn

    # Update the working directory.

    if repo[b"."].node() == nullid:
        behaviors.add("empty-wdir")
    else:
        behaviors.add("populated-wdir")

    if sparse_profile:
        sparsemod = getsparse()

        # By default, Mercurial will ignore unknown sparse profiles. This could
        # lead to a full checkout. Be more strict.
        try:
            repo.filectx(sparse_profile, changeid=checkoutrevision).data()
        except error.ManifestLookupError:
            raise error.Abort(
                b"sparse profile %s does not exist at revision "
                b"%s" % (sparse_profile, checkoutrevision)
            )

        # TRACKING hg48 - parseconfig takes `action` param
        if util.versiontuple(n=2) >= (4, 8):
            old_config = sparsemod.parseconfig(
                repo.ui, repo.vfs.tryread(b"sparse"), b"sparse"
            )
        else:
            old_config = sparsemod.parseconfig(repo.ui, repo.vfs.tryread(b"sparse"))

        old_includes, old_excludes, old_profiles = old_config

        if old_profiles == {sparse_profile} and not old_includes and not old_excludes:
            ui.write(
                b"(sparse profile %s already set; no need to update "
                b"sparse config)\n" % sparse_profile
            )
        else:
            if old_includes or old_excludes or old_profiles:
                ui.write(
                    b"(replacing existing sparse config with profile "
                    b"%s)\n" % sparse_profile
                )
            else:
                ui.write(b"(setting sparse config to profile %s)\n" % sparse_profile)

            # If doing an incremental update, this will perform two updates:
            # one to change the sparse profile and another to update to the new
            # revision. This is not desired. But there's not a good API in
            # Mercurial to do this as one operation.
            with repo.wlock(), timeit("sparse_update_config", "sparse-update-config"):
                # pylint --py3k: W1636
                fcounts = list(
                    map(
                        len,
                        sparsemod._updateconfigandrefreshwdir(
                            repo, [], [], [sparse_profile], force=True
                        ),
                    )
                )

                repo.ui.status(
                    b"%d files added, %d files dropped, "
                    b"%d files conflicting\n" % tuple(fcounts)
                )

            ui.write(b"(sparse refresh complete)\n")

    op = "update_sparse" if sparse_profile else "update"
    behavior = "update-sparse" if sparse_profile else "update"

    with timeit(op, behavior):
        if commands.update(ui, repo, rev=checkoutrevision, clean=True):
            raise error.Abort(b"error updating")

    ui.write(b"updated to %s\n" % checkoutrevision)

    return None
示例#56
0
def cmddrop(ui, repo, *revs, **opts):
    """I'm hacky do not use me!

    This command strip a changeset, its precursors and all obsolescence marker
    associated to its chain.

    There is no way to limit the extend of the purge yet. You may have to
    repull from other source to get some changeset and obsolescence marker
    back.

    This intended for Matt Mackall usage only. do not use me.
    """
    # Merge positional revisions with any passed via --rev; default to
    # the working directory parent when none were given.
    revs = list(revs)
    revs.extend(opts['rev'])
    if not revs:
        revs = ['.']
    # get the changeset
    revs = scmutil.revrange(repo, revs)
    if not revs:
        ui.write_err('no revision to drop\n')
        return 1
    # lock from the beginning to prevent race
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        # check they have no children
        # Refuse to strip anything published or with descendants: stripping
        # either would corrupt history other people may depend on.
        if repo.revs('%ld and public()', revs):
            ui.write_err('cannot drop public revision')
            return 1
        if repo.revs('children(%ld) - %ld', revs, revs):
            ui.write_err('cannot drop revision with children')
            return 1
        # If the working directory sits on a revision being dropped, move it
        # to the nearest surviving ancestor first (or to rev -1 / null when
        # there is none) so the strip does not orphan the working copy.
        if repo.revs('. and %ld', revs):
            newrevs = repo.revs('max(::. - %ld)', revs)
            if newrevs:
                assert len(newrevs) == 1
                newrev = newrevs.first()
            else:
                newrev = -1
            commands.update(ui, repo, newrev)
            ui.status(_('working directory now at %s\n') % repo[newrev])
        # get all markers and successors up to root
        # `obsmarkerchainfrom` appears to walk the obsolescence-marker chain
        # reachable from these nodes -- TODO confirm against its definition.
        nodes = [repo[r].node() for r in revs]
        with timed(ui, 'search obsmarker'):
            markers = set(obsmarkerchainfrom(repo.obsstore, nodes))
        ui.write('%i obsmarkers found\n' % len(markers))
        # Use the unfiltered changelog: marker precursors may be hidden.
        cl = repo.unfiltered().changelog
        with timed(ui, 'search nodes'):
            allnodes = set(nodes)
            allnodes.update(m[0] for m in markers if cl.hasnode(m[0]))
        ui.write('%i nodes found\n' % len(allnodes))
        cl = repo.changelog
        visiblenodes = set(n for n in allnodes if cl.hasnode(n))
        # check constraint again
        # The marker walk may have pulled in extra visible nodes, so re-run
        # the public/children safety checks on the expanded set.
        if repo.revs('%ln and public()', visiblenodes):
            ui.write_err('cannot drop public revision')
            return 1
        if repo.revs('children(%ln) - %ln', visiblenodes, visiblenodes):
            ui.write_err('cannot drop revision with children')
            return 1

        if markers:
            # strip them
            with timed(ui, 'strip obsmarker'):
                stripmarker(ui, repo, markers)
        # strip the changeset
        # backup="all" keeps a bundle of everything stripped, so the data can
        # be restored if this hack removed too much.
        with timed(ui, 'strip nodes'):
            repair.strip(ui, repo, list(allnodes), backup="all",
                         topic='drophack')

    finally:
        # Release in reverse acquisition order (store lock, then wlock).
        lockmod.release(lock, wlock)
示例#57
0
 def update(self, wire, node=None, clean=False):
     """Update the working directory of the repo described by *wire*.

     Resolves the repository and a ui configured from ``wire['config']``
     through the factory, then delegates to ``commands.update`` with the
     given target *node* and *clean* flag.
     """
     factory = self._factory
     target_repo = factory.repo(wire)
     config_ui = factory._create_config(wire['config'])
     commands.update(config_ui, target_repo, node=node, clean=clean)
示例#58
0
def _docheckout(ui,
                url,
                dest,
                upstream,
                revision,
                branch,
                purge,
                sharebase,
                networkattemptlimit,
                networkattempts=None,
                sparse_profile=None):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(ui,
                           url,
                           dest,
                           upstream,
                           revision,
                           branch,
                           purge,
                           sharebase,
                           networkattemptlimit,
                           networkattempts=networkattempts,
                           sparse_profile=sparse_profile)

    ui.write('ensuring %s@%s is available at %s\n' %
             (url, revision or branch, dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = getvfs()(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        storepath = path or destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = getvfs()(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Refuse to enable sparse checkouts on existing checkouts. The reasoning
    # here is that another consumer of this repo may not be sparse aware. If we
    # enabled sparse, we would lock them out.
    if destvfs.exists(
    ) and sparse_profile and not destvfs.exists('.hg/sparse'):
        raise error.Abort(
            'cannot enable sparse profile on existing '
            'non-sparse checkout',
            hint='use a separate working directory to use sparse')

    # And the other direction for symmetry.
    if not sparse_profile and destvfs.exists('.hg/sparse'):
        raise error.Abort(
            'cannot use non-sparse checkout on existing sparse '
            'checkout',
            hint='use a separate working directory to use sparse')

    # Require checkouts to be tied to shared storage because efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting destination)\n')
            destvfs.rmtree(forcibly=True)
        elif not re.search('[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting destination to improve efficiency)\n')
            destvfs.rmtree(forcibly=True)

    if destvfs.isfileorlink('.hg/wlock'):
        ui.warn('(dest has an active working directory lock; assuming it is '
                'left over from a previous process and that the destination '
                'is corrupt; deleting it just to be sure)\n')
        destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort('reached maximum number of network attempts; '
                              'giving up\n')

        ui.warn('(retrying after network failure on attempt %d of %d)\n' %
                (networkattempts[0], networkattemptlimit))

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multipler
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2**networkattempts[0] - 1) * 1.5
        jittermin = ui.configint('robustcheckout', 'retryjittermin', 1000)
        jittermax = ui.configint('robustcheckout', 'retryjittermax', 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn('(waiting %.2fs before retry)\n' % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _('repository is unrelated'):
                ui.warn('(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_('stream ended unexpectedly')):
                ui.warn('%s\n' % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn('ssl error: %s\n' % e)
            handlenetworkfailure()
            return True
        elif isinstance(e, urllib2.URLError):
            if isinstance(e.reason, socket.error):
                ui.warn('socket error: %s\n' % e.reason)
                handlenetworkfailure()
                return True
            else:
                ui.warn('unhandled URLError; reason type: %s; value: %s' %
                        (e.reason.__class__.__name__, e.reason))
        else:
            ui.warn('unhandled exception during network operation; type: %s; '
                    'value: %s' % (e.__class__.__name__, e))

        return False

    # Perform sanity checking of store. We may or may not know the path to the
    # local store. It depends if we have an existing destvfs pointing to a
    # share. To ensure we always find a local store, perform the same logic
    # that Mercurial's pooled storage does to resolve the local store path.
    cloneurl = upstream or url

    try:
        # Resolve the changeset at revision 0 on the clone source. The root
        # changeset hash uniquely identifies the repository and is used below
        # to key the shared store path.
        clonepeer = hg.peer(ui, {}, cloneurl)
        rootnode = clonepeer.lookup('0')
    except error.RepoLookupError:
        raise error.Abort('unable to resolve root revision from clone '
                          'source')
    except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
        # NOTE(review): handlepullerror/callself are defined outside this
        # view; presumably a transient-network-error check and a retry of the
        # whole operation -- confirm against the enclosing function.
        if handlepullerror(e):
            return callself()
        raise

    if rootnode == nullid:
        raise error.Abort('source repo appears to be empty')

    # Shared stores are pooled by the hex of the root changeset so all clones
    # of the same repository share a single store.
    storepath = os.path.join(sharebase, hex(rootnode))
    storevfs = getvfs()(storepath, audit=False)

    # A lingering lock file in the shared store most likely means a previous
    # process died mid-operation. Treat the store as corrupt and start over.
    if storevfs.isfileorlink('.hg/store/lock'):
        ui.warn('(shared store has an active lock; assuming it is left '
                'over from a previous process and that the store is '
                'corrupt; deleting store and destination just to be '
                'sure)\n')
        if destvfs.exists():
            destvfs.rmtree(forcibly=True)
        storevfs.rmtree(forcibly=True)

    # A store directory without a requires file is malformed; delete both the
    # store and the destination that points at it.
    if storevfs.exists() and not storevfs.exists('.hg/requires'):
        ui.warn('(shared store missing requires file; this is a really '
                'odd failure; deleting store and destination)\n')
        if destvfs.exists():
            destvfs.rmtree(forcibly=True)
        storevfs.rmtree(forcibly=True)

    if storevfs.exists('.hg/requires'):
        requires = set(storevfs.read('.hg/requires').splitlines())
        # FUTURE when we require generaldelta, this is where we can check
        # for that.
        required = {'dotencode', 'fncache'}

        # Delete stores lacking the storage requirements we insist on, so a
        # fresh store with the desired configuration is created below.
        missing = required - requires
        if missing:
            ui.warn('(shared store missing requirements: %s; deleting '
                    'store and destination to ensure optimal behavior)\n' %
                    ', '.join(sorted(missing)))
            if destvfs.exists():
                destvfs.rmtree(forcibly=True)
            storevfs.rmtree(forcibly=True)

    # Tracks whether this call performed a fresh clone; used later to skip
    # purging a working directory we just created.
    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)

        # Clone without updating the working directory. 'identity' pool mode
        # keys the shared store by the root changeset resolved above.
        try:
            res = hg.clone(ui, {},
                           clonepeer,
                           dest=dest,
                           update=False,
                           shareopts={
                               'pool': sharebase,
                               'mode': 'identity'
                           })
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            # Revlog errors imply corruption in the shared store: delete it
            # and retry from scratch. (e.message is Python 2 only.)
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False
    if revision and revision in repo:
        ctx = repo[revision]

        # Require the argument to be an unambiguous prefix of the full hash
        # so checkouts are reproducible.
        if not ctx.hex().startswith(revision):
            raise error.Abort('--revision argument is ambiguous',
                              hint='must be the first 12+ characters of a '
                              'SHA-1 fragment')

        checkoutrevision = ctx.hex()
        havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch, ))

        remote = None
        try:
            # Resolve the symbolic name (or locally-missing hash) on the
            # remote and pull exactly that head.
            remote = hg.peer(repo, {}, url)
            pullrevs = [remote.lookup(revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                # Branch heads move over time, so the hash a branch name
                # resolves to depends on when the pull happens.
                ui.warn('(remote resolved %s to %s; '
                        'result is not deterministic)\n' %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn('(revision already present locally; not pulling)\n')
            else:
                pullop = exchange.pull(repo, remote, heads=pullrevs)
                if not pullop.rheads:
                    raise error.Abort('unable to pull requested revision')
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()
        finally:
            # Always release the remote peer, even on the retry paths above.
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        # Mercurial 4.3 doesn't purge files outside the sparse checkout.
        # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
        # purging by monkeypatching the sparse matcher.
        try:
            old_sparse_fn = getattr(repo.dirstate, '_sparsematchfn', None)
            if old_sparse_fn is not None:
                # Only versions known to need the workaround get patched;
                # assert so the hack is revisited on a Mercurial upgrade.
                assert util.versiontuple(n=2) in ((4, 3), (4, 4), (4, 5))
                repo.dirstate._sparsematchfn = lambda: matchmod.always(
                    repo.root, '')

            if purgeext.purge(
                    ui,
                    repo,
                    all=True,
                    abort_on_err=True,
                    # The function expects all arguments to be
                    # defined.
                    **{
                        'print': None,
                        'print0': None,
                        'dirs': None,
                        'files': None
                    }):
                raise error.Abort('error purging')
        finally:
            # Restore the original sparse matcher even if purge failed.
            if old_sparse_fn is not None:
                repo.dirstate._sparsematchfn = old_sparse_fn

    # Update the working directory.

    if sparse_profile:
        sparsemod = getsparse()

        # By default, Mercurial will ignore unknown sparse profiles. This could
        # lead to a full checkout. Be more strict.
        try:
            repo.filectx(sparse_profile, changeid=checkoutrevision).data()
        except error.ManifestLookupError:
            raise error.Abort('sparse profile %s does not exist at revision '
                              '%s' % (sparse_profile, checkoutrevision))

        # Compare the currently-active sparse config against the requested
        # profile so a no-op checkout can skip the expensive refresh.
        old_config = sparsemod.parseconfig(repo.ui, repo.vfs.tryread('sparse'))
        old_includes, old_excludes, old_profiles = old_config

        if old_profiles == {sparse_profile} and not old_includes and not \
                old_excludes:
            ui.write('(sparse profile %s already set; no need to update '
                     'sparse config)\n' % sparse_profile)
        else:
            if old_includes or old_excludes or old_profiles:
                ui.write('(replacing existing sparse config with profile '
                         '%s)\n' % sparse_profile)
            else:
                ui.write('(setting sparse config to profile %s)\n' %
                         sparse_profile)

            # If doing an incremental update, this will perform two updates:
            # one to change the sparse profile and another to update to the new
            # revision. This is not desired. But there's not a good API in
            # Mercurial to do this as one operation.
            with repo.wlock():
                fcounts = map(
                    len,
                    sparsemod._updateconfigandrefreshwdir(repo, [], [],
                                                          [sparse_profile],
                                                          force=True))

                repo.ui.status('%d files added, %d files dropped, '
                               '%d files conflicting\n' % tuple(fcounts))

            ui.write('(sparse refresh complete)\n')

    # Clean update to the resolved revision; a truthy return value from
    # commands.update indicates failure.
    if commands.update(ui, repo, rev=checkoutrevision, clean=True):
        raise error.Abort('error updating')

    ui.write('updated to %s\n' % checkoutrevision)
    return None