Example #1
    def test_push_to_non_tip(self):
        self.test_push_to_branch(push=False)
        wc2path = self.wc_path + '_clone'
        u = self.repo.ui
        hg.clone(self.repo.ui, self.wc_path, wc2path, update=False)
        res = self.pushrevisions()
        self.assertEqual(0, res)
        oldf = open(os.path.join(self.wc_path, '.hg', 'hgrc'))
        hgrc = oldf.read()
        oldf.close()
        shutil.rmtree(self.wc_path)
        hg.clone(u, wc2path, self.wc_path, update=False)
        oldf = open(os.path.join(self.wc_path, '.hg', 'hgrc'), 'w')
        oldf.write(hgrc)
        oldf.close()

        # do a commit here
        self.commitchanges([('foobaz', 'foobaz', 'This file is added on default.', ),
                            ],
                           parent='default',
                           message='commit to default')
        from hgsubversion import svncommands
        svncommands.rebuildmeta(u,
                                self.repo,
                                args=[test_util.fileurl(self.repo_path)])


        hg.update(self.repo, self.repo['tip'].node())
        oldnode = self.repo['tip'].hex()
        self.pushrevisions(expected_extra_back=1)
        self.assertNotEqual(oldnode, self.repo['tip'].hex(), 'Revision was not pushed.')
Example #2
def hgclone(ui, source, dest, update=True, rev=None):
    if getattr(hg, 'peer', None):
        # Since 1.9 (d976542986d2)
        src, dest = hg.clone(ui, {}, source, dest, update=update, rev=rev)
    else:
        src, dest = hg.clone(ui, source, dest, update=update, rev=rev)
    return src, dest
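
A minimal driver for the shim above, assuming the old-style ui.ui() constructor and hypothetical paths, might look like this:

from mercurial import ui as uimod

# Hypothetical source URL and destination; hgclone is the wrapper above.
myui = uimod.ui()  # newer Mercurial releases construct this via ui.ui.load()
src, dst = hgclone(myui, 'http://example.com/repo', '/tmp/repo-clone',
                   update=True, rev=None)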
Example #3
def _checkevolve(ui, cw, hg_version):
    if hg_version < (4, 3, 0):
        ui.warn(EVOLVE_INCOMPATIBLE)
        return

    remote_evolve_path = 'https://www.mercurial-scm.org/repo/evolve/'
    # Install to the same dir as v-c-t, unless the mozbuild directory path is passed (testing)
    evolve_clone_dir = ui.config('mozilla', 'mozbuild_state_path', _vcthome())

    local_evolve_path = '{evolve_clone_dir}/evolve'.format(
        evolve_clone_dir=evolve_clone_dir)
    evolve_config_value = '{evolve_path}/hgext3rd/evolve'.format(
        evolve_path=local_evolve_path)

    # If evolve is not installed, install it
    if not ui.hasconfig('extensions', 'evolve'):
        if uipromptchoice(ui, EVOLVE_INFO_WARNING):
            return

        try:
            # Clone the evolve extension and enable
            hg.clone(ui, {},
                     remote_evolve_path,
                     branch=('stable', ),
                     dest=local_evolve_path)
            _enableext(cw, 'evolve', evolve_config_value)

            ui.write('Evolve was downloaded successfully.\n')

        except error.Abort as hg_err:
            ui.write(str(hg_err))
            ui.write(EVOLVE_CLONE_ERROR)

    # If evolve is installed and managed by this wizard,
    # update it via pull/update
    elif ui.config('extensions', 'evolve') == evolve_config_value:
        if uipromptchoice(
                ui, EVOLVE_UPDATE_PROMPT.format(evolve_dir=local_evolve_path)):
            return

        try:
            local_evolve_repo = localrepo.localrepository(
                ui, local_evolve_path)

            # Pull the latest stable, update to tip
            hgpull(ui,
                   local_evolve_repo,
                   source=remote_evolve_path,
                   branch=('stable', ))
            hgupdate(ui, local_evolve_repo, rev='stable')

            ui.write('Evolve was updated successfully.\n')

        except error.Abort as hg_err:
            ui.write(EVOLVE_CLONE_ERROR)

    # If evolve is not managed by this wizard, do nothing
    else:
        return
Example #4
    def clone(self, destination=None):
        """ Clone the repository to the local disk. """

        if destination is not None:
            self.destination = destination

        hg.clone(ui.ui(), dict(), self.url, self.destination, True)
        self._repository = hg.repository(ui.ui(), self.destination)
Example #6
def pop_queue(request, queue_name):
    # test count with
    # curl -i http://localhost:8000/q/default/
    # curl -i http://localhost:8000/q/default/json/
    
    # print "GET queue_name is %s" % queue_name
    q = None
    # pre-emptive queue name checking...
    try:
        q = Queue.objects.get(name=queue_name)
    except Queue.DoesNotExist:
        return HttpResponseNotFound()
    #
    msg = q.message_set.pop()
    response_message='void'
    if msg:
        u = ui.ui()
        message = json_encode(msg.message)
        project = Project.projects.get(project_id__exact = message['local_parent_project'])
        repo = Repo.objects.get(directory_name__exact=message['directory_name'], local_parent_project__exact=project)
        
        if (queue_name == 'repoclone'):
            try:
                hg.clone(u, str(repo.default_path), repo.repo_directory, True)
                repo.created = True
            except:
                response_message = 'failed'
            try:
                m = Message.objects.get(id=msg.id, queue=q.id)
                m.delete()
                repo.save()
                project.save()
                response_message = 'success'
            except:
                response_message = 'failed'
        elif (queue_name == 'repoupdate'):
            location = hg.repository(u, repo.repo_directory)
            try:
                commands.pull(u, location, str(repo.default_path), rev=['tip'], force=True, update=True)
                repo.folder_size = 0
                for (path, dirs, files) in os.walk(repo.repo_directory):
                    for file in files:
                        filename = os.path.join(path, file)
                        repo.folder_size += os.path.getsize(filename)
                repo.save()
                m = Message.objects.get(id=msg.id, queue=q.id)
                m.delete()
                project.save()
                response_message = 'success'
            except:
                response_message = 'failed'
    if (response_message == 'failed'):
        return HttpResponseServerError()
    else:
        return HttpResponse(response_message)
Example #7
def _checkevolve(ui, cw, hg_version):
    if hg_version < (4, 3, 0):
        ui.warn(EVOLVE_INCOMPATIBLE)
        return

    remote_evolve_path = b'https://www.mercurial-scm.org/repo/evolve/'
    # Install to the same dir as v-c-t, unless the mozbuild directory path is passed (testing)
    evolve_clone_dir = ui.config(b'mozilla', b'mozbuild_state_path', _vcthome())

    local_evolve_path = b'%(evolve_clone_dir)s/evolve' % {b'evolve_clone_dir': evolve_clone_dir}
    evolve_config_value = os.path.normpath('%(evolve_path)s/hgext3rd/evolve' % \
                                           {'evolve_path': pycompat.sysstr(local_evolve_path)})

    users_evolve_path = ui.config(b'extensions', b'evolve')
    if users_evolve_path:
        users_evolve_path = os.path.normpath(pycompat.fsdecode(util.normpath(util.expandpath(users_evolve_path))))

    # If evolve is not installed, install it. (If the user's path to evolve is
    # the path that we manage, but it doesn't exist yet, assume that their
    # config file has been copied to a new machine and we need to clone evolve.
    if users_evolve_path == None or \
            (users_evolve_path == evolve_config_value and not os.path.exists(evolve_config_value)):
        if uipromptchoice(ui, EVOLVE_INFO_WARNING):
            return

        try:
            # Clone the evolve extension and enable
            hg.clone(ui, {}, remote_evolve_path, branch=(b'stable',), dest=local_evolve_path)
            _enableext(cw, 'evolve', evolve_config_value)

            ui.write(b'Evolve was downloaded successfully.\n')

        except error.Abort as hg_err:
            ui.write(pycompat.bytestr(hg_err))
            ui.write(EVOLVE_CLONE_ERROR)

        return

    # If evolve is installed and managed by this wizard,
    # update it via pull/update
    if users_evolve_path == evolve_config_value:
        if uipromptchoice(ui, EVOLVE_UPDATE_PROMPT % {b'evolve_dir': local_evolve_path}):
            return

        try:
            local_evolve_repo = hg.repository(ui, local_evolve_path)

            # Pull the latest stable, update to tip
            hgpull(ui, local_evolve_repo, source=remote_evolve_path, branch=(b'stable',))
            hgupdate(ui, local_evolve_repo, rev=b'stable')

            ui.write(b'Evolve was updated successfully.\n')

        except error.Abort as hg_err:
            ui.write(EVOLVE_CLONE_ERROR)
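
Example #7 is Example #3's wizard after Mercurial's Python 3 port: config sections, keys, and messages become bytes, with pycompat helpers bridging the two string types. A hedged sketch of that bridging pattern in isolation, assuming a ui object is in scope:

from mercurial import pycompat

section, key = b'extensions', b'evolve'
configured = ui.config(section, key)        # bytes or None
if configured is not None:
    as_str = pycompat.fsdecode(configured)  # str, suitable for os.path work
    as_bytes = pycompat.bytestr(as_str)     # back to Mercurial's native bytes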
Example #8
def hgclone(ui, source, dest, update=True, rev=None):
    if getattr(hg, 'peer', None):
        try:
            # Since 1.9 (d976542986d2)
            src, dest = hg.clone(ui, {}, source, dest, update=update, rev=rev)
        except TypeError:
            # hg 4.6+ wants revs instead of rev
            src, dest = hg.clone(ui, {}, source, dest, update=update, revs=rev)
    else:
        src, dest = hg.clone(ui, source, dest, update=update, rev=rev)
    return src, dest
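
The TypeError probe above is an EAFP way to survive the rev-to-revs keyword rename in Mercurial 4.6. An alternative sketch (hypothetical helper, same assumption about the rename) inspects hg.clone's signature once instead of catching the exception on every call:

import inspect

from mercurial import hg

def _clone_rev_kwarg():
    # Mercurial 4.6 renamed hg.clone's 'rev' keyword to 'revs'.
    try:
        params = inspect.signature(hg.clone).parameters  # Python 3
    except AttributeError:
        params = inspect.getargspec(hg.clone).args       # Python 2
    return 'revs' if 'revs' in params else 'rev'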
Example #9
 def checkout(self, path="."):
     """Clone a repository."""
     path = os.path.abspath(path)
     if self.url == path:
         # update
         hg.update(self._repository, None)
     else:
         try:
             hg.clone(self._repository.ui, {}, self.url, path, update=True)
         except:  # hg.clone fails for older versions of mercurial, e.g. 1.5
             local_repos = hg.repository(self._repository.ui, path, create=True)
             local_repos.pull(self._repository)
             hg.update(local_repos, None)
Example #10
    def clone(self, destination=None):
        """ Clone the repository to the local disk. """
        if destination is not None:
            self.destination = destination

        print "*** Cloning repository to '%s'" % self.destination

        # Bug 676793: Due to an API change in 1.9 the order of parameters has
        #             been changed.
        if __version__.version >= "1.9":
            hg.clone(ui.ui(), dict(), self.url, self.destination, True)
        else:
            hg.clone(ui.ui(), self.url, self.destination, True)
        self._repository = hg.repository(ui.ui(), self.destination)
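
One caveat in the version gate above: comparing version strings lexically misorders double-digit minors ("1.10" sorts before "1.9" as a string). A safer sketch, assuming the same __version__.version attribute, compares numeric tuples instead:

def _version_tuple(version_string):
    # "1.9.3" -> (1, 9, 3); stops at the first non-numeric component.
    parts = []
    for piece in version_string.split('.'):
        if not piece.isdigit():
            break
        parts.append(int(piece))
    return tuple(parts)

new_clone_api = _version_tuple(__version__.version) >= (1, 9)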
Example #11
 def checkout(self, path="."):
     """Clone a repository."""
     path = os.path.abspath(path)
     if self.url == path:
         # update
         hg.update(self._repository, None)
     else:
         try:
             hg.clone(self._repository.ui, {}, self.url, path, update=True)
         except:  # hg.clone fails for older versions of mercurial, e.g. 1.5
             local_repos = hg.repository(self._repository.ui,
                                         path,
                                         create=True)
             local_repos.pull(self._repository)
             hg.update(local_repos, None)
Example #12
    def build_repo(self, url):
        '''Make the Mercurial repo object self.repo available. If the local
        clone does not exist, clone it, otherwise, ensure it is fetched.'''
        myui = ui()
        myui.setconfig('ui', 'interactive', 'off')
        myui.setconfig('extensions', 'mq', '')
        # FIXME: the following is a hack to achieve hg-git / remote-git compatibility
        # at least for *local* operations. still need to figure out what the right
        # thing to do is.
        myui.setconfig('phases', 'publish', False)

        local_path = self.remotedir.joinpath('clone')
        if not local_path.exists():
            try:
                self.peer, dstpeer = hg.clone(myui, {}, url.encode('utf-8'),
                    local_path.encode('utf-8'), update=False, pull=True)
            except (RepoError, Abort) as e:
                sys.stderr.write("abort: %s\n" % e)
                if e.hint:
                    sys.stderr.write("(%s)\n" % e.hint)
                sys.exit(-1)

            self.repo = dstpeer.local()
        else:
            self.repo = hg.repository(myui, local_path.encode('utf-8'))
            self.peer = hg.peer(myui, {}, url.encode('utf-8'))
            hg_pull(self.repo, self.peer, None, True)

        self.marks.upgrade_marks(self)
Example #13
    def build_repo(self, url):
        '''Make the Mercurial repo object self.repo available. If the local
        clone does not exist, clone it, otherwise, ensure it is fetched.'''
        myui = ui()
        myui.setconfig('ui', 'interactive', 'off')
        myui.setconfig('extensions', 'mq', '')

        local_path = self.remotedir.joinpath('clone')
        if not local_path.exists():
            try:
                self.peer, dstpeer = hg.clone(myui, {}, url.encode('utf-8'),
                    local_path.encode('utf-8'), update=False, pull=True)
            except (RepoError, Abort) as e:
                sys.stderr.write("abort: %s\n" % e)
                if e.hint:
                    sys.stderr.write("(%s)\n" % e.hint)
                sys.exit(-1)

            self.repo = dstpeer.local()
        else:
            self.repo = hg.repository(myui, local_path.encode('utf-8'))
            self.peer = hg.peer(myui, {}, url.encode('utf-8'))
            self.repo.pull(self.peer, heads=None, force=True)

        self.marks.upgrade_marks(self.repo)
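
Examples #12 and #13 share the clone-or-pull shape most remote helpers use. Stripped of the marks bookkeeping, and under the same old-API assumptions (a peeropts dict for hg.clone, repo.pull accepting a peer), the core is roughly:

import os

from mercurial import hg

def clone_or_pull(myui, url, local_path):
    # Clone on first contact; pull into the existing clone on later syncs.
    if not os.path.exists(local_path):
        srcpeer, dstpeer = hg.clone(myui, {}, url, local_path,
                                    update=False, pull=True)
        return dstpeer.local()
    repo = hg.repository(myui, local_path)
    repo.pull(hg.peer(myui, {}, url), heads=None, force=True)
    return repo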
Example #14
    def create_repo(self, repo):
        self.ui = ui.ui()

        folder = self.get_repo_path()

        if os.path.exists(folder):
            return hg.repository(self.ui, folder)

        try:
            hg.clone(self.ui, dict(), str(repo.url), folder, pull=True)
        except ValueError:
            repo = hg.repository(self.ui, folder)

            hg.update(repo, node.hex(node.nullid))

        return self.create_repo(repo)
Example #15
    def clone_repo(self, repo, out_dir, to_rev=None):
        #if not to_rev is None:
        #    to_rev = repo[to_rev]

        return hg.clone(self.ui_, repo,
                        dest=os.path.join(self.test_root, out_dir),
                        pull=False, rev=to_rev, update=True, stream=False)[1]
Example #16
def clone(orig, ui, source, dest=None, **opts):
    """
    Some of the options listed below only apply to Subversion
    %(target)s. See 'hg help %(extension)s' for more information on
    them as well as other ways of customising the conversion process.
    """

    for opt, (section, name) in optionmap.iteritems():
        if opt in opts and opts[opt]:
            ui.setconfig(section, name, str(opts.pop(opt)))

    # this must be kept in sync with mercurial/commands.py
    srcrepo, dstrepo = hg.clone(cmdutil.remoteui(ui, opts), source, dest,
                                pull=opts.get('pull'),
                                stream=opts.get('uncompressed'),
                                rev=opts.get('rev'),
                                update=not opts.get('noupdate'))

    if dstrepo.local() and srcrepo.capable('subversion'):
        fd = dstrepo.opener("hgrc", "a", text=True)
        for section in set(s for s, v in optionmap.itervalues()):
            config = dict(ui.configitems(section))
            for name in dontretain[section]:
                config.pop(name, None)

            if config:
                fd.write('\n[%s]\n' % section)
                map(fd.write, ('%s = %s\n' % p for p in config.iteritems()))
Example #17
    def build_repo(self, url):
        """Make the Mercurial repo object self.repo available. If the local
        clone does not exist, clone it, otherwise, ensure it is fetched."""
        myui = ui()
        myui.setconfig("ui", "interactive", "off")
        myui.setconfig("extensions", "mq", "")
        # FIXME: the following is a hack to achieve hg-git / remote-git compatibility
        # at least for *local* operations. still need to figure out what the right
        # thing to do is.
        myui.setconfig("phases", "publish", False)

        local_path = self.remotedir.joinpath("clone")
        if not local_path.exists():
            try:
                self.peer, dstpeer = hg.clone(
                    myui, {}, url.encode("utf-8"), local_path.encode("utf-8"), update=False, pull=True
                )
            except (RepoError, Abort) as e:
                sys.stderr.write("abort: %s\n" % e)
                if e.hint:
                    sys.stderr.write("(%s)\n" % e.hint)
                sys.exit(-1)

            self.repo = dstpeer.local()
        else:
            self.repo = hg.repository(myui, local_path.encode("utf-8"))
            self.peer = hg.peer(myui, {}, url.encode("utf-8"))
            self.repo.pull(self.peer, heads=None, force=True)

        self.marks.upgrade_marks(self)
Example #18
    def build_repo(self, url):
        '''Make the Mercurial repo object self.repo available. If the local
        clone does not exist, clone it, otherwise, ensure it is fetched.'''
        myui = ui()
        myui.setconfig('ui', 'interactive', 'off')
        myui.setconfig('extensions', 'mq', '')
        # FIXME: the following is a hack to achieve hg-git / remote-git compatibility
        # at least for *local* operations. still need to figure out what the right
        # thing to do is.
        myui.setconfig('phases', 'publish', False)

        local_path = self.remotedir.joinpath('clone')
        if not local_path.exists():
            try:
                self.peer, dstpeer = hg.clone(myui, {},
                                              url.encode('utf-8'),
                                              local_path.encode('utf-8'),
                                              update=False,
                                              pull=True)
            except (RepoError, Abort) as e:
                sys.stderr.write("abort: %s\n" % e)
                if e.hint:
                    sys.stderr.write("(%s)\n" % e.hint)
                sys.exit(-1)

            self.repo = dstpeer.local()
        else:
            self.repo = hg.repository(myui, local_path.encode('utf-8'))
            self.peer = hg.peer(myui, {}, url.encode('utf-8'))
            self.repo.pull(self.peer, heads=None, force=True)

        self.marks.upgrade_marks(self)
Example #19
def _do_case(self, name, layout):
    subdir = test_util.subdir.get(name, '')
    self._load_fixture_and_fetch(name, subdir=subdir, stupid=False, layout=layout)
    assert len(self.repo) > 0, 'Repo had no changes, maybe you need to add a subdir entry in test_util?'
    wc2_path = self.wc_path + '_stupid'
    u = ui.ui()
    checkout_path = self.repo_path
    if subdir:
        checkout_path += '/' + subdir
    u.setconfig('hgsubversion', 'stupid', '1')
    u.setconfig('hgsubversion', 'layout', layout)
    hg.clone(u, test_util.fileurl(checkout_path), wc2_path, update=False)
    if layout == 'single':
        self.assertEqual(len(self.repo.heads()), 1)
    self.repo2 = hg.repository(ui.ui(), wc2_path)
    self.assertEqual(self.repo.heads(), self.repo2.heads())
Example #20
def _do_case(self, name, stupid, single):
    subdir = test_util.subdir.get(name, '')
    layout = 'auto'
    if single:
        layout = 'single'
    self._load_fixture_and_fetch(name, subdir=subdir, stupid=stupid, layout=layout)
    assert len(self.repo) > 0
    wc2_path = self.wc_path + '_clone'
    u = ui.ui()
    src, dest = hg.clone(u, self.wc_path, wc2_path, update=False)

    # insert a wrapper that prevents calling changectx.children()
    def failfn(orig, ctx):
        self.fail('calling %s is forbidden; it can cause massive slowdowns '
                  'when rebuilding large repositories' % orig)

    origchildren = getattr(context.changectx, 'children')
    extensions.wrapfunction(context.changectx, 'children', failfn)

    try:
        svncommands.rebuildmeta(u, dest,
                                args=[test_util.fileurl(self.repo_path +
                                                        subdir), ])
    finally:
        # remove the wrapper
        context.changectx.children = origchildren

    self.assertTrue(os.path.isdir(os.path.join(src.path, 'svn')),
                    'no .hg/svn directory in the source!')
    self.assertTrue(os.path.isdir(os.path.join(dest.path, 'svn')),
                    'no .hg/svn directory in the destination!')
    dest = hg.repository(u, os.path.dirname(dest.path))
    for tf in ('rev_map', 'uuid', 'tagmap', 'layout', 'subdir', ):
        stf = os.path.join(src.path, 'svn', tf)
        self.assertTrue(os.path.isfile(stf), '%r is missing!' % stf)
        dtf = os.path.join(dest.path, 'svn', tf)
        self.assertTrue(os.path.isfile(dtf), '%r is missing!' % dtf)
        old, new = open(stf).read(), open(dtf).read()
        self.assertMultiLineEqual(old, new)
        self.assertEqual(src.branchtags(), dest.branchtags())
    srcbi = pickle.load(open(os.path.join(src.path, 'svn', 'branch_info')))
    destbi = pickle.load(open(os.path.join(dest.path, 'svn', 'branch_info')))
    self.assertEqual(sorted(srcbi.keys()), sorted(destbi.keys()))
    revkeys = svnmeta.SVNMeta(dest).revmap.keys()
    for branch in destbi:
        srcinfo = srcbi[branch]
        destinfo = destbi[branch]
        if srcinfo[:2] == (None, 0) or destinfo[:2] == (None, 0):
            self.assertTrue(srcinfo[2] <= destinfo[2],
                            'Latest revision for %s decreased from %d to %d!'
                            % (branch or 'default', srcinfo[2], destinfo[2]))
            self.assertEqual(srcinfo[0], destinfo[0])
        else:
            pr = sorted(filter(lambda x: x[1] == srcinfo[0] and x[0] <= srcinfo[1],
                        revkeys), reverse=True)[0][0]
            self.assertEqual(pr, destinfo[1])
            self.assertEqual(srcinfo[2], destinfo[2])
Example #21
 def hg_clone(ui,
              peeropts,
              source,
              dest=None,
              pull=False,
              rev=None,
              update=True,
              stream=False,
              branch=None):
     rui = hg.remoteui(ui, peeropts)
     return hg.clone(rui, source, dest, pull, rev, update, stream)
Example #22
 def hg_clone(ui,
              peeropts,
              source,
              dest=None,
              pull=False,
              revs=None,
              update=True,
              stream=False,
              branch=None):
     rui = hg.remoteui(ui, peeropts)
     return hg.clone(rui, source, dest, pull, revs, update, stream)
Example #23
    def init_repo(self):
        logger.debug("init")
        self._send_callback(self.callback_on_action_notify,_('Initializing repository %s') % self._remote_path)

        try:
            logger.debug("Checkout %s on %s" % (self._remote_path, self.location))

            remote_repo, repo = hg.clone(self.ui, self._remote_path, self.location)
            commands.update(repo.ui, repo, self.branch)
            self._process_files()
            logger.debug("end")
        except RepoError as e:
            raise BrowserException(e)
Example #24
def kclone(ui, source, bookmark, dest=None):
    """Clone the source repo at the specified bookmark."""
    r = hg.clone(ui, peeropts={}, source=source, dest=dest, rev=[bookmark])
    if r is None:
        return 1
    srcrepo, destrepo = r

    # Clear any bookmarks that were carried over. We don't want or need them.
    destrepo._bookmarks.clear()
    bookmarks.write(destrepo)

    # Save the bookmark that we're tracking so that we can use it later
    _set_bookmark(destrepo, bookmark)
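
Because the clone passes rev=[bookmark], only ancestors of that bookmark are fetched. A hedged usage sketch with a hypothetical source and bookmark name:

from mercurial import ui as uimod

# Clone just the history reachable from the 'stable' bookmark.
kclone(uimod.ui(), 'http://example.com/repo', 'stable',
       dest='/tmp/stable-clone')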
Example #25
    def build_repo(self, url):
        '''Make the Mercurial repo object self.repo available. If the local
        clone does not exist, clone it, otherwise, ensure it is fetched.'''
        myui = ui()
        myui.setconfig('ui', 'interactive', 'off')
        myui.setconfig('extensions', 'mq', '')

        local_path = self.remotedir.joinpath('clone')
        if not local_path.exists():
            self.peer, dstpeer = hg.clone(myui, {}, url.encode('utf-8'),
                local_path.encode('utf-8'), update=False, pull=True)
            self.repo = dstpeer.local()
        else:
            self.repo = hg.repository(myui, local_path.encode('utf-8'))
            self.peer = hg.peer(myui, {}, url.encode('utf-8'))
            self.repo.pull(self.peer, heads=None, force=True)
Example #26
 def test_most_recent_is_edited(self, stupid=False):
     repo = self._load_fixture_and_fetch('most-recent-is-edit-tag.svndump',
                                         stupid=stupid)
     self.repo.ui.status(
         "Note: this test failing may be because of a rebuildmeta failure.\n"
         "You should check that before assuming issues with this test.\n")
     wc2_path = self.wc_path + '2'
     src, dest = hg.clone(repo.ui, self.wc_path, wc2_path, update=False)
      svncommands.rebuildmeta(repo.ui,
                              dest,
                              args=[test_util.fileurl(self.repo_path), ])
     commands.pull(self.repo.ui, self.repo, stupid=stupid)
     dtags, srctags = dest.tags(), self.repo.tags()
     dtags.pop('tip')
     srctags.pop('tip')
     self.assertEqual(dtags, srctags)
     self.assertEqual(dest.heads(), self.repo.heads())
Example #27
 def hg_clone(ui,
              peeropts,
              source,
              dest=None,
              pull=False,
              revs=None,
              update=True,
              stream=False,
              branch=None):
     return hg.clone(ui,
                     peeropts,
                     source,
                     dest=dest,
                     pull=pull,
                     rev=revs,
                     update=update,
                     stream=stream,
                     branch=branch)
Example #28
def _do_case(self, name, stupid, single):
    subdir = test_util.subdir.get(name, '')
    layout = 'auto'
    if single:
        layout = 'single'
    self._load_fixture_and_fetch(name, subdir=subdir, stupid=stupid, layout=layout)
    assert len(self.repo) > 0
    wc2_path = self.wc_path + '_clone'
    u = ui.ui()
    src, dest = hg.clone(u, self.wc_path, wc2_path, update=False)
    svncommands.rebuildmeta(u,
                            dest,
                            args=[test_util.fileurl(self.repo_path +
                                                    subdir), ])
    self.assertTrue(os.path.isdir(os.path.join(src.path, 'svn')),
                    'no .hg/svn directory in the source!')
    self.assertTrue(os.path.isdir(os.path.join(dest.path, 'svn')),
                    'no .hg/svn directory in the destination!')
    dest = hg.repository(u, os.path.dirname(dest.path))
    for tf in ('rev_map', 'uuid', 'tagmap', 'layout', ):
        stf = os.path.join(src.path, 'svn', tf)
        self.assertTrue(os.path.isfile(stf), '%r is missing!' % stf)
        dtf = os.path.join(dest.path, 'svn', tf)
        self.assertTrue(os.path.isfile(dtf), '%r is missing!' % dtf)
        old, new = open(stf).read(), open(dtf).read()
        # uncomment next line for easy-ish debugging.
        # os.system('diff -u %s %s' % (stf, dtf))
        self.assertEqual(old, new)
        self.assertEqual(src.branchtags(), dest.branchtags())
    srcbi = pickle.load(open(os.path.join(src.path, 'svn', 'branch_info')))
    destbi = pickle.load(open(os.path.join(dest.path, 'svn', 'branch_info')))
    self.assertEqual(sorted(srcbi.keys()), sorted(destbi.keys()))
    revkeys = svnmeta.SVNMeta(dest).revmap.keys()
    for branch in destbi:
        srcinfo = srcbi[branch]
        destinfo = destbi[branch]
        if srcinfo[:2] == (None, 0) or destinfo[:2] == (None, 0):
            self.assert_(srcinfo[2] <= destinfo[2])
            self.assertEqual(srcinfo[0], destinfo[0])
        else:
            pr = sorted(filter(lambda x: x[1] == srcinfo[0] and x[0] <= srcinfo[1],
                        revkeys), reverse=True)[0][0]
            self.assertEqual(pr, destinfo[1])
            self.assertEqual(srcinfo[2], destinfo[2])
Example #29
 def clone_from(self, source_url):
     '''Initialize a repo as a clone of another'''
     fullname = self._setup_paths(create_repo_dir=False)
     if os.path.exists(fullname):
         shutil.rmtree(fullname)
     log.info('Initialize %r as a clone of %s', self._repo, source_url)
     # !$ hg doesn't like unicode as urls
     src, repo = hg.clone(ui.ui(),
                          source_url.encode('utf-8'),
                          self._repo.full_fs_path.encode('utf-8'),
                          update=False)
     self.__dict__['_hg'] = repo
     self._setup_special_files()
     self._repo.status = 'analyzing'
     session(self._repo).flush()
     log.info('... %r cloned, analyzing', self._repo)
     self._repo.refresh(notify=False)
     self._repo.status = 'ready'
     log.info('... %s ready', self._repo)
     session(self._repo).flush()
Example #30
    def clone(self, dest, rev=None, update=True):
        """\
        Clones this repository to target destination `dest'.

        dest -
            the destination.
        rev -
            specifies specific revisions to clone.
        """

        if not isinstance(dest, basestring):
            raise TypeError('dest must be an instance of basestring')

        if isinstance(dest, unicode):
            dest = dest.encode('utf8')

        dest = os.path.normpath(dest)

        if os.path.exists(dest):
            raise PathExistsError('dest already exists')

        pdir = os.path.split(dest)[0]
        if not os.path.exists(pdir):
            # try to create parent dir.
            try:
                os.makedirs(pdir, mode=0700)
            except:
                raise PathInvalidError(
                        'cannot create directory with specified path')

        if rev:
            try:
                rev = [self._repo.lookup(rev)]
            except:
                raise RevisionNotFoundError('revision %s not found' % rev)

        clone_result = hg.clone(self._ui, {}, source=self._rpath, dest=dest, 
                rev=rev, update=update)
        repo, repo_clone = clone_result
        # Rebind self._repo, since it was reinitialized by the clone; the
        # self reference is always a local repository.
        self._repo = repo.local()
Example #31
    def test_branchmap_rebuildmeta(self, stupid=False):
        '''test rebuildmeta on a branchmapped clone'''
        test_util.load_svndump_fixture(self.repo_path, 'branchmap.svndump')
        branchmap = open(self.branchmap, 'w')
        branchmap.write("badname = dit\n")
        branchmap.write("feature = dah\n")
        branchmap.close()
        ui = self.ui(stupid)
        ui.setconfig('hgsubversion', 'branchmap', self.branchmap)
        commands.clone(ui, test_util.fileurl(self.repo_path),
                       self.wc_path, branchmap=self.branchmap)
        originfo = self.repo.svnmeta().branches

        # clone & rebuild
        ui = self.ui(stupid)
        src, dest = hg.clone(ui, self.wc_path, self.wc_path + '_clone',
                             update=False)
        svncommands.rebuildmeta(ui, dest,
                                args=[test_util.fileurl(self.repo_path)])

        # just check the keys; assume the contents are unaffected by the branch
        # map and thus properly tested by other tests
        self.assertEquals(sorted(src.svnmeta().branches),
                          sorted(dest.svnmeta().branches))
Example #32
 def clone_from(self, source_url):
     '''Initialize a repo as a clone of another'''
     self._repo.status = 'cloning'
     session(self._repo).flush(self._repo)
     log.info('Initialize %r as a clone of %s',
              self._repo, source_url)
     try:
         fullname = self._setup_paths(create_repo_dir=False)
         if os.path.exists(fullname):
             shutil.rmtree(fullname)
         # !$ hg doesn't like unicode as urls
         src, repo = hg.clone(
             ui.ui(),
             source_url.encode('utf-8'),
             self._repo.full_fs_path.encode('utf-8'),
             update=False)
         self.__dict__['_hg'] = repo
         self._setup_special_files()
     except:
         self._repo.status = 'raise'
         session(self._repo).flush(self._repo)
         raise
     log.info('... %r cloned', self._repo)
     self._repo.refresh(notify=False)
Example #33
def _docheckout(ui,
                url,
                dest,
                upstream,
                revision,
                branch,
                purge,
                sharebase,
                networkattemptlimit,
                networkattempts=None,
                sparse_profile=None):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(ui,
                           url,
                           dest,
                           upstream,
                           revision,
                           branch,
                           purge,
                           sharebase,
                           networkattemptlimit,
                           networkattempts=networkattempts,
                           sparse_profile=sparse_profile)

    ui.write('ensuring %s@%s is available at %s\n' %
             (url, revision or branch, dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = getvfs()(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        storepath = path or destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = getvfs()(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Refuse to enable sparse checkouts on existing checkouts. The reasoning
    # here is that another consumer of this repo may not be sparse aware. If we
    # enabled sparse, we would lock them out.
    if destvfs.exists(
    ) and sparse_profile and not destvfs.exists('.hg/sparse'):
        raise error.Abort(
            'cannot enable sparse profile on existing '
            'non-sparse checkout',
            hint='use a separate working directory to use sparse')

    # And the other direction for symmetry.
    if not sparse_profile and destvfs.exists('.hg/sparse'):
        raise error.Abort(
            'cannot use non-sparse checkout on existing sparse '
            'checkout',
            hint='use a separate working directory to use sparse')

    # Require checkouts to be tied to shared storage because efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting destination)\n')
            destvfs.rmtree(forcibly=True)
        elif not re.search('[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting destination to improve efficiency)\n')
            destvfs.rmtree(forcibly=True)

    if destvfs.isfileorlink('.hg/wlock'):
        ui.warn('(dest has an active working directory lock; assuming it is '
                'left over from a previous process and that the destination '
                'is corrupt; deleting it just to be sure)\n')
        destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort('reached maximum number of network attempts; '
                              'giving up\n')

        ui.warn('(retrying after network failure on attempt %d of %d)\n' %
                (networkattempts[0], networkattemptlimit))

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multipler
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2**networkattempts[0] - 1) * 1.5
        jittermin = ui.configint('robustcheckout', 'retryjittermin', 1000)
        jittermax = ui.configint('robustcheckout', 'retryjittermax', 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn('(waiting %.2fs before retry)\n' % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _('repository is unrelated'):
                ui.warn('(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_('stream ended unexpectedly')):
                ui.warn('%s\n' % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn('ssl error: %s\n' % e)
            handlenetworkfailure()
            return True
        elif isinstance(e, urllib2.URLError):
            if isinstance(e.reason, socket.error):
                ui.warn('socket error: %s\n' % e.reason)
                handlenetworkfailure()
                return True
            else:
                ui.warn('unhandled URLError; reason type: %s; value: %s' %
                        (e.reason.__class__.__name__, e.reason))
        else:
            ui.warn('unhandled exception during network operation; type: %s; '
                    'value: %s' % (e.__class__.__name__, e))

        return False

    # Perform sanity checking of store. We may or may not know the path to the
    # local store. It depends if we have an existing destvfs pointing to a
    # share. To ensure we always find a local store, perform the same logic
    # that Mercurial's pooled storage does to resolve the local store path.
    cloneurl = upstream or url

    try:
        clonepeer = hg.peer(ui, {}, cloneurl)
        rootnode = clonepeer.lookup('0')
    except error.RepoLookupError:
        raise error.Abort('unable to resolve root revision from clone '
                          'source')
    except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
        if handlepullerror(e):
            return callself()
        raise

    if rootnode == nullid:
        raise error.Abort('source repo appears to be empty')

    storepath = os.path.join(sharebase, hex(rootnode))
    storevfs = getvfs()(storepath, audit=False)

    if storevfs.isfileorlink('.hg/store/lock'):
        ui.warn('(shared store has an active lock; assuming it is left '
                'over from a previous process and that the store is '
                'corrupt; deleting store and destination just to be '
                'sure)\n')
        if destvfs.exists():
            destvfs.rmtree(forcibly=True)
        storevfs.rmtree(forcibly=True)

    if storevfs.exists() and not storevfs.exists('.hg/requires'):
        ui.warn('(shared store missing requires file; this is a really '
                'odd failure; deleting store and destination)\n')
        if destvfs.exists():
            destvfs.rmtree(forcibly=True)
        storevfs.rmtree(forcibly=True)

    if storevfs.exists('.hg/requires'):
        requires = set(storevfs.read('.hg/requires').splitlines())
        # FUTURE when we require generaldelta, this is where we can check
        # for that.
        required = {'dotencode', 'fncache'}

        missing = required - requires
        if missing:
            ui.warn('(shared store missing requirements: %s; deleting '
                    'store and destination to ensure optimal behavior)\n' %
                    ', '.join(sorted(missing)))
            if destvfs.exists():
                destvfs.rmtree(forcibly=True)
            storevfs.rmtree(forcibly=True)

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)

        try:
            res = hg.clone(ui, {},
                           clonepeer,
                           dest=dest,
                           update=False,
                           shareopts={
                               'pool': sharebase,
                               'mode': 'identity'
                           })
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False
    if revision and revision in repo:
        ctx = repo[revision]

        if not ctx.hex().startswith(revision):
            raise error.Abort('--revision argument is ambiguous',
                              hint='must be the first 12+ characters of a '
                              'SHA-1 fragment')

        checkoutrevision = ctx.hex()
        havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch, ))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [remote.lookup(revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn('(remote resolved %s to %s; '
                        'result is not deterministic)\n' %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn('(revision already present locally; not pulling)\n')
            else:
                pullop = exchange.pull(repo, remote, heads=pullrevs)
                if not pullop.rheads:
                    raise error.Abort('unable to pull requested revision')
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        # Mercurial 4.3 doesn't purge files outside the sparse checkout.
        # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
        # purging by monkeypatching the sparse matcher.
        try:
            old_sparse_fn = getattr(repo.dirstate, '_sparsematchfn', None)
            if old_sparse_fn is not None:
                assert util.versiontuple(n=2) in ((4, 3), (4, 4), (4, 5))
                repo.dirstate._sparsematchfn = lambda: matchmod.always(
                    repo.root, '')

            if purgeext.purge(
                    ui,
                    repo,
                    all=True,
                    abort_on_err=True,
                    # The function expects all arguments to be
                    # defined.
                    **{
                        'print': None,
                        'print0': None,
                        'dirs': None,
                        'files': None
                    }):
                raise error.Abort('error purging')
        finally:
            if old_sparse_fn is not None:
                repo.dirstate._sparsematchfn = old_sparse_fn

    # Update the working directory.

    if sparse_profile:
        sparsemod = getsparse()

        # By default, Mercurial will ignore unknown sparse profiles. This could
        # lead to a full checkout. Be more strict.
        try:
            repo.filectx(sparse_profile, changeid=checkoutrevision).data()
        except error.ManifestLookupError:
            raise error.Abort('sparse profile %s does not exist at revision '
                              '%s' % (sparse_profile, checkoutrevision))

        old_config = sparsemod.parseconfig(repo.ui, repo.vfs.tryread('sparse'))
        old_includes, old_excludes, old_profiles = old_config

        if old_profiles == {sparse_profile} and not old_includes and not \
                old_excludes:
            ui.write('(sparse profile %s already set; no need to update '
                     'sparse config)\n' % sparse_profile)
        else:
            if old_includes or old_excludes or old_profiles:
                ui.write('(replacing existing sparse config with profile '
                         '%s)\n' % sparse_profile)
            else:
                ui.write('(setting sparse config to profile %s)\n' %
                         sparse_profile)

            # If doing an incremental update, this will perform two updates:
            # one to change the sparse profile and another to update to the new
            # revision. This is not desired. But there's not a good API in
            # Mercurial to do this as one operation.
            with repo.wlock():
                fcounts = map(
                    len,
                    sparsemod._updateconfigandrefreshwdir(repo, [], [],
                                                          [sparse_profile],
                                                          force=True))

                repo.ui.status('%d files added, %d files dropped, '
                               '%d files conflicting\n' % tuple(fcounts))

            ui.write('(sparse refresh complete)\n')

    if commands.update(ui, repo, rev=checkoutrevision, clean=True):
        raise error.Abort('error updating')

    ui.write('updated to %s\n' % checkoutrevision)
    return None
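
A hedged invocation sketch for the checkout driver above, assuming a configured ui object is in scope and using hypothetical URL, destination, and share base; revision and branch are alternatives, and here a branch is used:

_docheckout(ui, 'https://hg.example.com/repo', '/builds/checkout',
            upstream=None, revision=None, branch='default',
            purge=True, sharebase='/builds/hg-shared',
            networkattemptlimit=3)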
Example #34
def clone_cache_cmd(ui, source, dest=None, **opts):
    source_url = url(source)
    if source_url.fragment is not None:
        raise ValueError('Someone is being clever! We are not clever. Bail.')

    orig_source = source
    cache_source = os.path.join(CACHE, url_to_filename(source))
    was_cached = False
    clone_source = source
    if not opts.get('nocache'):
        was_cached = os.path.exists(cache_source)
        if was_cached:
            ui.status('cloning from cache {}\n'.format(cache_source))
            clone_source = cache_source
            if dest is None:
                dest = hg.defaultdest(source)
            if opts.get('rev'):
                ui.status('updating cache {} to rev {}\n'.format(cache_source, opts.get('rev')))
                cache_peer = hg.peer(ui, {}, cache_source)
                commands.pull(cache_peer.ui, cache_peer.local(), noupdate=True, rev=opts.get('rev'))
        else:
            ui.status('no cache found at {}, cloning from source {}\n'.format(
                cache_source, source))
    
    if opts.get('noupdate') and opts.get('updaterev'):
        raise util.Abort(_("cannot specify both --noupdate and --updaterev"))

    r = hg.clone(ui, opts, clone_source, dest,
                 pull=opts.get('pull'),
                 stream=opts.get('uncompressed'),
                 rev=opts.get('rev'),
                 update=opts.get('updaterev') or not opts.get('noupdate'),
                 branch=opts.get('branch'))

    if r is None:
        return True

    source_peer, dest_peer = r

    if was_cached:
        dest_repo = dest_peer.local()
        if dest_repo:
            orig_source = dest_repo.ui.expandpath(orig_source)
            abspath = orig_source
            if hg.islocal(orig_source):
                abspath = os.path.abspath(hg.util.urllocalpath(orig_source))

            u = url(abspath)
            u.passwd = None
            defaulturl = str(u)
            fp = dest_repo.opener("hgrc", "w", text=True)
            fp.write("[paths]\n")
            fp.write("default = %s\n" % defaulturl)
            fp.write('\n')
            fp.write('[clonecache]\n')
            fp.write('cache = %s\n' % cache_source)
            fp.close()

            dest_repo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            commands.pull(dest_repo.ui, dest_repo)

            commands.update(ui, dest_repo)

    return False
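
Wired up as an extension command, the function above would typically be registered through an old-style cmdtable entry; a hedged sketch with the option list abbreviated:

cmdtable = {
    'clonecache': (clone_cache_cmd,
                   [('', 'nocache', None, 'bypass the local clone cache'),
                    ('r', 'rev', [], 'clone only the specified revisions'),
                    ('U', 'noupdate', None, 'do not update the working copy')],
                   'hg clonecache SOURCE [DEST]'),
}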
Example #35
def _docheckout(ui, url, dest, upstream, revision, branch, purge, sharebase,
                networkattemptlimit, networkattempts=None, sparse_profile=None):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(ui, url, dest, upstream, revision, branch, purge,
                           sharebase, networkattemptlimit,
                           networkattempts=networkattempts,
                           sparse_profile=sparse_profile)

    ui.write('ensuring %s@%s is available at %s\n' % (url, revision or branch,
                                                      dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = getvfs()(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        storepath = path or destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = getvfs()(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Refuse to enable sparse checkouts on existing checkouts. The reasoning
    # here is that another consumer of this repo may not be sparse aware. If we
    # enabled sparse, we would lock them out.
    if destvfs.exists() and sparse_profile and not destvfs.exists('.hg/sparse'):
        raise error.Abort('cannot enable sparse profile on existing '
                          'non-sparse checkout',
                          hint='use a separate working directory to use sparse')

    # And the other direction for symmetry.
    if not sparse_profile and destvfs.exists('.hg/sparse'):
        raise error.Abort('cannot use non-sparse checkout on existing sparse '
                          'checkout',
                          hint='use a separate working directory to use sparse')

    # Require checkouts to be tied to shared storage because efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting destination)\n')
            destvfs.rmtree(forcibly=True)
        elif not re.search('[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting destination to improve efficiency)\n')
            destvfs.rmtree(forcibly=True)

    if destvfs.isfileorlink('.hg/wlock'):
        ui.warn('(dest has an active working directory lock; assuming it is '
                'left over from a previous process and that the destination '
                'is corrupt; deleting it just to be sure)\n')
        destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort('reached maximum number of network attempts; '
                              'giving up\n')

        ui.warn('(retrying after network failure on attempt %d of %d)\n' %
                (networkattempts[0], networkattemptlimit))

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multipler
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2 ** networkattempts[0] - 1) * 1.5
        jittermin = ui.configint('robustcheckout', 'retryjittermin', 1000)
        jittermax = ui.configint('robustcheckout', 'retryjittermax', 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn('(waiting %.2fs before retry)\n' % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _('repository is unrelated'):
                ui.warn('(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_('stream ended unexpectedly')):
                ui.warn('%s\n' % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn('ssl error: %s\n' % e)
            handlenetworkfailure()
            return True
        elif isinstance(e, urllib2.URLError):
            if isinstance(e.reason, socket.error):
                ui.warn('socket error: %s\n' % e.reason)
                handlenetworkfailure()
                return True
            else:
                ui.warn('unhandled URLError; reason type: %s; value: %s\n' % (
                    e.reason.__class__.__name__, e.reason))
        else:
            ui.warn('unhandled exception during network operation; type: %s; '
                    'value: %s\n' % (e.__class__.__name__, e))

        return False

    # Perform sanity checking of store. We may or may not know the path to the
    # local store. It depends if we have an existing destvfs pointing to a
    # share. To ensure we always find a local store, perform the same logic
    # that Mercurial's pooled storage does to resolve the local store path.
    cloneurl = upstream or url

    try:
        clonepeer = hg.peer(ui, {}, cloneurl)
        rootnode = clonepeer.lookup('0')
    except error.RepoLookupError:
        raise error.Abort('unable to resolve root revision from clone '
                          'source')
    except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
        if handlepullerror(e):
            return callself()
        raise

    if rootnode == nullid:
        raise error.Abort('source repo appears to be empty')

    storepath = os.path.join(sharebase, hex(rootnode))
    storevfs = getvfs()(storepath, audit=False)

    if storevfs.isfileorlink('.hg/store/lock'):
        ui.warn('(shared store has an active lock; assuming it is left '
                'over from a previous process and that the store is '
                'corrupt; deleting store and destination just to be '
                'sure)\n')
        if destvfs.exists():
            destvfs.rmtree(forcibly=True)
        storevfs.rmtree(forcibly=True)

    if storevfs.exists() and not storevfs.exists('.hg/requires'):
        ui.warn('(shared store missing requires file; this is a really '
                'odd failure; deleting store and destination)\n')
        if destvfs.exists():
            destvfs.rmtree(forcibly=True)
        storevfs.rmtree(forcibly=True)

    if storevfs.exists('.hg/requires'):
        requires = set(storevfs.read('.hg/requires').splitlines())
        # FUTURE when we require generaldelta, this is where we can check
        # for that.
        required = {'dotencode', 'fncache'}

        missing = required - requires
        if missing:
            ui.warn('(shared store missing requirements: %s; deleting '
                    'store and destination to ensure optimal behavior)\n' %
                    ', '.join(sorted(missing)))
            if destvfs.exists():
                destvfs.rmtree(forcibly=True)
            storevfs.rmtree(forcibly=True)

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)

        try:
            res = hg.clone(ui, {}, clonepeer, dest=dest, update=False,
                           shareopts={'pool': sharebase, 'mode': 'identity'})
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' % e.message)
            deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False
    if revision and revision in repo:
        ctx = repo[revision]

        if not ctx.hex().startswith(revision):
            raise error.Abort('--revision argument is ambiguous',
                              hint='must be the first 12+ characters of a '
                                   'SHA-1 fragment')

        checkoutrevision = ctx.hex()
        havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch,))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [remote.lookup(revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn('(remote resolved %s to %s; '
                        'result is not deterministic)\n' %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn('(revision already present locally; not pulling)\n')
            else:
                pullop = exchange.pull(repo, remote, heads=pullrevs)
                if not pullop.rheads:
                    raise error.Abort('unable to pull requested revision')
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' % e.message)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        # Mercurial 4.3 doesn't purge files outside the sparse checkout.
        # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
        # purging by monkeypatching the sparse matcher.
        try:
            old_sparse_fn = getattr(repo.dirstate, '_sparsematchfn', None)
            if old_sparse_fn is not None:
                assert util.versiontuple(n=2) in ((4, 3), (4, 4), (4, 5))
                repo.dirstate._sparsematchfn = lambda: matchmod.always(repo.root, '')

            if purgeext.purge(ui, repo, all=True, abort_on_err=True,
                              # The function expects all arguments to be
                              # defined.
                              **{'print': None, 'print0': None, 'dirs': None,
                                 'files': None}):
                raise error.Abort('error purging')
        finally:
            if old_sparse_fn is not None:
                repo.dirstate._sparsematchfn = old_sparse_fn

    # Update the working directory.

    if sparse_profile:
        sparsemod = getsparse()

        # By default, Mercurial will ignore unknown sparse profiles. This could
        # lead to a full checkout. Be more strict.
        try:
            repo.filectx(sparse_profile, changeid=checkoutrevision).data()
        except error.ManifestLookupError:
            raise error.Abort('sparse profile %s does not exist at revision '
                              '%s' % (sparse_profile, checkoutrevision))

        old_config = sparsemod.parseconfig(repo.ui, repo.vfs.tryread('sparse'))
        old_includes, old_excludes, old_profiles = old_config

        if old_profiles == {sparse_profile} and not old_includes and not \
                old_excludes:
            ui.write('(sparse profile %s already set; no need to update '
                     'sparse config)\n' % sparse_profile)
        else:
            if old_includes or old_excludes or old_profiles:
                ui.write('(replacing existing sparse config with profile '
                         '%s)\n' % sparse_profile)
            else:
                ui.write('(setting sparse config to profile %s)\n' %
                         sparse_profile)

            # If doing an incremental update, this will perform two updates:
            # one to change the sparse profile and another to update to the new
            # revision. This is not desired. But there's not a good API in
            # Mercurial to do this as one operation.
            with repo.wlock():
                fcounts = map(len, sparsemod._updateconfigandrefreshwdir(
                    repo, [], [], [sparse_profile], force=True))

                repo.ui.status('%d files added, %d files dropped, '
                               '%d files conflicting\n' % tuple(fcounts))

            ui.write('(sparse refresh complete)\n')

    if commands.update(ui, repo, rev=checkoutrevision, clean=True):
        raise error.Abort('error updating')

    ui.write('updated to %s\n' % checkoutrevision)
    return None
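
The retry path above (handlenetworkfailure) uses exponential backoff with random jitter, exactly as its comment describes. A standalone sketch, assuming the default jitter bounds of 1000-5000 ms (backoff_seconds is a hypothetical helper, not part of the extension):

import random

def backoff_seconds(attempt, jittermin=1000, jittermax=5000):
    # Exponential component: 1.5, 4.5, 10.5, ... seconds for attempts 1, 2, 3.
    backoff = (2 ** attempt - 1) * 1.5
    # Jitter (milliseconds, converted to seconds) spreads retries out so that
    # many clients do not stampede the server after a shared outage.
    backoff += float(random.randint(jittermin, jittermax)) / 1000.0
    return backoff

for attempt in (1, 2, 3):
    # Prints values inside the documented windows: 2.5-6.5, 5.5-9.5, 11.5-15.5.
    print('attempt %d: waiting %.2fs' % (attempt, backoff_seconds(attempt)))
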
def _docheckout(ui,
                url,
                dest,
                upstream,
                revision,
                branch,
                purge,
                sharebase,
                networkattemptlimit,
                networkattempts=None):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(ui, url, dest, upstream, revision, branch, purge,
                           sharebase, networkattemptlimit, networkattempts)

    ui.write('ensuring %s@%s is available at %s\n' %
             (url, revision or branch, dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = getvfs()(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        storepath = path or destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = getvfs()(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Require checkouts to be tied to shared storage for efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting destination)\n')
            destvfs.rmtree(forcibly=True)
        elif not re.search(r'[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting destination to improve efficiency)\n')
            destvfs.rmtree(forcibly=True)

        storevfs = getvfs()(storepath, audit=False)
        if storevfs.isfileorlink('store/lock'):
            ui.warn('(shared store has an active lock; assuming it is left '
                    'over from a previous process and that the store is '
                    'corrupt; deleting store and destination just to be '
                    'sure)\n')
            destvfs.rmtree(forcibly=True)
            deletesharedstore(storepath)

        # FUTURE when we require generaldelta, this is where we can check
        # for that.

    if destvfs.isfileorlink('.hg/wlock'):
        ui.warn('(dest has an active working directory lock; assuming it is '
                'left over from a previous process and that the destination '
                'is corrupt; deleting it just to be sure)\n')
        destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort('reached maximum number of network attempts; '
                              'giving up\n')

        ui.warn('(retrying after network failure on attempt %d of %d)\n' %
                (networkattempts[0], networkattemptlimit))

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2**networkattempts[0] - 1) * 1.5
        jittermin = ui.configint('robustcheckout', 'retryjittermin', 1000)
        jittermax = ui.configint('robustcheckout', 'retryjittermax', 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn('(waiting %.2fs before retry)\n' % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _('repository is unrelated'):
                ui.warn('(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_('stream ended unexpectedly')):
                ui.warn('%s\n' % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn('ssl error: %s\n' % e)
            handlenetworkfailure()
            return True
        elif isinstance(e, urllib2.URLError):
            if isinstance(e.reason, socket.error):
                ui.warn('socket error: %s\n' % e.reason)
                handlenetworkfailure()
                return True

        return False

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)
        cloneurl = upstream or url

        try:
            res = hg.clone(ui, {},
                           cloneurl,
                           dest=dest,
                           update=False,
                           shareopts={
                               'pool': sharebase,
                               'mode': 'identity'
                           })
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False
    if revision and revision in repo:
        ctx = repo[revision]

        if not ctx.hex().startswith(revision):
            raise error.Abort('--revision argument is ambiguous',
                              hint='must be the first 12+ characters of a '
                              'SHA-1 fragment')

        checkoutrevision = ctx.hex()
        havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch, ))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [remote.lookup(revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn('(remote resolved %s to %s; '
                        'result is not deterministic)\n' %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn('(revision already present locally; not pulling)\n')
            else:
                pullop = exchange.pull(repo, remote, heads=pullrevs)
                if not pullop.rheads:
                    raise error.Abort('unable to pull requested revision')
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        if purgeext.purge(
                ui,
                repo,
                all=True,
                abort_on_err=True,
                # The function expects all arguments to be
                # defined.
                **{
                    'print': None,
                    'print0': None,
                    'dirs': None,
                    'files': None
                }):
            raise error.Abort('error purging')

    # Update the working directory.
    if commands.update(ui, repo, rev=checkoutrevision, clean=True):
        raise error.Abort('error updating')

    ui.write('updated to %s\n' % checkoutrevision)
    return None
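
The pooled-storage check in these examples encodes a path convention: a shared store lives in a directory named after the 40-character hex node of revision 0, with .hg inside it. A minimal sketch of that test (is_pooled_store is a hypothetical helper, not part of the extension):

import re

def is_pooled_store(storepath):
    # Normalize Windows separators first, as the examples above do.
    normalized = storepath.replace('\\', '/')
    return re.search(r'[a-f0-9]{40}/\.hg$', normalized) is not None

assert is_pooled_store('/share/' + 'a' * 40 + '/.hg')
assert not is_pooled_store('/home/user/myrepo/.hg')
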
    def _load_fixture_and_fetch_with_anchor(self, fixture_name, anchor):
        test_util.load_svndump_fixture(self.repo_path, fixture_name)
        source = '%s#%s' % (test_util.fileurl(self.repo_path), anchor)
        repo = hg.clone(self.ui(), source=source, dest=self.wc_path)
        return hg.repository(self.ui(), self.wc_path)
Example #38
0
    def create_from(self, source):
        hg.clone(hg.remoteui(self.ui, {}), {}, str(source), self.path.strpath)

def _docheckout(ui, url, dest, upstream, revision, branch, purge, sharebase):
    def callself():
        return _docheckout(ui, url, dest, upstream, revision, branch, purge,
                           sharebase)

    ui.write('ensuring %s@%s is available at %s\n' % (url, revision or branch,
                                                      dest))

    destvfs = scmutil.vfs(dest, audit=False, realpath=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Require checkouts to be tied to shared storage for efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting)\n')
            destvfs.rmtree(forcibly=True)
        elif not re.search(r'[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting to improve efficiency)\n')
            destvfs.rmtree(forcibly=True)

        # FUTURE when we require generaldelta, this is where we can check
        # for that.

    def deletesharedstore():
        storepath = destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = scmutil.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.
    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)
        cloneurl = upstream or url

        try:
            res = hg.clone(ui, {}, cloneurl, dest=dest, update=False,
                           shareopts={'pool': sharebase, 'mode': 'identity'})
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' % e.message)
            deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False
    if revision and revision in repo:
        ctx = repo[revision]

        if not ctx.hex().startswith(revision):
            raise error.Abort('--revision argument is ambiguous',
                              hint='must be the first 12+ characters of a '
                                   'SHA-1 fragment')

        havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch,))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [remote.lookup(revision or branch)]
            pullop = exchange.pull(repo, remote, heads=pullrevs)
            if not pullop.rheads:
                raise error.Abort('unable to pull requested revision')
        except error.Abort as e:
            if e.message == _('repository is unrelated'):
                ui.warn('(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return callself()

            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' % e.message)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        if purgeext.purge(ui, repo, all=True, abort_on_err=True,
                          # The function expects all arguments to be
                          # defined.
                          **{'print': None, 'print0': None, 'dirs': None,
                             'files': None}):
            raise error.Abort('error purging')

    # Update the working directory.
    if commands.update(ui, repo, rev=revision or branch, clean=True):
        raise error.Abort('error updating')

    ctx = repo[revision or branch]
    ui.write('updated to %s\n' % ctx.hex())
    return None
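
When --revision is a hash prefix, the newer examples double-check that repo[revision] resolved to a changeset whose full hex actually starts with the argument; repo[...] can also match tags or bookmarks, which would silently check out the wrong thing. A sketch of just that rule (check_revision_prefix is hypothetical):

def check_revision_prefix(fullhex, revision):
    # fullhex is ctx.hex(); revision is the user-supplied argument.
    if not fullhex.startswith(revision):
        raise ValueError('--revision argument is ambiguous')
    return fullhex

check_revision_prefix('ab' * 20, 'abab')  # a genuine SHA-1 prefix passes
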
import os, sys, time
from mercurial import hg, ui, commands

TESTDIR = os.environ["TESTDIR"]

# only makes sense to test on os which supports symlinks
if not hasattr(os, "symlink"):
    sys.exit(80) # SKIPPED_STATUS defined in run-tests.py

# clone with symlink support
u = ui.ui()
hg.clone(u, os.path.join(TESTDIR, 'test-no-symlinks.hg'), 'test0')

repo = hg.repository(u, 'test0')

# wait a bit, or the status call won't update the dirstate
time.sleep(1)
commands.status(u, repo)

# now disable symlink support -- this is what os.symlink would do on a
# non-symlink file system
def symlink_failure(src, dst):
    raise OSError(1, "Operation not permitted")
os.symlink = symlink_failure

# dereference links as if a Samba server has exported this to a
# Windows client
for f in 'test0/a.lnk', 'test0/d/b.lnk':
    os.unlink(f)
    fp = open(f, 'wb')
    fp.write(open(f[:-4]).read())
    fp.close()
Example #41
0
import os, sys, time
from mercurial import hg, ui, commands, util

TESTDIR = os.environ["TESTDIR"]
BUNDLEPATH = os.path.join(TESTDIR, 'bundles', 'test-no-symlinks.hg')

# only makes sense to test on os which supports symlinks
if not getattr(os, "symlink", False):
    sys.exit(80) # SKIPPED_STATUS defined in run-tests.py

u = ui.ui()
# hide outer repo
hg.peer(u, {}, '.', create=True)

# clone with symlink support
hg.clone(u, {}, BUNDLEPATH, 'test0')

repo = hg.repository(u, 'test0')

# wait a bit, or the status call won't update the dirstate
time.sleep(1)
commands.status(u, repo)

# now disable symlink support -- this is what os.symlink would do on a
# non-symlink file system
def symlink_failure(src, dst):
    raise OSError(1, "Operation not permitted")
os.symlink = symlink_failure

# dereference links as if a Samba server has exported this to a
# Windows client
for f in 'test0/a.lnk', 'test0/d/b.lnk':
    os.unlink(f)
    fp = open(f, 'wb')
    fp.write(open(f[:-4]).read())
    fp.close()
Example #42
0
def _docheckout(ui, url, dest, upstream, revision, branch, purge, sharebase,
                networkattemptlimit, networkattempts=None):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(ui, url, dest, upstream, revision, branch, purge,
                           sharebase, networkattemptlimit, networkattempts)

    ui.write('ensuring %s@%s is available at %s\n' % (url, revision or branch,
                                                      dest))

    destvfs = scmutil.vfs(dest, audit=False, realpath=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Require checkouts to be tied to shared storage for efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting)\n')
            destvfs.rmtree(forcibly=True)
        elif not re.search(r'[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting to improve efficiency)\n')
            destvfs.rmtree(forcibly=True)

        # FUTURE when we require generaldelta, this is where we can check
        # for that.

    def deletesharedstore():
        storepath = destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = scmutil.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort('reached maximum number of network attempts; '
                              'giving up\n')

        ui.warn('(retrying after network failure on attempt %d of %d)\n' %
                (networkattempts[0], networkattemptlimit))

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2 ** networkattempts[0] - 1) * 1.5
        jittermin = ui.configint('robustcheckout', 'retryjittermin', 1000)
        jittermax = ui.configint('robustcheckout', 'retryjittermax', 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn('(waiting %.2fs before retry)\n' % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _('repository is unrelated'):
                ui.warn('(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_('stream ended unexpectedly')):
                ui.warn('%s\n' % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, urllib2.URLError):
            if isinstance(e.reason, socket.error):
                ui.warn('socket error: %s\n' % e.reason)
                handlenetworkfailure()
                return True

        return False

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)
        cloneurl = upstream or url

        try:
            res = hg.clone(ui, {}, cloneurl, dest=dest, update=False,
                           shareopts={'pool': sharebase, 'mode': 'identity'})
        except (error.Abort, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' % e.message)
            deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False
    if revision and revision in repo:
        ctx = repo[revision]

        if not ctx.hex().startswith(revision):
            raise error.Abort('--revision argument is ambiguous',
                              hint='must be the first 12+ characters of a '
                                   'SHA-1 fragment')

        checkoutrevision = ctx.hex()
        havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch,))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [remote.lookup(revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn('(remote resolved %s to %s; '
                        'result is not deterministic)\n' %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn('(revision already present locally; not pulling)\n')
            else:
                pullop = exchange.pull(repo, remote, heads=pullrevs)
                if not pullop.rheads:
                    raise error.Abort('unable to pull requested revision')
        except (error.Abort, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' % e.message)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        if purgeext.purge(ui, repo, all=True, abort_on_err=True,
                          # The function expects all arguments to be
                          # defined.
                          **{'print': None, 'print0': None, 'dirs': None,
                             'files': None}):
            raise error.Abort('error purging')

    # Update the working directory.
    if commands.update(ui, repo, rev=checkoutrevision, clean=True):
        raise error.Abort('error updating')

    ui.write('updated to %s\n' % checkoutrevision)
    return None
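
The pull path in these examples avoids fetching the whole repository: the symbolic name is resolved on the remote first, and only that head is requested from exchange.pull. A hedged sketch of the same pattern (pull_one is a hypothetical helper; the retry and error handling shown above are omitted):

from mercurial import exchange, hg

def pull_one(repo, url, symbolic):
    remote = hg.peer(repo, {}, url)
    try:
        node = remote.lookup(symbolic)  # resolve branch/tag/hash remotely
        if node not in repo:
            exchange.pull(repo, remote, heads=[node])
        return node
    finally:
        remote.close()
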
    def run_wsgi(self, req):
        path = req.env['PATH_INFO'].replace('\\', '/').strip('/')

        u = util.url(self.serverurl)
        # Forward HTTP basic authorization headers through the layers
        authheader = req.env.get('HTTP_AUTHORIZATION')
        if authheader and authheader.lower().startswith('basic '):
            userpasswd = authheader[6:].decode('base64')
            if ':' in userpasswd:
                u.user, u.passwd = userpasswd.split(':', 1)

        proto = protocol.webproto(req, self.ui)
        # MIME and HTTP allow multiple headers by the same name - we only
        # use and care about one
        args = dict((k, v[0]) for k, v in proto._args().items())
        cmd = args.pop('cmd', None)

        self.ui.write("%s@%s  cmd: %s  args: %s\n" %
                      (u.user, path or '/', cmd, ' '.join('%s=%s' % (k, v)
                       for k, v in sorted(args.items()))))

        if not cmd:
            if self.index:
                req.respond(common.HTTP_OK,
                            'text/html' if self.index.endswith('.html') else
                            'text/plain')
                return file(self.index)
            self.ui.warn(_('no command in request\n'))
            req.respond(common.HTTP_BAD_REQUEST, protocol.HGTYPE)
            return []

        # Simple path validation - probably only sufficient on Linux
        if ':' in path or path.startswith('.') or '/.' in path:
            self.ui.warn(_('bad request path %r\n') % path)
            req.respond(common.HTTP_BAD_REQUEST, protocol.HGTYPE)
            return []

        # Bounce early on missing credentials
        if not (self.anonymous or u.user and u.passwd):
            er = common.ErrorResponse(common.HTTP_UNAUTHORIZED,
                                      'Authentication is mandatory',
                                      self.authheaders)
            req.respond(er, protocol.HGTYPE)
            return ['HTTP authentication required']

        u.path = posixpath.join(u.path or '', req.env['PATH_INFO']).strip('/')
        url = str(u)

        repopath = os.path.join(self.cachepath, path)
        path = path or '/'

        try:
            # Reuse auth if possible - checking remotely is expensive
            peer, ts = peercache.get((u.user, u.passwd, path), (None, None))
            if peer is not None and time.time() > ts + self.ttl:
                self.ui.note(_('%s@%s expired, age %s\n') %
                             (u.user, path, time.time() - ts))
                peer = None
                peercache[(u.user, u.passwd, path)] = (peer, ts)
            # peer is now None or valid

            try:
                repo = hg.repository(self.ui, path=repopath)
            except error.RepoError as e:
                hg.peer(self.ui, {}, url) # authenticate / authorize first
                if os.path.exists(repopath) or not self.clone:
                    self.ui.warn(_("error with path %r: %s\n") % (path, e))
                    req.respond(common.HTTP_NOT_FOUND, protocol.HGTYPE)
                    return ['repository %s not found in proxy' % path]
                self.ui.warn(_("%r not found locally - cloning\n") % path)
                try:
                    repodir = os.path.dirname(repopath)
                    if not os.path.exists(repodir):
                        os.makedirs(repodir)
                    peer, destpeer = hg.clone(self.ui, {}, url, repopath,
                                              stream=True, update=False)
                except Exception as e:
                    self.ui.warn(_("error cloning %r: %s\n") % (path, e))
                    req.respond(common.HTTP_NOT_FOUND, protocol.HGTYPE)
                    return ['repository %s not available' % path]
                repo = destpeer.local()

            if cmd in ['capabilities', 'batch', 'lookup', 'branchmap'] and not peer:
                # new session on expired repo - do auth and pull again
                self.ui.note(_('%s@%s - pulling\n') % (u.user, path))
                t0 = time.time()
                peer = hg.peer(self.ui, {}, url)
                with repo.lock():
                    try:
                        r = pull(repo, peer)
                    except error.RepoError as e:
                        self.ui.debug('got %s on pull - running recover\n' % (e,))
                        repo.recover()
                        # should also run hg.verify(repo) ... but too expensive
                        r = pull(repo, peer)
                self.ui.debug('pull got %r after %s\n' % (r, time.time() - t0))
                peercache[(u.user, u.passwd, path)] = (peer, time.time())
            elif ts is None: # never authenticated
                self.ui.note('%s@%s - authenticating\n' % (u.user, path))
                peer = hg.peer(self.ui, {}, url)
                self.ui.debug('%s@%s - authenticated\n' % (u.user, path))
                peercache[(u.user, u.passwd, path)] = (peer, time.time())
            # user is now auth'ed for this session

            # fetch largefiles whenever they are referenced
            # (creating fake/combined batch statlfile responses is too complex)
            shas = []
            if cmd in ['statlfile', 'getlfile']:
                shas.append(args['sha'])
            if cmd == 'batch':
                for x in args['cmds'].split(';'):
                    if x.startswith('statlfile sha='):
                        shas.append(x[14:])
            missingshas = [sha for sha in shas
                           if not lfutil.findfile(repo, sha)]
            if missingshas:
                self.ui.debug('%s@%s - missing %s\n' %
                              (u.user, path, ' '.join(missingshas)))
                if not peer:
                    peer = hg.peer(self.ui, {}, url)
                store = openstore(repo, peer, False)
                existsremotely = store.exists(missingshas)
                for sha, available in sorted(existsremotely.iteritems()):
                    if not available:
                        self.ui.warn('%s@%s - %s not available remotely\n' %
                                     (u.user, path, sha))
                        continue
                    self.ui.write('%s@%s - fetching %s\n' % (u.user, path, sha))
                    gotit = store._gethash(sha, sha)
                    if not gotit:
                        self.ui.warn(_('failed to get %s for %s@%s remotely\n'
                                       ) % (sha, u.user, path))
                peercache[(u.user, u.passwd, path)] = (peer, time.time())

            # Forward write commands to the remote server.
            # Lookup and listkeys are also forwarded so we get
            # local tags, bookmarks and phases from the server
            if cmd in ['putlfile', 'unbundle', 'pushkey', 'lookup', 'listkeys']:
                size = req.env.get('CONTENT_LENGTH')
                self.ui.debug('reading %s bytes content before forwarding\n'
                              % size)
                data = None
                if req.env['REQUEST_METHOD'] == 'POST' or size is not None:
                    data = req.read(int(size or 0))

                if not peer:
                    peer = hg.peer(self.ui, {}, url)
                self.ui.note(_('calling %s remotely\n') % cmd)
                with repo.lock():
                    r = peer._call(cmd, data=data, **args)
                    if cmd == 'unbundle':
                        self.ui.debug('fetching pushed changes back\n')
                        # we could perhaps just have pulled from data ... but it
                        # could be tricky to make sure the repo stays in sync ...
                        pull(repo, peer)
                peercache[(u.user, u.passwd, path)] = (peer, time.time())
                req.respond(common.HTTP_OK, protocol.HGTYPE)
                return [r]

            # Now serve it locally
            return protocol.call(repo, req, cmd)

        except urllib2.HTTPError as inst:
            self.ui.warn(_('HTTPError connecting to server: %s\n') % inst)
            req.respond(inst.code, protocol.HGTYPE)
            return ['HTTP error']
        except error.Abort as e: # hg.peer will abort when it gets 401
            if e.args not in [('http authorization required',),
                              ('authorization failed',)]:
                raise
            self.ui.warn('%s@%s error: %r\n' % (u.user, path, e.args[0]))
            er = common.ErrorResponse(
                common.HTTP_UNAUTHORIZED
                if e.args == ('http authorization required',)
                else common.HTTP_BAD_REQUEST,
                'Authentication is required',
                self.authheaders)
            req.respond(er, protocol.HGTYPE)
            return ['HTTP authentication required']
        except Exception as e:
            msg = 'Internal proxy server error - please contact the administrator: %s' % e
            self.ui.warn('%s\n' % msg) # TODO: log traceback?
            req.respond(common.ErrorResponse(common.HTTP_SERVER_ERROR, msg), 'text/plain')
            return [msg]
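
The proxy above recovers credentials by decoding the HTTP basic Authorization header; str.decode('base64') is Python 2 only. A standalone sketch of the same parsing using the base64 module (parse_basic_auth is a hypothetical helper):

import base64

def parse_basic_auth(authheader):
    # Returns (user, passwd), or (None, None) when no usable header is present.
    if not authheader or not authheader.lower().startswith('basic '):
        return None, None
    userpasswd = base64.b64decode(authheader[6:]).decode('utf-8')
    if ':' in userpasswd:
        user, passwd = userpasswd.split(':', 1)
        return user, passwd
    return None, None

assert parse_basic_auth('Basic dXNlcjpzZWNyZXQ=') == ('user', 'secret')
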
Example #44
0
    def install(self):
        if exists(_userbase_dir + sep):
            count = 1
            while exists(_userbase_dir + '-' + str(count)):
                count += 1
            new_sys = _userbase_dir + '-' + str(count)
            copytree(_userbase_dir + sep, new_sys)
            log.AppendText("Setup has had to rename your old " +
                           _userbase_dir + "\n")
            log.AppendText("directory, it has been moved to " + new_sys + "\n")
            log.AppendText(
                "So make sure you go check there to see if there is anything in "
            )
            log.AppendText("that myfiles directory that you need!!\n")
            wx.Yield()
            log.AppendText(
                "Removing old Directories: (this could take a moment)\n\n")
            wx.Yield()
            rmtree(_userbase_dir + sep)
            mkdir(_userbase_dir)
            chdir(_userbase_dir)

        log.AppendText("Installing OpenRPG user files to " + dyn_dir + "\n")
        log.AppendText(
            "Fetching OpenRPG runtime Files: (this could take a moment)\n")
        wx.Yield()
        wx.BeginBusyCursor()
        hg.clone(self.ui, 'http://hg.assembla.com/traipse', dest=_userbase_dir)
        self.repo = hg.repository(self.ui, _userbase_dir)
        log.AppendText("\n")
        log.AppendText('Halloween 2009 Edition!!\n')
        log.AppendText('Copying Portable Mercurial to ' + _userbase_dir + sep +
                       'upmana\n')
        if exists(_userbase_dir + sep + 'upmana' + sep + 'mercurial'):
            rmtree(_userbase_dir + sep + 'upmana' + sep + 'mercurial')
        copytree(_home + sep + 'mercurial',
                 _userbase_dir + sep + 'upmana' + sep + 'mercurial')
        rmtree(_userbase_dir + sep + 'upmana' + sep + 'mercurial' + sep +
               'hgweb')
        copytree(
            _userbase_dir + sep + 'upmana' + sep + 'mercurial' + sep +
            'portable_hgweb',
            _userbase_dir + sep + 'upmana' + sep + 'mercurial' + sep + 'hgweb')
        rmtree(_userbase_dir + sep + 'upmana' + sep + 'mercurial' + sep +
               'portable_hgweb')
        wx.EndBusyCursor()

        #############

        try:
            log.AppendText("\n")
            _user_dir = _userbase_dir + sep + 'myfiles'
            #makedirs(_user_dir) #Removed because Traipse has a myfiles directory in the repo.
            log.AppendText("Creating the myfiles Directory:\n")
            wx.Yield()
            log.AppendText(
                "You should copy your old myfiles directory to this new location\n"
            )
            wx.Yield()
            makedirs(_user_dir + sep + "runlogs" + sep)
            log.AppendText("Creating the runlogs directory\n")
            wx.Yield()
            makedirs(_user_dir + sep + "logs" + sep)
            log.AppendText("Creating the Chat Logs directory\n")
            wx.Yield()
            # makedirs(_user_dir + sep + "webfiles" + sep) # Traipse contains a webfiles folder
            log.AppendText("Creating the Webfiles Directory\n")
            wx.Yield()
        except:
            pass

        log.AppendText(
            "Setting your copy to the Ornery Orc Release. You are invited to modify "
        )
        log.AppendText("the code. Re-run setup.py if you break it hard.\n\n")
        wx.Yield()
        chdir(_userbase_dir)
        hg.update(self.repo, "ornery-orc")
        log.AppendText('\n')
        wx.Yield()
        log.AppendText(
            "Traipse 'OpenRPG' has been setup, You can now run OpenRPG via one of "
        )
        log.AppendText(
            "the launcher scripts (Traipse.pyw, Server.py, Server_GUI.py)\n")
        log.AppendText("Do not forget your new myfiles location is " +
                       _user_dir + "\n")
        log.AppendText(
            "You will need to copy your old myfile directory there so that you "
        )
        log.AppendText("have access to your settings and game trees\n")
        wx.Yield()
        log.AppendText(
            "\n\nDONE! You can close this window now and delete the _del ")
        log.AppendText(
            "directory after you have ensure OpenRPG is running properly")
Example #45
0
def _docheckout(
    ui,
    url,
    dest,
    upstream,
    revision,
    branch,
    purge,
    sharebase,
    optimes,
    behaviors,
    networkattemptlimit,
    networkattempts=None,
    sparse_profile=None,
    noupdate=False,
):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(
            ui,
            url,
            dest,
            upstream,
            revision,
            branch,
            purge,
            sharebase,
            optimes,
            behaviors,
            networkattemptlimit,
            networkattempts=networkattempts,
            sparse_profile=sparse_profile,
            noupdate=noupdate,
        )

    @contextlib.contextmanager
    def timeit(op, behavior):
        behaviors.add(behavior)
        errored = False
        try:
            start = time.time()
            yield
        except Exception:
            errored = True
            raise
        finally:
            elapsed = time.time() - start

            if errored:
                op += "_errored"

            optimes.append((op, elapsed))

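    # Hypothetical usage of the timeit() helper above: the wrapped block gets
    # timed into optimes as ("<op>", elapsed), or ("<op>_errored", elapsed)
    # when it raises, e.g.:
    #
    #     with timeit("pull", "pull"):
    #         exchange.pull(repo, remote, heads=pullrevs)
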
    ui.write(b"ensuring %s@%s is available at %s\n" % (url, revision or branch, dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = vfs.vfs(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        storepath = path or destvfs.read(b".hg/sharedpath").strip()
        if storepath.endswith(b".hg"):
            storepath = os.path.dirname(storepath)

        storevfs = vfs.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists(b".hg"):
        raise error.Abort(b"destination exists but no .hg directory")

    # Refuse to enable sparse checkouts on existing checkouts. The reasoning
    # here is that another consumer of this repo may not be sparse aware. If we
    # enabled sparse, we would lock them out.
    if destvfs.exists() and sparse_profile and not destvfs.exists(b".hg/sparse"):
        raise error.Abort(
            b"cannot enable sparse profile on existing " b"non-sparse checkout",
            hint=b"use a separate working directory to use sparse",
        )

    # And the other direction for symmetry.
    if not sparse_profile and destvfs.exists(b".hg/sparse"):
        raise error.Abort(
            b"cannot use non-sparse checkout on existing sparse " b"checkout",
            hint=b"use a separate working directory to use sparse",
        )

    # Require checkouts to be tied to shared storage for efficiency.
    if destvfs.exists(b".hg") and not destvfs.exists(b".hg/sharedpath"):
        ui.warn(b"(destination is not shared; deleting)\n")
        with timeit("remove_unshared_dest", "remove-wdir"):
            destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists(b".hg/sharedpath"):
        storepath = destvfs.read(b".hg/sharedpath").strip()

        ui.write(b"(existing repository shared store: %s)\n" % storepath)

        if not os.path.exists(storepath):
            ui.warn(b"(shared store does not exist; deleting destination)\n")
            with timeit("removed_missing_shared_store", "remove-wdir"):
                destvfs.rmtree(forcibly=True)
        elif not re.search(rb"[a-f0-9]{40}/\.hg$", storepath.replace(b"\\", b"/")):
            ui.warn(
                b"(shared store does not belong to pooled storage; "
                b"deleting destination to improve efficiency)\n"
            )
            with timeit("remove_unpooled_store", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

    if destvfs.isfileorlink(b".hg/wlock"):
        ui.warn(
            b"(dest has an active working directory lock; assuming it is "
            b"left over from a previous process and that the destination "
            b"is corrupt; deleting it just to be sure)\n"
        )
        with timeit("remove_locked_wdir", "remove-wdir"):
            destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if pycompat.bytestr(e) == _(b"abandoned transaction found"):
            ui.warn(b"(abandoned transaction found; trying to recover)\n")
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn(b"(could not recover repo state; " b"deleting shared store)\n")
                with timeit("remove_unrecovered_shared_store", "remove-store"):
                    deletesharedstore()

            ui.warn(b"(attempting checkout from beginning)\n")
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort(
                b"reached maximum number of network attempts; " b"giving up\n"
            )

        ui.warn(
            b"(retrying after network failure on attempt %d of %d)\n"
            % (networkattempts[0], networkattemptlimit)
        )

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure (a standalone
        # sketch of this calculation appears after Example #46).
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2 ** networkattempts[0] - 1) * 1.5
        jittermin = ui.configint(b"robustcheckout", b"retryjittermin", 1000)
        jittermax = ui.configint(b"robustcheckout", b"retryjittermax", 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn(b"(waiting %.2fs before retry)\n" % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _(b"repository is unrelated"):
                ui.warn(b"(repository is unrelated; deleting)\n")
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_(b"stream ended unexpectedly")):
                ui.warn(b"%s\n" % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        # TODO test this branch
        elif isinstance(e, error.ResponseError):
            if e.args[0].startswith(_(b"unexpected response from remote server:")):
                ui.warn(b"(unexpected response from remote server; retrying)\n")
                destvfs.rmtree(forcibly=True)
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn(b"ssl error: %s\n" % e)
            handlenetworkfailure()
            return True
        elif isinstance(e, urllibcompat.urlerr.urlerror):
            if isinstance(e.reason, socket.error):
                ui.warn(b"socket error: %s\n" % pycompat.bytestr(e.reason))
                handlenetworkfailure()
                return True
            else:
                ui.warn(
                    b"unhandled URLError; reason type: %s; value: %s\n"
                    % (e.reason.__class__.__name__, e.reason)
                )
        else:
            ui.warn(
                b"unhandled exception during network operation; type: %s; "
                b"value: %s\n" % (e.__class__.__name__, e)
            )

        return False

    # Perform sanity checking of store. We may or may not know the path to the
    # local store. It depends if we have an existing destvfs pointing to a
    # share. To ensure we always find a local store, perform the same logic
    # that Mercurial's pooled storage does to resolve the local store path.
    cloneurl = upstream or url
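    # When an upstream mirror is given, the root-node lookup and the initial
    # clone use it; the requested revision is pulled from ``url`` later.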

    try:
        clonepeer = hg.peer(ui, {}, cloneurl)
        rootnode = peerlookup(clonepeer, b"0")
    except error.RepoLookupError:
        raise error.Abort(b"unable to resolve root revision from clone " b"source")
    except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
        if handlepullerror(e):
            return callself()
        raise

    if rootnode == nullid:
        raise error.Abort(b"source repo appears to be empty")

    storepath = os.path.join(sharebase, hex(rootnode))
    storevfs = vfs.vfs(storepath, audit=False)

    if storevfs.isfileorlink(b".hg/store/lock"):
        ui.warn(
            b"(shared store has an active lock; assuming it is left "
            b"over from a previous process and that the store is "
            b"corrupt; deleting store and destination just to be "
            b"sure)\n"
        )
        if destvfs.exists():
            with timeit("remove_dest_active_lock", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

        with timeit("remove_shared_store_active_lock", "remove-store"):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists() and not storevfs.exists(b".hg/requires"):
        ui.warn(
            b"(shared store missing requires file; this is a really "
            b"odd failure; deleting store and destination)\n"
        )
        if destvfs.exists():
            with timeit("remove_dest_no_requires", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

        with timeit("remove_shared_store_no_requires", "remove-store"):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists(b".hg/requires"):
        requires = set(storevfs.read(b".hg/requires").splitlines())
        # FUTURE when we require generaldelta, this is where we can check
        # for that.
        required = {b"dotencode", b"fncache"}

        missing = required - requires
        if missing:
            ui.warn(
                b"(shared store missing requirements: %s; deleting "
                b"store and destination to ensure optimal behavior)\n"
                % b", ".join(sorted(missing))
            )
            if destvfs.exists():
                with timeit("remove_dest_missing_requires", "remove-wdir"):
                    destvfs.rmtree(forcibly=True)

            with timeit("remove_shared_store_missing_requires", "remove-store"):
                storevfs.rmtree(forcibly=True)

    created = False
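    # Tracks whether this invocation performed the clone; a brand-new
    # working directory cannot contain stale files, so the purge step
    # below is skipped for it.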

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, "ensuredirs"):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write(b"(cloning from upstream repo %s)\n" % upstream)

        if not storevfs.exists():
            behaviors.add(b"create-store")

        try:
            with timeit("clone", "clone"):
                shareopts = {b"pool": sharebase, b"mode": b"identity"}
                res = hg.clone(
                    ui,
                    {},
                    clonepeer,
                    dest=dest,
                    update=False,
                    shareopts=shareopts,
                    stream=True,
                )
        except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
            with timeit("remove_shared_store_revlogerror", "remote-store"):
                deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort(b"clone failed")

        # Verify it is using shared pool storage.
        if not destvfs.exists(b".hg/sharedpath"):
            raise error.Abort(b"clone did not create a shared repo")

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False

    if revision:
        try:
            ctx = scmutil.revsingle(repo, revision)
        except error.RepoLookupError:
            ctx = None

        if ctx:
            if not ctx.hex().startswith(revision):
                raise error.Abort(
                    b"--revision argument is ambiguous",
                    hint=b"must be the first 12+ characters of a " b"SHA-1 fragment",
                )

            checkoutrevision = ctx.hex()
            havewantedrev = True

    if not havewantedrev:
        ui.write(b"(pulling to obtain %s)\n" % (revision or branch,))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [peerlookup(remote, revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn(
                    b"(remote resolved %s to %s; "
                    b"result is not deterministic)\n" % (branch, checkoutrevision)
                )

            if checkoutrevision in repo:
                ui.warn(b"(revision already present locally; not pulling)\n")
            else:
                with timeit("pull", "pull"):
                    pullop = exchange.pull(repo, remote, heads=pullrevs)
                    if not pullop.rheads:
                        raise error.Abort(b"unable to pull requested revision")
        except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Avoid any working directory manipulations if `-U`/`--noupdate` was passed
    if noupdate:
        ui.write(b"(skipping update since `-U` was passed)\n")
        return None

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write(b"(purging working directory)\n")
        purgeext = extensions.find(b"purge")

        # Mercurial 4.3 doesn't purge files outside the sparse checkout.
        # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
        # purging by monkeypatching the sparse matcher.
        try:
            old_sparse_fn = getattr(repo.dirstate, "_sparsematchfn", None)
            if old_sparse_fn is not None:
                # TRACKING hg50
                # Arguments passed to `matchmod.always` were unused and have been removed
                if util.versiontuple(n=2) >= (5, 0):
                    repo.dirstate._sparsematchfn = lambda: matchmod.always()
                else:
                    repo.dirstate._sparsematchfn = lambda: matchmod.always(
                        repo.root, ""
                    )

            with timeit("purge", "purge"):
                if purgeext.purge(
                    ui,
                    repo,
                    all=True,
                    abort_on_err=True,
                    # The function expects all arguments to be
                    # defined.
                    **{"print": None, "print0": None, "dirs": None, "files": None}
                ):
                    raise error.Abort(b"error purging")
        finally:
            if old_sparse_fn is not None:
                repo.dirstate._sparsematchfn = old_sparse_fn

    # Update the working directory.

    if repo[b"."].node() == nullid:
        behaviors.add("empty-wdir")
    else:
        behaviors.add("populated-wdir")

    if sparse_profile:
        sparsemod = getsparse()

        # By default, Mercurial will ignore unknown sparse profiles. This could
        # lead to a full checkout. Be more strict.
        try:
            repo.filectx(sparse_profile, changeid=checkoutrevision).data()
        except error.ManifestLookupError:
            raise error.Abort(
                b"sparse profile %s does not exist at revision "
                b"%s" % (sparse_profile, checkoutrevision)
            )

        # TRACKING hg48 - parseconfig takes `action` param
        if util.versiontuple(n=2) >= (4, 8):
            old_config = sparsemod.parseconfig(
                repo.ui, repo.vfs.tryread(b"sparse"), b"sparse"
            )
        else:
            old_config = sparsemod.parseconfig(repo.ui, repo.vfs.tryread(b"sparse"))

        old_includes, old_excludes, old_profiles = old_config

        if old_profiles == {sparse_profile} and not old_includes and not old_excludes:
            ui.write(
                b"(sparse profile %s already set; no need to update "
                b"sparse config)\n" % sparse_profile
            )
        else:
            if old_includes or old_excludes or old_profiles:
                ui.write(
                    b"(replacing existing sparse config with profile "
                    b"%s)\n" % sparse_profile
                )
            else:
                ui.write(b"(setting sparse config to profile %s)\n" % sparse_profile)

            # If doing an incremental update, this will perform two updates:
            # one to change the sparse profile and another to update to the new
            # revision. This is not desired. But there's not a good API in
            # Mercurial to do this as one operation.
            with repo.wlock(), timeit("sparse_update_config", "sparse-update-config"):
                # pylint --py3k: W1636
                fcounts = list(
                    map(
                        len,
                        sparsemod._updateconfigandrefreshwdir(
                            repo, [], [], [sparse_profile], force=True
                        ),
                    )
                )

                repo.ui.status(
                    b"%d files added, %d files dropped, "
                    b"%d files conflicting\n" % tuple(fcounts)
                )

            ui.write(b"(sparse refresh complete)\n")

    op = "update_sparse" if sparse_profile else "update"
    behavior = "update-sparse" if sparse_profile else "update"

    with timeit(op, behavior):
        if commands.update(ui, repo, rev=checkoutrevision, clean=True):
            raise error.Abort(b"error updating")

    ui.write(b"updated to %s\n" % checkoutrevision)

    return None
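
The function above routes everything through a pooled share: hg.clone() is
invoked with shareopts so the store lands under sharebase, keyed by the root
changeset. A minimal standalone sketch of that call, assuming a Mercurial
release whose hg.clone() accepts peeropts and shareopts; the URL and paths
here are hypothetical:

    import os

    from mercurial import hg, ui as uimod

    u = uimod.ui.load()  # ui.load() is available in Mercurial 4.1+

    # Hypothetical locations; robustcheckout derives these from its options.
    pool = os.path.expanduser("~/hgshare").encode()
    shareopts = {b"pool": pool, b"mode": b"identity"}

    # update=False leaves the working directory empty; the wanted revision
    # is checked out in a separate step, mirroring the flow above.
    hg.clone(
        u,
        {},
        b"https://hg.example.com/repo",
        dest=b"repo-checkout",
        update=False,
        shareopts=shareopts,
        stream=True,
    )
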
Example #46
0
	def clone(self):
		# The second argument holds peer options; hg.clone() expects a
		# mapping there, so pass an empty dict rather than ''.
		hg.clone(ui.ui(), {}, self.repository, self.directory)
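
As a reference for the retry behavior in the long example above, here is a
standalone sketch of the exponential-backoff-plus-jitter delay computed by
handlenetworkfailure(); the default jitter bounds (1000-5000 ms) match the
robustcheckout.retryjittermin/retryjittermax defaults read there:

    import random

    def backoff_seconds(attempt, jittermin_ms=1000, jittermax_ms=5000):
        # (2 ** attempt - 1) * 1.5 gives 1.5s, 4.5s, 10.5s, ... for
        # attempts 1, 2, 3, ...; jitter spreads out herds of retries.
        backoff = (2 ** attempt - 1) * 1.5
        backoff += random.randint(jittermin_ms, jittermax_ms) / 1000.0
        return backoff

    # With the defaults this yields roughly 2.5-6.5s, 5.5-9.5s and
    # 11.5-15.5s for the first three retries.
    for attempt in (1, 2, 3):
        print("attempt %d: %.2fs" % (attempt, backoff_seconds(attempt)))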