Example #1
    def __init__(self, repo, name, filetype=None):
        self.repo = repo
        self.name = name
        self.vfs = scmutil.vfs(repo.join('shelved'))
        self.backupvfs = scmutil.vfs(repo.join(backupdir))
        self.ui = self.repo.ui
        if filetype:
            self.fname = name + '.' + filetype
        else:
            self.fname = name
Example #2
    def __init__(self, repo, name, filetype=None):
        self.repo = repo
        self.name = name
        self.vfs = scmutil.vfs(repo.join(shelvedir))
        self.backupvfs = scmutil.vfs(repo.join(backupdir))
        self.ui = self.repo.ui
        if filetype:
            self.fname = name + '.' + filetype
        else:
            self.fname = name
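Examples #1 and #2 assume module-level names (`backupdir`, `shelvedir`) that are defined elsewhere in Mercurial's shelve extension; `backupdir` in particular is otherwise undefined in the snippet. A minimal sketch of the assumed surrounding context (constant values as in hgext/shelve.py of that era, but treat them as assumptions here):

# Assumed module-level context for the snippets above.
from mercurial import scmutil

shelvedir = 'shelved'        # per-repo directory holding active shelves
backupdir = 'shelve-backup'  # per-repo directory holding shelve backups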
Example #3
    def deletesharedstore():
        # Note: closes over destvfs from the enclosing checkout function.
        storepath = destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = scmutil.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)
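For context on the dirname() step: a share's `.hg/sharedpath` file holds the path of the shared store's `.hg` directory, so trimming the trailing `.hg` yields the pooled repository root that rmtree should remove. A tiny standalone illustration (the path is hypothetical):

import os

# Hypothetical contents of .hg/sharedpath in a pooled clone.
storepath = '/pool/8ba995b74e18334ab3707f27e9eb8f4e37ba3d29/.hg'
if storepath.endswith('.hg'):
    storepath = os.path.dirname(storepath)
print(storepath)  # /pool/8ba995b74e18334ab3707f27e9eb8f4e37ba3d29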
Example #4
    def __init__(self, testcase, dir, pidoffset=0):
        self._testcase = testcase
        self._acquirecalled = False
        self._releasecalled = False
        self._postreleasecalled = False
        self.vfs = scmutil.vfs(dir, audit=False)
        self._pidoffset = pidoffset
Example #5
    def __init__(self, testcase, dir, pidoffset=0):
        self._testcase = testcase
        self._acquirecalled = False
        self._releasecalled = False
        self._postreleasecalled = False
        self.vfs = scmutil.vfs(dir, audit=False)
        self._pidoffset = pidoffset
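Examples #4 and #5 come from a lock-test helper: the vfs points at a scratch directory and the boolean flags record which lock callbacks fired. A rough sketch of how such a helper could be exercised with mercurial.lock (the wiring and names below are assumptions, not the actual test code):

from mercurial import lock as lockmod

def acquiretestlock(state):
    # Take a lock in the scratch directory; the callbacks flip the
    # flags that the test asserts on afterwards.
    return lockmod.lock(state.vfs, 'testlock',
                        acquirefn=lambda: setattr(state, '_acquirecalled', True),
                        releasefn=lambda: setattr(state, '_releasecalled', True))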
Example #6
    def __init__(self, repo, name, filetype=None):
        self.repo = repo
        self.name = name
        self.vfs = scmutil.vfs(repo.join('shelved'))
        if filetype:
            self.fname = name + '.' + filetype
        else:
            self.fname = name
Example #7
    def __init__(self, repo):
        self._repo = repo
        self._vfs = scmutil.vfs(repo.vfs.join('reviewboard'), audit=False)

        # Maps review identifiers to identifierrecord instances.
        self._identifiers = {}
        # Maps parent review id to identifierrecord instances. Shares the same
        # object instances as _identifiers.
        self._prids = {}

        # Maps nodes to noderecord instances.
        self._nodes = {}

        self.baseurl = None
        self.remoteurl = None

        try:
            for line in repo.vfs('reviews'):
                line = line.strip()
                if not line:
                    continue

                fields = line.split(' ', 1)
                if len(fields) != 2:
                    repo.ui.warn(
                        _('malformed line in reviews file: %r\n') % line)
                    continue

                t, d = fields

                # Identifier to parent review ID.
                if t == 'p':
                    ident, rrid = d.split(' ', 1)
                    r = identifierrecord(parentrrid=rrid)
                    self._identifiers[ident] = r
                    self._prids[rrid] = r
                # Node to review id.
                elif t == 'c':
                    node, rid = d.split(' ', 1)
                    assert len(node) == 40
                    r = self._nodes.setdefault(bin(node), noderecord())
                    r.rrids.add(rid)
                # Node to parent id.
                elif t == 'pc':
                    node, pid = d.split(' ', 1)
                    assert len(node) == 40
                    self._nodes[bin(node)].parentrrids.add(pid)
                elif t == 'u':
                    self.baseurl = d
                elif t == 'r':
                    self.remoteurl = d

        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
Example #8
    def __init__(self, repo):
        self._repo = repo
        self._vfs = scmutil.vfs(repo.vfs.join('reviewboard'), audit=False)

        # Maps review identifiers to identifierrecord instances.
        self._identifiers = {}
        # Maps parent review id to identifierrecord instances. Shares the same
        # object instances as _identifiers.
        self._prids = {}

        # Maps nodes to noderecord instances.
        self._nodes = {}

        self.baseurl = None
        self.remoteurl = None

        try:
            for line in repo.vfs('reviews'):
                line = line.strip()
                if not line:
                    continue

                fields = line.split(' ', 1)
                if len(fields) != 2:
                    repo.ui.warn(
                        _('malformed line in reviews file: %r\n') % line)
                    continue

                t, d = fields

                # Identifier to parent review ID.
                if t == 'p':
                    ident, rrid = d.split(' ', 1)
                    r = identifierrecord(parentrrid=rrid)
                    self._identifiers[ident] = r
                    self._prids[rrid] = r
                # Node to review id.
                elif t == 'c':
                    node, rid = d.split(' ', 1)
                    assert len(node) == 40
                    r = self._nodes.setdefault(bin(node), noderecord())
                    r.rrids.add(rid)
                # Node to parent id.
                elif t == 'pc':
                    node, pid = d.split(' ', 1)
                    assert len(node) == 40
                    self._nodes[bin(node)].parentrrids.add(pid)
                elif t == 'u':
                    self.baseurl = d
                elif t == 'r':
                    self.remoteurl = d

        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
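The branches above imply a simple line-oriented format for the `reviews` file: a one-letter record type, a space, then the payload. A hypothetical file matching the parser (all values invented) and a minimal re-parse of it:

# Hypothetical .hg/reviews contents for the parser above.
sample = '''\
u https://reviewboard.example.com
r https://hg.example.com/repo
p my-ident 42
c 0123456789abcdef0123456789abcdef01234567 43
pc 0123456789abcdef0123456789abcdef01234567 42
'''
for line in sample.splitlines():
    t, d = line.split(' ', 1)
    print(t, '->', d)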
Example #9
def cleanupoldbackups(repo):
    vfs = scmutil.vfs(repo.join(backupdir))
    maxbackups = repo.ui.configint('shelve', 'maxbackups', 10)
    hgfiles = [f for f in vfs.listdir() if f.endswith('.hg')]
    hgfiles = sorted([(vfs.stat(f).st_mtime, f) for f in hgfiles])
    if 0 < maxbackups and maxbackups < len(hgfiles):
        bordermtime = hgfiles[-maxbackups][0]
    else:
        bordermtime = None
    for mtime, f in hgfiles[:len(hgfiles) - maxbackups]:
        if mtime == bordermtime:
            # keep it, because timestamp can't decide exact order of backups
            continue
        base = f[:-3]
        for ext in 'hg patch'.split():
            try:
                vfs.unlink(base + '.' + ext)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
Example #10
def cleanupoldbackups(repo):
    vfs = scmutil.vfs(repo.join(backupdir))
    maxbackups = repo.ui.configint('shelve', 'maxbackups', 10)
    hgfiles = [f for f in vfs.listdir() if f.endswith('.hg')]
    hgfiles = sorted([(vfs.stat(f).st_mtime, f) for f in hgfiles])
    if 0 < maxbackups and maxbackups < len(hgfiles):
        bordermtime = hgfiles[-maxbackups][0]
    else:
        bordermtime = None
    for mtime, f in hgfiles[:len(hgfiles) - maxbackups]:
        if mtime == bordermtime:
            # keep it, because timestamp can't decide exact order of backups
            continue
        base = f[:-3]
        for ext in 'hg patch'.split():
            try:
                vfs.unlink(base + '.' + ext)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
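The pruning above keeps the newest `maxbackups` files and additionally spares older files whose mtime equals the border file's, since identical timestamps give no reliable ordering. A standalone sketch of the same selection on invented data:

maxbackups = 2
# (mtime, filename) pairs, oldest first, mirroring the sorted list above.
hgfiles = [(100, 'a.hg'), (200, 'b.hg'), (200, 'c.hg'), (300, 'd.hg')]
bordermtime = hgfiles[-maxbackups][0] if 0 < maxbackups < len(hgfiles) else None
for mtime, f in hgfiles[:len(hgfiles) - maxbackups]:
    if mtime == bordermtime:
        continue  # ties with the border file are kept
    print('would delete', f)  # only a.hg is old enough to go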
Example #11
def share(orig, ui, source, *args, **kwargs):
    """Wraps hg.share to mark the firefoxtrees file as shared.

    The .hg/shared file lists things that are shared. We add firefoxtrees
    to it if we are a Firefox repo.
    """
    res = orig(ui, source, *args, **kwargs)

    # TODO Mercurial 3.7 introduces a standalone function that receives the
    # proper arguments so we can avoid this boilerplate.
    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branches = hg.parseurl(origsource)
        srcrepo = hg.repository(ui, source)
    else:
        srcrepo = source.local()

    if not isfirefoxrepo(srcrepo):
        return res

    if args:
        dest = args[0]
    elif 'dest' in kwargs:
        dest = kwargs['dest']
    else:
        dest = None

    if not dest:
        dest = hg.defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    destwvfs = scmutil.vfs(dest, realpath=True)
    r = hg.repository(ui, destwvfs.base)

    with r.vfs('shared', 'ab') as fh:
        fh.write('firefoxtrees\n')

    return res
Example #12
def share(orig, ui, source, *args, **kwargs):
    """Wraps hg.share to mark the firefoxtrees file as shared.

    The .hg/shared file lists things that are shared. We add firefoxtrees
    to it if we are a Firefox repo.
    """
    res = orig(ui, source, *args, **kwargs)

    # TODO Mercurial 3.7 introduces a standalone function that receives the
    # proper arguments so we can avoid this boilerplate.
    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branches = hg.parseurl(origsource)
        srcrepo = hg.repository(ui, source)
    else:
        srcrepo = source.local()

    if not isfirefoxrepo(srcrepo):
        return res

    if args:
        dest = args[0]
    elif 'dest' in kwargs:
        dest = kwargs['dest']
    else:
        dest = None

    if not dest:
        dest = hg.defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    destwvfs = scmutil.vfs(dest, realpath=True)
    r = hg.repository(ui, destwvfs.base)

    with r.vfs('shared', 'ab') as fh:
        fh.write('firefoxtrees\n')

    return res
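The `share(orig, ui, source, ...)` signature is the shape Mercurial expects from a function wrapper. A sketch of how an extension would typically install it; the extsetup wiring is an assumption about this extension, not quoted from it:

from mercurial import extensions, hg

def extsetup(ui):
    # Route hg.share through the wrapper; Mercurial passes the original
    # function as the first argument (orig).
    extensions.wrapfunction(hg, 'share', share)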
Example #13
def _docheckout(ui, url, dest, upstream, revision, branch, purge, sharebase):
    def callself():
        return _docheckout(ui, url, dest, upstream, revision, branch, purge,
                           sharebase)

    ui.write('ensuring %s@%s is available at %s\n' % (url, revision or branch,
                                                      dest))

    destvfs = scmutil.vfs(dest, audit=False, realpath=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Require checkouts to be tied to shared storage because efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting)\n')
            destvfs.rmtree(forcibly=True)
        elif not re.search(r'[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting to improve efficiency)\n')
            destvfs.rmtree(forcibly=True)

        # FUTURE when we require generaldelta, this is where we can check
        # for that.

    def deletesharedstore():
        storepath = destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = scmutil.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.
    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)
        cloneurl = upstream or url

        try:
            res = hg.clone(ui, {}, cloneurl, dest=dest, update=False,
                           shareopts={'pool': sharebase, 'mode': 'identity'})
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' % e.message)
            deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False
    if revision and revision in repo:
        ctx = repo[revision]

        if not ctx.hex().startswith(revision):
            raise error.Abort('--revision argument is ambiguous',
                              hint='must be the first 12+ characters of a '
                                   'SHA-1 fragment')

        havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch,))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [remote.lookup(revision or branch)]
            pullop = exchange.pull(repo, remote, heads=pullrevs)
            if not pullop.rheads:
                raise error.Abort('unable to pull requested revision')
        except error.Abort as e:
            if e.message == _('repository is unrelated'):
                ui.warn('(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return callself()

            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' % e.message)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        if purgeext.purge(ui, repo, all=True, abort_on_err=True,
                          # The function expects all arguments to be
                          # defined.
                          **{'print': None, 'print0': None, 'dirs': None,
                             'files': None}):
            raise error.Abort('error purging')

    # Update the working directory.
    if commands.update(ui, repo, rev=revision or branch, clean=True):
        raise error.Abort('error updating')

    ctx = repo[revision or branch]
    ui.write('updated to %s\n' % ctx.hex())
    return None
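`_docheckout` is the worker; the actual command entry point would parse CLI options and delegate to it. A minimal sketch of such a wrapper using the era's cmdutil.command registrar (option names here are assumptions):

from mercurial import cmdutil

cmdtable = {}
command = cmdutil.command(cmdtable)

@command('robustcheckout',
         [('', 'upstream', '', 'upstream repo to clone from'),
          ('r', 'revision', '', 'revision to check out'),
          ('b', 'branch', '', 'branch to check out'),
          ('', 'purge', False, 'whether to purge the working directory'),
          ('', 'sharebase', '', 'directory where shared repos live')],
         '[OPTION]... URL DEST')
def robustcheckout(ui, url, dest, **opts):
    return _docheckout(ui, url, dest, opts.get('upstream'),
                       opts.get('revision'), opts.get('branch'),
                       opts.get('purge'), opts.get('sharebase'))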
Example #14
def _docheckout(ui, url, dest, upstream, revision, branch, purge, sharebase,
                networkattemptlimit, networkattempts=None):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(ui, url, dest, upstream, revision, branch, purge,
                           sharebase, networkattemptlimit, networkattempts)

    ui.write('ensuring %s@%s is available at %s\n' % (url, revision or branch,
                                                      dest))

    destvfs = scmutil.vfs(dest, audit=False, realpath=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Require checkouts to be tied to shared storage because efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting)\n')
            destvfs.rmtree(forcibly=True)
        elif not re.search(r'[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting to improve efficiency)\n')
            destvfs.rmtree(forcibly=True)

        # FUTURE when we require generaldelta, this is where we can check
        # for that.

    def deletesharedstore():
        storepath = destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = scmutil.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort('reached maximum number of network attempts; '
                              'giving up\n')

        ui.warn('(retrying after network failure on attempt %d of %d)\n' %
                (networkattempts[0], networkattemptlimit))

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2 ** networkattempts[0] - 1) * 1.5
        jittermin = ui.configint('robustcheckout', 'retryjittermin', 1000)
        jittermax = ui.configint('robustcheckout', 'retryjittermax', 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn('(waiting %.2fs before retry)\n' % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _('repository is unrelated'):
                ui.warn('(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_('stream ended unexpectedly')):
                ui.warn('%s\n' % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, urllib2.URLError):
            if isinstance(e.reason, socket.error):
                ui.warn('socket error: %s\n' % e.reason)
                handlenetworkfailure()
                return True

        return False

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)
        cloneurl = upstream or url

        try:
            res = hg.clone(ui, {}, cloneurl, dest=dest, update=False,
                           shareopts={'pool': sharebase, 'mode': 'identity'})
        except (error.Abort, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' % e.message)
            deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False
    if revision and revision in repo:
        ctx = repo[revision]

        if not ctx.hex().startswith(revision):
            raise error.Abort('--revision argument is ambiguous',
                              hint='must be the first 12+ characters of a '
                                   'SHA-1 fragment')

        checkoutrevision = ctx.hex()
        havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch,))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [remote.lookup(revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn('(remote resolved %s to %s; '
                        'result is not deterministic)\n' %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn('(revision already present locally; not pulling)\n')
            else:
                pullop = exchange.pull(repo, remote, heads=pullrevs)
                if not pullop.rheads:
                    raise error.Abort('unable to pull requested revision')
        except (error.Abort, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' % e.message)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        if purgeext.purge(ui, repo, all=True, abort_on_err=True,
                          # The function expects all arguments to be
                          # defined.
                          **{'print': None, 'print0': None, 'dirs': None,
                             'files': None}):
            raise error.Abort('error purging')

    # Update the working directory.
    if commands.update(ui, repo, rev=checkoutrevision, clean=True):
        raise error.Abort('error updating')

    ui.write('updated to %s\n' % checkoutrevision)
    return None
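The backoff ranges documented in the comment above can be verified by hand: attempt n waits (2**n - 1) * 1.5 seconds plus 1.0-5.0 seconds of jitter under the default 1000-5000 ms bounds. A standalone check:

# Reproduce the documented backoff ranges (seconds) per attempt.
for attempt in (1, 2, 3):
    base = (2 ** attempt - 1) * 1.5
    print('attempt %d: %.1f - %.1f' % (attempt, base + 1.0, base + 5.0))
# attempt 1: 2.5 - 6.5
# attempt 2: 5.5 - 9.5
# attempt 3: 11.5 - 15.5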
Example #15
def _docheckout(ui,
                url,
                dest,
                upstream,
                revision,
                branch,
                purge,
                sharebase,
                networkattemptlimit,
                networkattempts=None):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(ui, url, dest, upstream, revision, branch, purge,
                           sharebase, networkattemptlimit, networkattempts)

    ui.write('ensuring %s@%s is available at %s\n' %
             (url, revision or branch, dest))

    destvfs = scmutil.vfs(dest, audit=False, realpath=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Require checkouts to be tied to shared storage because efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting)\n')
            destvfs.rmtree(forcibly=True)
        elif not re.search(r'[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting to improve efficiency)\n')
            destvfs.rmtree(forcibly=True)

        # FUTURE when we require generaldelta, this is where we can check
        # for that.

    def deletesharedstore():
        storepath = destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = scmutil.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort('reached maximum number of network attempts; '
                              'giving up\n')

        ui.warn('(retrying after network failure on attempt %d of %d)\n' %
                (networkattempts[0], networkattemptlimit))

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2**networkattempts[0] - 1) * 1.5
        jittermin = ui.configint('robustcheckout', 'retryjittermin', 1000)
        jittermax = ui.configint('robustcheckout', 'retryjittermax', 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn('(waiting %.2fs before retry)\n' % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _('repository is unrelated'):
                ui.warn('(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_('stream ended unexpectedly')):
                ui.warn('%s\n' % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, urllib2.URLError):
            if isinstance(e.reason, socket.error):
                ui.warn('socket error: %s\n' % e.reason)
                handlenetworkfailure()
                return True

        return False

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)
        cloneurl = upstream or url

        try:
            res = hg.clone(ui, {},
                           cloneurl,
                           dest=dest,
                           update=False,
                           shareopts={
                               'pool': sharebase,
                               'mode': 'identity'
                           })
        except (error.Abort, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False
    if revision and revision in repo:
        ctx = repo[revision]

        if not ctx.hex().startswith(revision):
            raise error.Abort('--revision argument is ambiguous',
                              hint='must be the first 12+ characters of a '
                              'SHA-1 fragment')

        checkoutrevision = ctx.hex()
        havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch, ))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [remote.lookup(revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn('(remote resolved %s to %s; '
                        'result is not deterministic)\n' %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn('(revision already present locally; not pulling)\n')
            else:
                pullop = exchange.pull(repo, remote, heads=pullrevs)
                if not pullop.rheads:
                    raise error.Abort('unable to pull requested revision')
        except (error.Abort, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        if purgeext.purge(
                ui,
                repo,
                all=True,
                abort_on_err=True,
                # The function expects all arguments to be
                # defined.
                **{
                    'print': None,
                    'print0': None,
                    'dirs': None,
                    'files': None
                }):
            raise error.Abort('error purging')

    # Update the working directory.
    if commands.update(ui, repo, rev=checkoutrevision, clean=True):
        raise error.Abort('error updating')

    ui.write('updated to %s\n' % checkoutrevision)
    return None