Example #1
def exclone(orig, ui, *args, **opts):
    """
    We may not want local bookmarks on clone... but we always want remotenames!
    """
    srcpeer, dstpeer = orig(ui, *args, **opts)
    repo = dstpeer.local()

    # Skip this function if the modern clone code path is used, which will
    # update selective pull remote bookmarks (but will not write all remote
    # bookmarks, which is considered legacy behavior).
    if opts.get("clonecodepath") not in {"legacy-pull", "copy"}:
        return (srcpeer, dstpeer)

    with repo.wlock(), repo.lock(), repo.transaction("exclone") as tr:
        if _isselectivepull(ui):
            remotebookmarkskeys = selectivepullbookmarknames(repo)
            remotebookmarks = _listremotebookmarks(srcpeer,
                                                   remotebookmarkskeys)
            # The clone was pulled with selectivepull disabled.  Hide all the
            # commits so we only keep the ones we want.
            visibility.setvisibleheads(repo, [])
        else:
            remotebookmarks = srcpeer.listkeys("bookmarks")
        pullremotenames(repo, srcpeer, remotebookmarks)

        if not ui.configbool("remotenames", "syncbookmarks"):
            ui.debug("remotenames: removing cloned bookmarks\n")
            for vfs in [repo.localvfs, repo.sharedvfs, repo.svfs]:
                if vfs.tryread("bookmarks"):
                    vfs.write("bookmarks", b"")
            # Invalidate bookmark caches.
            repo._filecache.pop("_bookmarks", None)
            repo.__dict__.pop("_bookmarks", None)
            # Avoid writing out bookmarks on transaction close.
            tr.removefilegenerator("bookmarks")

        return (srcpeer, dstpeer)
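
The `orig` parameter in Example #1 indicates this function is meant to be installed as a wrapper around the clone entry point rather than called directly. A minimal sketch of how such a wrapper is typically registered, assuming upstream Mercurial module paths and that `hg.clone` is the wrapped target (the real extension may import from a vendored copy of Mercurial instead):

# Sketch only: registering exclone as a clone wrapper in an extension's
# uisetup hook.  Module paths are assumptions, not taken from the source.
from mercurial import extensions, hg

def uisetup(ui):
    # After this, every clone calls exclone(orig, ui, *args, **opts),
    # where `orig` is the original hg.clone returning (srcpeer, dstpeer).
    extensions.wrapfunction(hg, "clone", exclone)
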
Example #2
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage, state, tr):
    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    unfi = repo
    newheads = [head for head in cloudrefs.heads if head not in unfi]
    if maxage is not None and maxage >= 0:
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head
            for head in newheads
            if head in cloudrefs.headdates and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            repo.ui.status(_("omitting heads that are older than %d days:\n") % maxage)
            for head in omittedheads:
                headdatestr = util.datestr(util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_("  %s from %s\n") % (head[:12], headdatestr))
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = []
    omittedbookmarks = []
    omittedremotebookmarks = []

    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [head for head in cloudrefs.heads if head not in omittedheads]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head
                for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(
                oldvisibleheads + cloudheads + localheads
            )
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [head for head in newvisibleheads if head not in toremove]

    remotebookmarknewnodes = set()
    remotebookmarkupdates = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        (remotebookmarkupdates, remotebookmarknewnodes) = _processremotebookmarks(
            repo, cloudrefs.remotebookmarks, lastsyncstate
        )

    try:
        snapshot = extensions.find("snapshot")
    except KeyError:
        snapshot = None
        addedsnapshots = []
        removedsnapshots = []
        newsnapshots = lastsyncstate.snapshots
    else:
        addedsnapshots = [
            s for s in cloudrefs.snapshots if s not in lastsyncstate.snapshots
        ]
        removedsnapshots = [
            s for s in lastsyncstate.snapshots if s not in cloudrefs.snapshots
        ]
        newsnapshots = cloudrefs.snapshots
        newheads += addedsnapshots

    if remotebookmarknewnodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = _partitionheads(
            list(remotebookmarknewnodes) + newheads, cloudrefs.headdates
        )
        _pullheadgroups(repo, remotepath, headgroups)

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate)
    )

    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        newremotebookmarks, omittedremotebookmarks = _updateremotebookmarks(
            repo, tr, remotebookmarkupdates
        )

    if snapshot:
        with repo.lock(), repo.transaction("sync-snapshots") as tr:
            repo.snapshotlist.update(
                tr, addnodes=addedsnapshots, removenodes=removedsnapshots
            )

    _mergeobsmarkers(repo, tr, cloudrefs.obsmarkers)

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo, [nodemod.bin(n) for n in newvisibleheads])

    # Obsmarker sharing is unreliable.  Some of the commits that should now
    # be visible might be hidden still, and some commits that should be
    # hidden might still be visible.  Create local obsmarkers to resolve
    # this.
    if obsolete.isenabled(repo, obsolete.createmarkersopt) and not repo.ui.configbool(
        "mutation", "proxy-obsstore"
    ):
        unfi = repo
        # Commits that are only visible in the cloud are commits that are
        # ancestors of the cloud heads but are hidden locally.
        cloudvisibleonly = list(
            unfi.set(
                "not public() & ::%ls & hidden()",
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        # Commits that are only hidden in the cloud are commits that are
        # ancestors of the previous cloud heads that are not ancestors of the
        # current cloud heads, but have not been hidden or obsoleted locally.
        cloudhiddenonly = list(
            unfi.set(
                "(not public() & ::%ls) - (not public() & ::%ls) - hidden() - obsolete()",
                [
                    head
                    for head in lastsyncstate.heads
                    if head not in lastsyncstate.omittedheads
                ],
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        if cloudvisibleonly or cloudhiddenonly:
            msg = _(
                "detected obsmarker inconsistency (fixing by obsoleting [%s] and reviving [%s])\n"
            ) % (
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudhiddenonly]),
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudvisibleonly]),
            )
            repo.ui.log("commitcloud_sync", msg)
            repo.ui.warn(msg)
            repo._commitcloudskippendingobsmarkers = True
            with repo.lock():
                obsolete.createmarkers(repo, [(ctx, ()) for ctx in cloudhiddenonly])
                obsolete.revive(cloudvisibleonly)
            repo._commitcloudskippendingobsmarkers = False

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        newsnapshots,
    )
    lastsyncstate.update(
        tr,
        newversion=cloudrefs.version,
        newheads=cloudrefs.heads,
        newbookmarks=cloudrefs.bookmarks,
        newremotebookmarks=newremotebookmarks,
        newmaxage=maxage,
        newomittedheads=omittedheads,
        newomittedbookmarks=omittedbookmarks,
        newomittedremotebookmarks=omittedremotebookmarks,
        newsnapshots=newsnapshots,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
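
The pull step above relies on a `_partitionheads` helper to split the new heads into groups that can be pulled together; its implementation is not shown here. As a rough, hypothetical illustration of the idea (ignoring the `headdates` argument, which the real helper presumably uses to group heads by age as well):

# Hypothetical sketch of head partitioning: split heads into fixed-size
# batches so each pull stays small.  The real _partitionheads also receives
# headdates and may group heads differently.
def _partitionheads_sketch(heads, headdates, sizelimit=4):
    return [heads[i : i + sizelimit] for i in range(0, len(heads), sizelimit)]
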
Example #3
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage,
                       state, tr):
    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    newheads = [
        nodemod.hex(n) for n in repo.changelog.filternodes(
            [nodemod.bin(h) for h in cloudrefs.heads], inverse=True)
    ]
    if maxage is not None and maxage >= 0:
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head for head in newheads if head in cloudrefs.headdates
            and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            omittedheadslen = len(omittedheads)
            repo.ui.status(
                _n(
                    "omitting %d head that is older than %d days:\n",
                    "omitting %d heads that are older than %d days:\n",
                    omittedheadslen,
                ) % (omittedheadslen, maxage))
            counter = 0
            for head in reversed(omittedheads):
                if counter == _maxomittedheadsoutput:
                    remaining = len(omittedheads) - counter
                    repo.ui.status(
                        _n("  and %d older head\n", "  and %d older heads\n",
                           remaining) % remaining)
                    break
                headdatestr = util.datestr(
                    util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_("  %s from %s\n") % (head[:12], headdatestr))
                counter = counter + 1

        omittedheads = set(omittedheads)
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = set()
    omittedbookmarks = []
    omittedremotebookmarks = []

    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [
            head for head in cloudrefs.heads if head not in omittedheads
        ]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(oldvisibleheads +
                                                    cloudheads + localheads)
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [
                head for head in newvisibleheads if head not in toremove
            ]

    remotebookmarknewnodes = set()
    remotebookmarkupdates = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        (remotebookmarkupdates,
         remotebookmarknewnodes) = _processremotebookmarks(
             repo, cloudrefs.remotebookmarks, lastsyncstate)

    if remotebookmarknewnodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = _partitionheads(repo.ui,
                                     list(remotebookmarknewnodes) + newheads,
                                     cloudrefs.headdates)
        _pullheadgroups(repo, remotepath, headgroups)

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate,
                        omittedheads, maxage))

    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        omittedremotebookmarks = _updateremotebookmarks(
            repo, tr, remotebookmarkupdates)
        newremotebookmarks = cloudrefs.remotebookmarks

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo,
                                   [nodemod.bin(n) for n in newvisibleheads])

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
    )
    lastsyncstate.update(
        tr,
        newversion=cloudrefs.version,
        newheads=cloudrefs.heads,
        newbookmarks=cloudrefs.bookmarks,
        newremotebookmarks=newremotebookmarks,
        newmaxage=maxage,
        newomittedheads=list(omittedheads),
        newomittedbookmarks=omittedbookmarks,
        newomittedremotebookmarks=omittedremotebookmarks,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
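
Example #3 omits heads older than `maxage` days by comparing each head's date against a cutoff of `time.time() - maxage * 86400` seconds. A small, self-contained sketch of that filtering step (a hypothetical helper, not part of the original module):

import time

# Hypothetical helper illustrating the age cutoff used above: a head is
# omitted only if it has a known date and that date is older than the
# cutoff; heads with no recorded date are always kept.
def splitheadsbyage(newheads, headdates, maxage):
    mindate = time.time() - maxage * 86400
    omitted = [h for h in newheads if h in headdates and headdates[h] < mindate]
    kept = [h for h in newheads if h not in omitted]
    return kept, omitted
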
Example #4
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage,
                       state, tr):
    pullcmd, pullopts = ccutil.getcommandandoptions("pull|pul")

    try:
        remotenames = extensions.find("remotenames")
    except KeyError:
        remotenames = None

    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    unfi = repo.unfiltered()
    newheads = [head for head in cloudrefs.heads if head not in unfi]
    if maxage is not None and maxage >= 0:
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head for head in newheads if head in cloudrefs.headdates
            and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            repo.ui.status(
                _("omitting heads that are older than %d days:\n") % maxage)
            for head in omittedheads:
                headdatestr = util.datestr(
                    util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_("  %s from %s\n") % (head[:12], headdatestr))
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = []
    omittedbookmarks = []

    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [
            head for head in cloudrefs.heads if head not in omittedheads
        ]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(oldvisibleheads +
                                                    cloudheads + localheads)
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [
                head for head in newvisibleheads if head not in toremove
            ]

    remotebookmarknodes = []
    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        newremotebookmarks = _processremotebookmarks(repo,
                                                     cloudrefs.remotebookmarks,
                                                     lastsyncstate)

        # Pull the public commits that remote bookmarks point to, if they are
        # not present locally.
        for node in newremotebookmarks.values():
            if node not in unfi:
                remotebookmarknodes.append(node)

    try:
        snapshot = extensions.find("snapshot")
    except KeyError:
        snapshot = None
        addedsnapshots = []
        removedsnapshots = []
        newsnapshots = lastsyncstate.snapshots
    else:
        addedsnapshots = [
            s for s in cloudrefs.snapshots if s not in lastsyncstate.snapshots
        ]
        removedsnapshots = [
            s for s in lastsyncstate.snapshots if s not in cloudrefs.snapshots
        ]
        newsnapshots = cloudrefs.snapshots

    # TODO(alexeyqu): pull snapshots separately
    newheads += addedsnapshots

    backuplock.progresspulling(repo, [nodemod.bin(node) for node in newheads])

    if remotebookmarknodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = ([remotebookmarknodes] if remotebookmarknodes else
                      []) + _partitionheads(newheads, cloudrefs.headdates)

        def disabled(*args, **kwargs):
            pass

        # Disable pulling of obsmarkers
        wrapobs = extensions.wrappedfunction(exchange, "_pullobsolete",
                                             disabled)

        # Disable pulling of bookmarks
        wrapbook = extensions.wrappedfunction(exchange, "_pullbookmarks",
                                              disabled)

        # Disable pulling of remote bookmarks
        if remotenames:
            wrapremotenames = extensions.wrappedfunction(
                remotenames, "pullremotenames", disabled)
        else:
            wrapremotenames = util.nullcontextmanager()

        # Disable automigration and prefetching of trees
        configoverride = repo.ui.configoverride(
            {
                ("pull", "automigrate"): False,
                ("treemanifest", "pullprefetchrevs"): ""
            },
            "cloudsyncpull",
        )

        prog = progress.bar(repo.ui,
                            _("pulling from commit cloud"),
                            total=len(headgroups))
        with wrapobs, wrapbook, wrapremotenames, configoverride, prog:
            for index, headgroup in enumerate(headgroups):
                headgroupstr = " ".join([head[:12] for head in headgroup])
                repo.ui.status(_("pulling %s\n") % headgroupstr)
                prog.value = (index, headgroupstr)
                pullopts["rev"] = headgroup
                pullcmd(repo.ui, repo, remotepath, **pullopts)
                repo.connectionpool.close()

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate))

    if _isremotebookmarkssyncenabled(repo.ui):
        _updateremotebookmarks(repo, tr, newremotebookmarks)

    if snapshot:
        with repo.lock(), repo.transaction("sync-snapshots") as tr:
            repo.snapshotlist.update(tr,
                                     addnodes=addedsnapshots,
                                     removenodes=removedsnapshots)

    _mergeobsmarkers(repo, tr, cloudrefs.obsmarkers)

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo,
                                   [nodemod.bin(n) for n in newvisibleheads])

    # Obsmarker sharing is unreliable.  Some of the commits that should now
    # be visible might be hidden still, and some commits that should be
    # hidden might still be visible.  Create local obsmarkers to resolve
    # this.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        unfi = repo.unfiltered()
        # Commits that are only visible in the cloud are commits that are
        # ancestors of the cloud heads but are hidden locally.
        cloudvisibleonly = list(
            unfi.set(
                "not public() & ::%ls & hidden()",
                [head for head in cloudrefs.heads if head not in omittedheads],
            ))
        # Commits that are only hidden in the cloud are commits that are
        # ancestors of the previous cloud heads that are not ancestors of the
        # current cloud heads, but have not been hidden or obsoleted locally.
        cloudhiddenonly = list(
            unfi.set(
                "(not public() & ::%ls) - (not public() & ::%ls) - hidden() - obsolete()",
                [
                    head for head in lastsyncstate.heads
                    if head not in lastsyncstate.omittedheads
                ],
                [head for head in cloudrefs.heads if head not in omittedheads],
            ))
        if cloudvisibleonly or cloudhiddenonly:
            msg = _(
                "detected obsmarker inconsistency (fixing by obsoleting [%s] and reviving [%s])\n"
            ) % (
                ", ".join(
                    [nodemod.short(ctx.node()) for ctx in cloudhiddenonly]),
                ", ".join(
                    [nodemod.short(ctx.node()) for ctx in cloudvisibleonly]),
            )
            repo.ui.log("commitcloud_sync", msg)
            repo.ui.warn(msg)
            repo._commitcloudskippendingobsmarkers = True
            with repo.lock():
                obsolete.createmarkers(repo,
                                       [(ctx, ()) for ctx in cloudhiddenonly])
                obsolete.revive(cloudvisibleonly)
            repo._commitcloudskippendingobsmarkers = False

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        newsnapshots,
    )
    lastsyncstate.update(
        tr,
        cloudrefs.version,
        cloudrefs.heads,
        cloudrefs.bookmarks,
        omittedheads,
        omittedbookmarks,
        maxage,
        newremotebookmarks,
        newsnapshots,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
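
Example #4 temporarily disables obsmarker, bookmark, and remote-bookmark pulling by wrapping the relevant functions with a no-op for the duration of the cloud pull, using `extensions.wrappedfunction` as a context manager. A stripped-down sketch of that pattern, assuming upstream Mercurial module paths:

# Sketch only: temporarily replace exchange._pullobsolete with a no-op
# while a pull runs, restoring the original on exit.  The helper name and
# pullcmd signature are assumptions for illustration.
from mercurial import exchange, extensions

def _disabled(orig, *args, **kwargs):
    # Drop the call instead of delegating to the original function.
    pass

def pullwithoutobsmarkers(ui, repo, pullcmd, remotepath, **pullopts):
    with extensions.wrappedfunction(exchange, "_pullobsolete", _disabled):
        pullcmd(ui, repo, remotepath, **pullopts)
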
Example #5
def cloudjoin(ui, repo, **opts):
    """connect the local repository to commit cloud

    Commits and bookmarks will be synchronized between all repositories that
    have been connected to the service.

    Use `hg cloud sync` to trigger a new synchronization.
    """

    tokenlocator = tokenmod.TokenLocator(ui)
    checkauthenticated(ui, repo, tokenlocator)

    workspacename = workspace.parseworkspace(ui, opts)
    if workspacename is None:
        workspacename = workspace.defaultworkspace(ui)

    currentworkspace = workspace.currentworkspace(repo)

    switch = opts.get("switch")
    merge = opts.get("merge")
    create = opts.get("create")

    if switch and merge:
        ui.status(
            _("'switch' and 'merge' options can not be provided together, please choose one over another\n"
              ),
            component="commitcloud",
        )
        return 1

    if currentworkspace == workspacename:
        ui.status(
            _("this repository has been already connected to the '%s' workspace for the '%s' repo\n"
              ) % (workspacename, ccutil.getreponame(repo)),
            component="commitcloud",
        )
        return cloudsync(ui, repo, **opts)

    # Check the current workspace and perform any necessary cleanup.
    # If the local repository is already connected to some workspace,
    # make sure that we perform the correct merge or switch.
    # If the local repository is not yet connected to any workspace,
    # all local changes will be moved to the destination workspace (merge).
    if currentworkspace:
        if not switch and not merge:
            ui.status(
                _("this repository is already connected to the '%s' workspace, run `hg cloud join --help`\n"
                  ) % currentworkspace,
                component="commitcloud",
            )
            return 1

        # check that the workspace exists if the destination workspace is not
        # the default workspace for the current user
        if not create and workspacename != workspace.defaultworkspace(ui):
            if not service.get(ui,
                               tokenmod.TokenLocator(ui).token).getworkspaces(
                                   ccutil.getreponame(repo), workspacename):
                raise error.Abort(
                    _("this repository can not be switched to the '%s' workspace\n"
                      "the workspace doesn't exist (please use --create option to create the workspace)"
                      ) % workspacename)

        if switch:
            # sync all the current commits and bookmarks before switching
            cloudsync(ui, repo, **opts)
            ui.status(
                _("now this repository will be switched from the '%s' to the '%s' workspace\n"
                  ) % (currentworkspace, workspacename),
                component="commitcloud",
            )
            with backuplock.lock(
                    repo), repo.wlock(), repo.lock(), repo.transaction(
                        "commit cloud switch workspace clean up transaction"
                    ) as tr:
                # check uncommitted changes
                if any(repo.status()):
                    raise error.Abort(
                        _("this repository can not be switched to the '%s' workspace due to uncommitted changes"
                          ) % workspacename)
                # check that the current location is a public commit
                if repo["."].mutable():
                    raise error.Abort(
                        _("this repository can not be switched to the '%s' workspace\n"
                          "please update your location to a public commit first"
                          ) % workspacename)
                # remove heads and bookmarks before connecting to a new workspace
                visibility.setvisibleheads(repo, [])
                # remove all local bookmarks
                bmremove = []
                for key in sync._getbookmarks(repo).keys():
                    bmremove.append((key, None))
                repo._bookmarks.applychanges(repo, tr, bmremove)
                # remove all remote bookmarks (if sync of them enabled)
                bmremove = {
                    key: nodemod.nullhex
                    for key in sync._getremotebookmarks(repo).keys()
                }
                sync._updateremotebookmarks(repo, tr, bmremove)
                # erase the state of remotely accessed bookmarks as well
                bookmarksmod.cleanselectivepullaccessedbookmarks(repo)
                # erase state if the repo was previously connected to the destination workspace
                syncstate.SyncState.erasestate(repo, workspacename)
                # clear subscription
                subscription.remove(repo)
                # clear workspace
                workspace.clearworkspace(repo)

        if merge:
            ui.status(
                _("this repository will be reconnected from the '%s' to the '%s' workspace\n"
                  ) % (currentworkspace, workspacename),
                component="commitcloud",
            )
            ui.status(
                _("all local commits and bookmarks will be merged into '%s' workspace\n"
                  ) % workspacename,
                component="commitcloud",
            )
            # TODO: suggest user to archive the old workspace if they want to
            # clear subscription
            subscription.remove(repo)
            # clear workspace
            workspace.clearworkspace(repo)
    else:
        if switch:
            ui.status(
                _("this repository can not be switched to the '%s' workspace because not joined to any workspace, run `hg cloud join --help`\n"
                  ) % workspacename,
                component="commitcloud",
            )
            return 1

    # connect to a new workspace
    workspace.setworkspace(repo, workspacename)
    ui.status(
        _("this repository is now connected to the '%s' workspace for the '%s' repo\n"
          ) % (workspacename, ccutil.getreponame(repo)),
        component="commitcloud",
    )
    cloudsync(ui, repo, **opts)
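
Example #5 is the body of the `hg cloud join` command; the `switch`, `merge`, and `create` values arrive through `opts`. A hypothetical sketch of how its option table might be declared with the standard `registrar.command` decorator is shown below; the real extension registers it as a subcommand of `hg cloud`, so the actual declaration, flag names, and help strings may differ.

# Hypothetical option table for cloudjoin.  Flag names and defaults are
# assumptions for illustration; only the opts consumed in the body above
# (workspace, switch, merge, create) are declared here.
from mercurial import registrar

cmdtable = {}
command = registrar.command(cmdtable)

@command(
    "cloudjoin",
    [
        ("w", "workspace", "", "name of the workspace to join"),
        ("", "switch", None, "switch this repository to another workspace"),
        ("", "merge", None, "merge local commits into the new workspace"),
        ("", "create", None, "create the workspace if it does not exist"),
    ],
)
def cloudjoin(ui, repo, **opts):
    ...  # body as in Example #5

With the real subcommand registration this would correspond to invocations such as `hg cloud join -w other-workspace --switch` or `hg cloud join --merge`, assuming a `-w/--workspace` flag.
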