Example #1
    def gethistoricalversions(self, reponame, workspace):
        self.ui.debug("sending 'get_historical_versions' request\n",
                      component="commitcloud")
        path = "/commit_cloud/get_historical_versions"
        data = {"repo_name": reponame, "workspace": workspace}
        start = util.timer()
        response = self._send(path, data)
        elapsed = util.timer() - start
        self.ui.debug("response received in %0.2f sec\n" % elapsed,
                      component="commitcloud")

        if "error" in response:
            raise ccerror.ServiceError(self.ui, response["error"])

        versions = response["versions"]["versions"]

        self.ui.debug(
            "'get_historical_versions' returns %d entries\n" % len(versions),
            component="commitcloud",
        )

        try:
            return versions
        except Exception as e:
            raise ccerror.UnexpectedError(self.ui, e)
Example #2
 def _timedsend(self, path, data):
     start = util.timer()
     response = self._send(path, data)
     elapsed = util.timer() - start
     self.ui.debug("response received in %0.2f sec\n" % elapsed,
                   component="commitcloud")
     return response
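
The snippets above share one measurement idiom: read util.timer() before the call, read it again after, and report the difference. Below is a minimal, self-contained sketch of that idiom, assuming time.perf_counter as a stand-in for util.timer (the helper name is illustrative, not part of the original code):

import time

def timed(func, *args, **kwargs):
    # Run func and return (result, elapsed wall-clock seconds).
    start = time.perf_counter()
    result = func(*args, **kwargs)
    elapsed = time.perf_counter() - start
    return result, elapsed

# Hypothetical usage mirroring _timedsend:
# response, elapsed = timed(self._send, path, data)
# self.ui.debug("response received in %0.2f sec\n" % elapsed, component="commitcloud")
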
Example #3
def send(ui, repo, parent1, **kwargs):
    try:
        ui.debug("sending new checkout location to commit cloud: %s\n" %
                 parent1)
        start = util.timer()
        commit = parent1
        reponame = ccutil.getreponame(repo)
        workspacename = workspace.currentworkspace(repo)
        if workspacename is None:
            workspacename = workspace.defaultworkspace(ui)
        if workspacename is None:
            return
        tokenlocator = token.TokenLocator(ui)
        serv = service.get(ui, tokenlocator.token)
        hostname = socket.gethostname()
        sharedpath = repo.sharedpath
        checkoutpath = repo.path
        unixname = ui.username()
        serv.updatecheckoutlocations(
            reponame,
            workspacename,
            hostname,
            commit,
            checkoutpath,
            sharedpath,
            unixname,
        )

        elapsed = util.timer() - start
        ui.debug("finished in %0.2f sec\n" % elapsed)
    except Exception as e:
        ui.debug("syncing checkout locations failed with error: %s\n" % str(e))
Example #4
def checkhgserver(ui, repo, opts, path):
    ui.status(
        _("Testing connection to Mercurial on the server: querying master bookmark\n"),
        component="debugnetwork",
    )
    starttime = util.timer()
    peer = None
    try:
        peer = hg.peer(repo, opts, path)
        bookmarks = peer.listkeys("bookmarks")
        master = bookmarks.get("master")
    except Exception as e:
        ui.status(_("failed to connect to Mercurial: %s\n") % e,
                  error=_("error"))
        return False
    finally:
        if peer:
            peer.close()
    endtime = util.timer()

    ui.status(
        _("Connected ok: %s\n") % util.timecount(endtime - starttime),
        component="debugnetwork",
    )
    if master:
        ui.status(_("Server master bookmark is %s\n") % master,
                  component="debugnetwork")
    else:
        ui.status(_("Server has no master bookmark\n"),
                  component="debugnetwork")
    return True
Example #5
 def downloadtest(description, bytecount):
     pipeo.write(b"download %i\n" % bytecount)
     pipeo.flush()
     l = pipei.readline()
     if not l or not l.startswith(b"download bytes"):
         raise error.Abort("invalid response from server: %r" % l)
     bytecount = int(l.split()[2])
     with progress.bar(ui,
                       description,
                       total=bytecount,
                       formatfunc=util.bytecount) as prog:
         starttime = util.timer()
         remaining = bytecount
         while remaining > 0:
             data = pipei.read(min(remaining, BLOCK_SIZE))
             if not data:
                 raise error.Abort(
                     "premature end of speed-test download stream")
             remaining -= len(data)
             prog.value = bytecount - remaining
         l = pipei.readline()
         if not l or not l.startswith(b"download complete"):
             raise error.Abort("invalid response from server: %r" % l)
         endtime = util.timer()
     return endtime - starttime
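
The elapsed time returned by downloadtest, together with the requested bytecount, is enough to derive a transfer rate. A hedged sketch, with an illustrative helper name that does not appear in the original code:

def downloadspeed(bytecount, elapsed):
    # Bytes per second; guard against a zero-length interval.
    if elapsed <= 0:
        return float("inf")
    return float(bytecount) / elapsed

# Hypothetical usage, formatting the rate with util.bytecount as the snippet
# above already does for its progress bar:
# ui.status("download speed: %s/s\n" % util.bytecount(int(downloadspeed(bytecount, elapsed))))
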
Example #6
def _timer(fm, func, title=None):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        ostart = os.times()
        cstart = util.timer()
        r = func()
        cstop = util.timer()
        ostop = os.times()
        count += 1
        a, b = ostart, ostop
        results.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    fm.startitem()

    if title:
        fm.write("title", "! %s\n", title)
    if r:
        fm.write("result", "! result: %s\n", r)
    m = min(results)
    fm.plain("!")
    fm.write("wall", " wall %f", m[0])
    fm.write("comb", " comb %f", m[1] + m[2])
    fm.write("user", " user %f", m[1])
    fm.write("sys", " sys %f", m[2])
    fm.write("count", " (best of %d)", count)
    fm.plain("\n")
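
A plausible call site for _timer, assuming fm is a formatter obtained from ui.formatter(...) in the surrounding perf command; the workload being timed is purely illustrative:

def _example():
    # Any zero-argument callable can be benchmarked this way.
    return sum(range(100000))

_timer(fm, _example, title="sum of range(100000)")
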
Example #7
 def _command(self, *args):
     watchmanargs = (args[0], self._resolved_root) + args[1:]
     error = None
     needretry = False
     starttime = util.timer()
     try:
         if self._watchmanclient is None:
             if compat.PYTHON3:
                 encoding = "bser"
             else:
                 encoding = "bser-v1"
             self._firsttime = False
             self._watchmanclient = pywatchman.client(
                 sockpath=self._sockpath,
                 transport=self._transport,
                 tcpAddress=(self._tcp_host, self._tcp_port),
                 timeout=self._timeout,
                 recvEncoding=encoding,
                 sendEncoding=encoding,
                 useImmutableBser=True,
             )
         return self._watchmanclient.query(*watchmanargs)
     except pywatchman.CommandError as ex:
         error = ex.msg
         if "unable to resolve root" in ex.msg:
             raise WatchmanNoRoot(self._resolved_root, ex.msg)
         raise Unavailable(ex.msg)
     except pywatchman.SocketConnectError as ex:
         error = str(ex)
         # If fsmonitor.sockpath was specified in the configuration, we will
         # have skipped running `watchman get-sockname` which has the
         # consequence of not starting the watchman server up if it happens
         # to have been stopped.
         # Rather than just throwing up our hands in that situation, let's
         # clear the pre-configured sockpath so that the client will probe
         # and start it up.
         if not self._ui.config("fsmonitor", "sockpath") or self._sockpath is None:
             # Either sockpath wasn't configured, or we already tried clearing
             # it out, so let's propagate this error.
             raise Unavailable(str(ex))
         # Recurse and retry the command, and hopefully it will
         # start the server this time.
         self._sockpath = None
         self._watchmanclient = None
         needretry = True
     except pywatchman.WatchmanError as ex:
         error = str(ex)
         raise Unavailable(str(ex))
     finally:
         event = {
             "watchman": {
                 "args": args,
                 "duration_ms": int((util.timer() - starttime) * 1000),
             }
         }
         if error is not None:
             event["watchman"]["result"] = {"error": error}
         blackbox.log(event)
     if needretry:
         return self._command(*args)
Example #8
    def updatecheckoutlocations(self, reponame, workspace, hostname, commit,
                                checkoutpath, sharedpath, unixname):
        self.ui.debug("sending 'update_checkout_locations' request\n",
                      component="commitcloud")
        path = "/commit_cloud/update_checkout_locations"
        data = {
            "repo_name": reponame,
            "workspace": workspace,
            "hostname": hostname,
            "commit": commit,
            "checkout_path": checkoutpath,
            "shared_path": sharedpath,
            "unixname": unixname,
        }
        start = util.timer()
        response = self._send(path, data)
        elapsed = util.timer() - start
        self.ui.debug("response received in %0.2f sec\n" % elapsed,
                      component="commitcloud")

        if "error" in response:
            raise ccerror.ServiceError(self.ui, response["error"])

        self.ui.debug("'update_checkout_locations' successful\n",
                      component="commitcloud")
Example #9
def checksshcommand(ui, url, opts):
    rui = hg.remoteui(ui, opts)
    sshcmd = rui.config("ui", "ssh")
    sshaddenv = dict(rui.configitems("sshenv"))
    sshenv = util.shellenviron(sshaddenv)
    args = util.sshargs(sshcmd, url.host, url.user, url.port)
    cmd = "%s %s %s" % (sshcmd, args, "hostname")
    ui.status(
        _("Testing SSH connection to the server: running 'hostname'\n"),
        component="debugnetwork",
    )
    ui.pushbuffer(subproc=True)
    starttime = util.timer()
    res = ui.system(cmd, blockedtag="debugnetwork", environ=sshenv)
    endtime = util.timer()
    hostname = pycompat.decodeutf8(ui.popbufferbytes()).strip()
    if res == 0:
        ui.status(
            _("Connected ok: %s\n") % util.timecount(endtime - starttime),
            component="debugnetwork",
        )
        ui.status(_("Server hostname is %s\n") % hostname,
                  component="debugnetwork")
        return True
    else:
        ui.status(_("Failed to connect: ssh returned %s\n") % res,
                  error=_("error"))
        return False
Example #10
    def getsmartlog(self, reponame, workspace, repo, limit):
        self.ui.debug("sending 'get_smartlog' request\n",
                      component="commitcloud")

        path = "/commit_cloud/get_smartlog"
        data = {"repo_name": reponame, "workspace": workspace}

        start = util.timer()
        response = self._send(path, data)
        elapsed = util.timer() - start
        self.ui.debug("response received in %0.2f sec\n" % elapsed,
                      component="commitcloud")

        if "error" in response:
            raise ccerror.ServiceError(self.ui, response["error"])

        # If the response is 200 OK, its format is:
        # {
        #   "rc":0,
        #   "smartlog": <thrift structure SmartlogData serialized to json using Thrift JSON serialization>
        # }
        smartlog = response["smartlog"]
        if limit != 0:
            cutoff = int(time.time()) - limit
            smartlog["nodes"] = list(
                filter(lambda x: x["date"] >= cutoff, smartlog["nodes"]))
        self.ui.debug(
            "'get_smartlog' returns %d entries\n" % len(smartlog["nodes"]),
            component="commitcloud",
        )

        try:
            return self._makesmartloginfo(smartlog)
        except Exception as e:
            raise ccerror.UnexpectedError(self.ui, e)
Example #11
 def uploadtest(_description, bytecount):
     body = bytecount * b"A"
     starttime = util.timer()
     conn.request(b"POST", b"/netspeedtest", body=body)
     res = conn.getresponse()
     while not res.complete():
         res.read(length=BLOCK_SIZE)
     endtime = util.timer()
     if res.status != 204:
         raise error.Abort("uploadtest: HTTP response status code != 204")
     return endtime - starttime
Example #12
    def getsmartlogbyversion(self, reponame, workspace, repo, date, version,
                             limit):
        self.ui.debug("sending 'get_old_smartlog' request\n",
                      component="commitcloud")
        path = "/commit_cloud/get_smartlog_by_version"
        if date:
            data = {
                "repo_name": reponame,
                "workspace": workspace,
                "timestamp": date[0]
            }
        else:
            data = {
                "repo_name": reponame,
                "workspace": workspace,
                "version": version
            }

        start = util.timer()
        response = self._send(path, data)
        elapsed = util.timer() - start
        self.ui.debug("response received in %0.2f sec\n" % elapsed,
                      component="commitcloud")

        if "error" in response:
            raise ccerror.ServiceError(self.ui, response["error"])

        # If the response is 200 OK, its format is:
        # {
        #   "rc":0,
        #   "smartlog": <thrift structure SmartlogData serialized to json using Thrift JSON serialization>
        # }
        smartlog = response["smartlog"]
        if limit != 0:
            cutoff = smartlog["timestamp"] - limit
            smartlog["nodes"] = list(
                filter(lambda x: x["date"] >= cutoff, smartlog["nodes"]))

        self.ui.debug(
            "'get_smartlog' returns %d entries\n" % len(smartlog["nodes"]),
            component="commitcloud",
        )

        nodes = self._makenodes(smartlog)
        try:
            firstbranch, dag = self._makefakedag(nodes, repo)
            return (
                firstbranch,
                dag,
                response["smartlog"]["version"],
                response["smartlog"]["timestamp"],
            )
        except Exception as e:
            raise ccerror.UnexpectedError(self.ui, e)
Example #13
    def downloadtest(_description, bytecount):
        headers = {"x-netspeedtest-nbytes": bytecount}
        conn.request(b"GET", b"/netspeedtest", body=None, headers=headers)
        starttime = util.timer()
        res = conn.getresponse()
        while not res.complete():
            res.read(length=BLOCK_SIZE)
        endtime = util.timer()
        if res.status != 200:
            raise error.Abort("downloadtest: HTTP response status code != 200")

        return endtime - starttime
Example #14
    def downloadtest(_description, bytecount):
        headers = {HEADER_NETSPEEDTEST_NBYTES: bytecount}
        conn.request(b"GET", b"/netspeedtest", body=None, headers=headers)
        starttime = util.timer()
        res = conn.getresponse()
        while not res.complete():
            res.read(length=BLOCK_SIZE)
        endtime = util.timer()
        if not httpstatussuccess(res.status):
            raise error.Abort("downloadtest: HTTP response status code: %s"
                              % res.status)

        return endtime - starttime
Example #15
 def latencytest(n):
     latencies = []
     while n > 0:
         conn.request(b"GET", b"/health_check", body=None)
         starttime = util.timer()
         res = conn.getresponse()
         while not res.complete():
             res.read(length=BLOCK_SIZE)
         endtime = util.timer()
         if res.status != 200:
             raise error.Abort("latencytest: HTTP response status code != 200")
         latencies.append(endtime - starttime)
         n -= 1
     return latencies
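
The per-request latencies returned by latencytest lend themselves to a short summary. A sketch with an illustrative helper name, assuming at least one sample was collected:

def summarizelatencies(latencies):
    # Report min/avg/max in milliseconds over the collected samples.
    count = len(latencies)
    avg = sum(latencies) / count
    return "min %.2f ms, avg %.2f ms, max %.2f ms over %d requests" % (
        min(latencies) * 1000,
        avg * 1000,
        max(latencies) * 1000,
        count,
    )
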
Example #16
    def trace(line, ui=ui, shell=ipython):
        """run and print ASCII trace"""
        code = compile(line, "<magic-trace>", "exec")

        td = bindings.tracing.tracingdata()
        ns = shell.user_ns
        ns.update(globals())
        start = util.timer()
        _execwith(td, code, ns)
        durationmicros = (util.timer() - start) * 1e6
        # hide spans less than 50 microseconds, or 1% of the total time
        asciitrace = td.ascii(int(durationmicros / 100) + 50)
        ui.write_err("%s" % asciitrace)
        if not traceimport.enabled:
            ui.write_err("(use 'debugshell --trace' to enable more detailed trace)\n")
        return td
Example #17
    def getreferences(self, reponame, workspace, baseversion):
        self.ui.debug("sending 'get_references' request\n",
                      component="commitcloud")

        # send request
        path = "/commit_cloud/get_references"
        data = {
            "base_version": baseversion,
            "repo_name": reponame,
            "workspace": workspace,
        }
        start = util.timer()
        response = self._send(path, data)
        elapsed = util.timer() - start
        self.ui.debug("response received in %0.2f sec\n" % elapsed,
                      component="commitcloud")

        if "error" in response:
            raise ccerror.ServiceError(self.ui, response["error"])

        version = response["ref"]["version"]

        if version == 0:
            self.ui.debug(
                "'get_references' returns that workspace '%s' is not known by server\n"
                % workspace,
                component="commitcloud",
            )
            return baseservice.References(version, None, None, None, None,
                                          None, None)

        if version == baseversion:
            self.ui.debug(
                "'get_references' confirms the current version %s is the latest\n"
                % version,
                component="commitcloud",
            )
            return baseservice.References(version, None, None, None, None,
                                          None, None)

        self.ui.debug(
            "'get_references' returns version %s, current version %s\n" %
            (version, baseversion),
            component="commitcloud",
        )
        return self._makereferences(response["ref"])
Example #18
    def getworkspaces(self, reponame, prefix):
        self.ui.debug("sending 'get_workspaces' request\n",
                      component="commitcloud")

        # send request
        path = "/commit_cloud/get_workspaces"
        data = {"repo_name": reponame, "prefix": prefix}
        start = util.timer()
        response = self._send(path, data)
        elapsed = util.timer() - start
        self.ui.debug("response received in %0.2f sec\n" % elapsed,
                      component="commitcloud")

        if "error" in response:
            raise ccerror.ServiceError(self.ui, response["error"])

        workspaces = response["workspaces_data"]
        return self._makeworkspacesinfo(workspaces)
Example #19
 def archiveworkspace(self, reponame, workspace):
     """Archive the given workspace
     """
     self.ui.debug("sending 'update_workspace_archive' request\n",
                   component="commitcloud")
     path = "/commit_cloud/update_workspace_archive"
     data = {
         "repo_name": reponame,
         "workspace": workspace,
         "archived": True
     }
     start = util.timer()
     response = self._send(path, data)
     elapsed = util.timer() - start
     self.ui.debug("response received in %0.2f sec\n" % elapsed,
                   component="commitcloud")
     if "error" in response:
         raise ccerror.ServiceError(self.ui, response["error"])
Example #20
def debugserialgetfiles(ui, repo, **opts):
    edenapi.bailifdisabled(ui)

    input = (line.split() for line in sys.stdin.readlines())
    keys = [(path, node) for node, path in input]

    dpack, __ = repo.fileslog.getmutablesharedpacks()

    start = util.timer()
    for key in keys:
        stats = repo.edenapi.get_files([key], dpack)
        ui.write(stats.to_str() + "\n")
    end = util.timer()

    ui.write(_("elapsed time: %f s\n") % (end - start))

    packpath, __ = repo.fileslog._mutablesharedpacks.commit()
    ui.write(_("wrote datapack: %s\n") % packpath)
Example #21
 def latencytest(count):
     # Use the upload endpoint for the latency test.  We will time how long it
     # takes for the server to return the "upload complete" response for a
     # single byte upload.
     latencies = []
     with progress.spinner(ui, "testing connection latency"):
         for i in range(count):
             pipeo.write(b"upload 1\n")
             pipeo.flush()
             l = pipei.readline()
             if l != b"upload bytes 1\n":
                 raise error.Abort("invalid response from server: %r" % l)
             starttime = util.timer()
             pipeo.write(b"\n")
             pipeo.flush()
             l = pipei.readline()
             endtime = util.timer()
             if l != b"upload complete\n":
                 raise error.Abort("invalid response from server: %r" % l)
             latencies.append(endtime - starttime)
     return latencies
Example #22
def checkreachability(ui, url, addrinfos):
    ok = False
    for family, socktype, _proto, _canonname, sockaddr in addrinfos:
        ui.status(
            _("Testing connection to: %s %s\n") % (sockaddr[0], sockaddr[1]),
            component="debugnetwork",
        )
        try:
            starttime = util.timer()
            s = socket.socket(family, socktype)
            s.settimeout(1)
            s.connect(sockaddr)
            s.shutdown(socket.SHUT_RDWR)
            endtime = util.timer()
            ui.status(
                _("Connected ok: %s\n") % util.timecount(endtime - starttime),
                component="debugnetwork",
            )
            ok = True
        except Exception as e:
            ui.status(_("failed to connect to remote host: %s\n") % e, error=_("error"))
    return ok
Example #23
def _sync(
    repo,
    cloudrefs=None,
    full=False,
    cloudversion=None,
    cloudworkspace=None,
    connect_opts=None,
    dest=None,
):
    ui = repo.ui
    start = util.timer()

    startnode = repo["."].node()

    if full:
        maxage = None
    else:
        maxage = ui.configint("commitcloud", "max_sync_age", None)

    # Work out which repo and workspace we are synchronizing with.
    reponame = ccutil.getreponame(repo)
    workspacename = workspace.currentworkspace(repo)
    if workspacename is None:
        raise ccerror.WorkspaceError(ui, _("undefined workspace"))

    # External services may know the workspacename to trigger the sync
    if cloudworkspace and workspacename != cloudworkspace:
        ui.status(_("current workspace is different than the workspace to sync\n"))
        return (1, None)

    # Connect to the commit cloud service.
    tokenlocator = token.TokenLocator(ui)
    serv = service.get(ui, tokenlocator.token)

    ui.status(
        _("synchronizing '%s' with '%s'\n") % (reponame, workspacename),
        component="commitcloud",
    )
    backuplock.progress(repo, "starting synchronizing with '%s'" % workspacename)

    # Work out what version to fetch updates from.
    lastsyncstate = syncstate.SyncState(repo, workspacename)
    fetchversion = lastsyncstate.version
    if maxage != lastsyncstate.maxage:
        # We are doing a full sync, or maxage has changed since the last sync,
        # so get a fresh copy of the full state.
        fetchversion = 0

    # External services may already know the version number.  Check if we're
    # already up-to-date.
    if cloudversion is not None and cloudversion <= lastsyncstate.version:
        ui.status(
            _("this version has been already synchronized\n"), component="commitcloud"
        )
        # It's possible that we have two cloud syncs for the same repo - one for edenfs backing repo
        # another is for edenfs checkout. If edenfs backing repo sync runs first then it will sync
        # all the commits and bookmarks but it won't move working copy of the checkout.
        # The line below makes sure that working copy is updated.
        return _maybeupdateworkingcopy(repo, startnode), None

    backupsnapshots = False
    try:
        extensions.find("snapshot")
        backupsnapshots = True
    except KeyError:
        pass

    origheads = _getheads(repo)
    origbookmarks = _getbookmarks(repo)

    readonly = not origheads and not origbookmarks
    remotepath = (
        ccutil.getremotereadpath(repo, dest)
        if readonly
        else ccutil.getremotepath(repo, dest)
    )
    getconnection = lambda: repo.connectionpool.get(
        remotepath, connect_opts, reason="cloudsync"
    )

    # Back up all local commits that are not already backed up.
    # Load the backup state under the repo lock to ensure a consistent view.
    with repo.lock():
        state = backupstate.BackupState(repo, remotepath)
    backedup, failed = backup._backup(
        repo, state, remotepath, getconnection, backupsnapshots=backupsnapshots
    )

    # Now that commits are backed up, check that visibleheads are enabled
    # locally, and only sync if visibleheads is enabled.
    # developer config: commitcloud.requirevisibleheads
    if repo.ui.configbool("commitcloud", "requirevisibleheads", True):
        if not visibility.enabled(repo):
            raise error.Abort(_("commit cloud sync requires new-style visibility"))

    # On cloud rejoin we already know what the cloudrefs are.  Otherwise,
    # fetch them from the commit cloud service.
    if cloudrefs is None:
        cloudrefs = serv.getreferences(reponame, workspacename, fetchversion)

    with repo.ui.configoverride(
        {("treemanifest", "prefetchdraftparents"): False}, "cloudsync"
    ), repo.wlock(), repo.lock():

        if origheads != _getheads(repo) or origbookmarks != _getbookmarks(repo):
            # Another transaction changed the repository while we were backing
            # up commits. This may have introduced new commits that also need
            # backing up.  That transaction should have started its own sync
            # process, so give up on this sync, and let the later one perform
            # the sync.
            raise ccerror.SynchronizationError(ui, _("repo changed while backing up"))

        synced = False
        attempt = 0
        while not synced:

            if attempt >= 3:
                raise ccerror.SynchronizationError(
                    ui, _("failed to sync after %s attempts") % attempt
                )
            attempt += 1

            with repo.transaction("cloudsync") as tr:

                # Apply any changes from the cloud to the local repo.
                if cloudrefs.version != fetchversion:
                    _applycloudchanges(
                        repo, remotepath, lastsyncstate, cloudrefs, maxage, state, tr
                    )
                elif (
                    _isremotebookmarkssyncenabled(repo.ui)
                    and not lastsyncstate.remotebookmarks
                ):
                    # We're up-to-date, but didn't sync remote bookmarks last time.
                    # Sync them now.
                    cloudrefs = serv.getreferences(reponame, workspacename, 0)
                    _forcesyncremotebookmarks(
                        repo, cloudrefs, lastsyncstate, remotepath, tr
                    )

                # Check if any omissions are now included in the repo
                _checkomissions(repo, remotepath, lastsyncstate, tr)

            # We committed the transaction so that data downloaded from the cloud is
            # committed.  Start a new transaction for uploading the local changes.
            with repo.transaction("cloudsync") as tr:

                # Send updates to the cloud.  If this fails then we have lost the race
                # to update the server and must start again.
                synced, cloudrefs = _submitlocalchanges(
                    repo, reponame, workspacename, lastsyncstate, failed, serv, tr
                )

    # Update the backup bookmarks with any changes we have made by syncing.
    backupbookmarks.pushbackupbookmarks(repo, remotepath, getconnection, state)

    backuplock.progresscomplete(repo)

    if failed:
        failedset = set(repo.nodes("%ld::", failed))
        if len(failedset) == 1:
            repo.ui.warn(
                _("failed to synchronize %s\n") % nodemod.short(failedset.pop()),
                component="commitcloud",
            )
        else:
            repo.ui.warn(
                _("failed to synchronize %d commits\n") % len(failedset),
                component="commitcloud",
            )
    else:
        ui.status(_("commits synchronized\n"), component="commitcloud")

    elapsed = util.timer() - start
    ui.status(_("finished in %0.2f sec\n") % elapsed)

    # Check that Scm Service is running and a subscription exists
    subscription.check(repo)

    return _maybeupdateworkingcopy(repo, startnode), synced and not failed
Example #24
    def _batch(self, pointers, localstore, action, objectnames=None):
        if action not in ["upload", "download"]:
            raise error.ProgrammingError("invalid Git-LFS action: %s" % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get("size", 0) for x in objects)
        perftrace.tracebytes("Size", total)
        sizes = {}
        for obj in objects:
            sizes[obj.get("oid")] = obj.get("size", 0)
        topic = {
            "upload": _("lfs uploading"),
            "download": _("lfs downloading")
        }[action]
        if self.ui.verbose and len(objects) > 1:
            self.ui.write(
                _("lfs: need to transfer %d objects (%s)\n") %
                (len(objects), util.bytecount(total)))

        def transfer(chunk):
            for obj in chunk:
                objsize = obj.get("size", 0)
                if self.ui.verbose:
                    if action == "download":
                        msg = _("lfs: downloading %s (%s)\n")
                    elif action == "upload":
                        msg = _("lfs: uploading %s (%s)\n")
                    self.ui.write(msg %
                                  (obj.get("oid"), util.bytecount(objsize)))
                retry = self.retry
                while True:
                    try:
                        yield 0, obj.get("oid")
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get("oid")
                        break
                    except Exception as ex:
                        if retry > 0:
                            if self.ui.verbose:
                                self.ui.write(
                                    _("lfs: failed: %r (remaining retry %d)\n")
                                    % (ex, retry))
                            retry -= 1
                            continue
                        raise

        starttimestamp = util.timer()
        if action == "download":
            oids = worker.worker(
                self.ui,
                0.1,
                transfer,
                (),
                sorted(objects, key=lambda o: o.get("oid")),
                preferthreads=True,
                callsite="blobstore",
            )
        else:
            oids = transfer(objects)

        transferred = 0
        with progress.bar(self.ui,
                          topic,
                          _("bytes"),
                          total=total,
                          formatfunc=util.bytecount) as prog:
            for count, oid in oids:
                if count != 0:
                    transferred += sizes[oid]
                    if self.ui.verbose:
                        self.ui.write(_("lfs: processed: %s\n") % oid)
                if objectnames is not None:
                    prog.value = (transferred, objectnames.get(oid, ""))
                else:
                    prog.value = transferred

        currenttimestamp = util.timer()
        self._metrics["lfs_%s_size" % action] += total
        self._metrics["lfs_%s_time" % action] += (currenttimestamp - max(
            self._timestamp["latest_%s_timestamp" % action],
            starttimestamp)) * 1000
        self._timestamp["latest_%s_timestamp" % action] = currenttimestamp
Example #25
def _sync(
    repo, cloudrefs=None, full=False, cloudversion=None, connect_opts=None, dest=None
):
    ui = repo.ui
    start = util.timer()

    remotepath = ccutil.getremotepath(repo, dest)
    getconnection = lambda: repo.connectionpool.get(remotepath, connect_opts)

    startnode = repo["."].node()

    if full:
        maxage = None
    else:
        maxage = ui.configint("commitcloud", "max_sync_age", None)

    # Work out which repo and workspace we are synchronizing with.
    reponame = ccutil.getreponame(repo)
    workspacename = workspace.currentworkspace(repo)
    if workspacename is None:
        raise ccerror.WorkspaceError(ui, _("undefined workspace"))

    # Connect to the commit cloud service.
    tokenlocator = token.TokenLocator(ui)
    serv = service.get(ui, tokenlocator.token)

    ui.status(
        _("synchronizing '%s' with '%s'\n") % (reponame, workspacename),
        component="commitcloud",
    )
    backuplock.progress(repo, "starting synchronizing with '%s'" % workspacename)

    # Work out what version to fetch updates from.
    lastsyncstate = syncstate.SyncState(repo, workspacename)
    fetchversion = lastsyncstate.version
    if maxage != lastsyncstate.maxage:
        # We are doing a full sync, or maxage has changed since the last sync,
        # so get a fresh copy of the full state.
        fetchversion = 0

    # External services may already know the version number.  Check if we're
    # already up-to-date.
    if cloudversion is not None and cloudversion <= lastsyncstate.version:
        ui.status(
            _("this version has been already synchronized\n"), component="commitcloud"
        )
        return 0

    backupsnapshots = False
    try:
        extensions.find("snapshot")
        backupsnapshots = True
    except KeyError:
        pass

    origheads = _getheads(repo)
    origbookmarks = _getbookmarks(repo)

    # Back up all local commits that are not already backed up.
    # Load the backup state under the repo lock to ensure a consistent view.
    with repo.lock():
        state = backupstate.BackupState(repo, remotepath)
    backedup, failed = backup._backup(
        repo, state, remotepath, getconnection, backupsnapshots=backupsnapshots
    )

    # On cloud rejoin we already know what the cloudrefs are.  Otherwise,
    # fetch them from the commit cloud service.
    if cloudrefs is None:
        cloudrefs = serv.getreferences(reponame, workspacename, fetchversion)

    with repo.ui.configoverride(
        {("treemanifest", "prefetchdraftparents"): False}, "cloudsync"
    ), repo.wlock(), repo.lock(), repo.transaction("cloudsync") as tr:

        if origheads != _getheads(repo) or origbookmarks != _getbookmarks(repo):
            # Another transaction changed the repository while we were backing
            # up commits. This may have introduced new commits that also need
            # backing up.  That transaction should have started its own sync
            # process, so give up on this sync, and let the later one perform
            # the sync.
            raise ccerror.SynchronizationError(ui, _("repo changed while backing up"))

        synced = False
        while not synced:

            # Apply any changes from the cloud to the local repo.
            if cloudrefs.version != fetchversion:
                _applycloudchanges(
                    repo, remotepath, lastsyncstate, cloudrefs, maxage, state, tr
                )

            # Check if any omissions are now included in the repo
            _checkomissions(repo, remotepath, lastsyncstate)

            # Send updates to the cloud.  If this fails then we have lost the race
            # to update the server and must start again.
            synced, cloudrefs = _submitlocalchanges(
                repo, reponame, workspacename, lastsyncstate, failed, serv
            )

    # Update the backup bookmarks with any changes we have made by syncing.
    backupbookmarks.pushbackupbookmarks(repo, remotepath, getconnection, state)

    backuplock.progresscomplete(repo)

    if failed:
        failedset = set(repo.nodes("%ld::", failed))
        if len(failedset) == 1:
            repo.ui.warn(
                _("failed to synchronize %s\n") % nodemod.short(failedset.pop()),
                component="commitcloud",
            )
        else:
            repo.ui.warn(
                _("failed to synchronize %d commits\n") % len(failedset),
                component="commitcloud",
            )
    else:
        ui.status(_("commits synchronized\n"), component="commitcloud")

    elapsed = util.timer() - start
    ui.status(_("finished in %0.2f sec\n") % elapsed)

    # Check that Scm Service is running and a subscription exists
    subscription.check(repo)

    # log whether the sync was successful
    with repo.wlock():
        fp = repo.localvfs("lastsync.log", "w+")
        if synced and not failed:
            fp.write("Success")
        else:
            fp.write("Failed")
        fp.close()
    return _maybeupdateworkingcopy(repo, startnode)
Example #26
    def updatereferences(
        self,
        reponame,
        workspace,
        version,
        oldheads=None,
        newheads=None,
        oldbookmarks=None,
        newbookmarks=None,
        newobsmarkers=None,
        oldremotebookmarks=None,
        newremotebookmarks=None,
        oldsnapshots=None,
        newsnapshots=None,
        logopts={},
    ):
        self.ui.debug("sending 'update_references' request\n",
                      component="commitcloud")
        oldheads = oldheads or []
        newheads = newheads or []
        oldbookmarks = oldbookmarks or []
        newbookmarks = newbookmarks or {}
        newobsmarkers = newobsmarkers or []
        oldremotebookmarks = oldremotebookmarks or []
        newremotebookmarks = newremotebookmarks or {}
        oldsnapshots = oldsnapshots or []
        newsnapshots = newsnapshots or []
        self.ui.log("commitcloud_updates",
                    version=version,
                    repo=reponame,
                    workspace=workspace,
                    oldheadcount=len(oldheads),
                    newheadcount=len(newheads),
                    oldbookmarkcount=len(oldbookmarks),
                    newbookmarkcount=len(newbookmarks),
                    oldremotebookmarkcount=len(oldremotebookmarks),
                    newremotebookmarkcount=len(newremotebookmarks),
                    **logopts)

        # remove duplicates, must preserve order in the newheads list
        newheadsset = set(newheads)
        commonset = set([item for item in oldheads if item in newheadsset])

        newheads = list(filter(lambda h: h not in commonset, newheads))
        oldheads = list(filter(lambda h: h not in commonset, oldheads))

        # send request
        path = "/commit_cloud/update_references"

        data = {
            "version": version,
            "repo_name": reponame,
            "workspace": workspace,
            "removed_heads": oldheads,
            "new_heads": newheads,
            "removed_bookmarks": oldbookmarks,
            "updated_bookmarks": newbookmarks,
            "new_obsmarkers_data": self._encodedmarkers(newobsmarkers),
            "removed_remote_bookmarks": self._makeremotebookmarks(oldremotebookmarks),
            "updated_remote_bookmarks": self._makeremotebookmarks(newremotebookmarks),
            "removed_snapshots": oldsnapshots,
            "new_snapshots": newsnapshots,
        }

        start = util.timer()
        response = self._send(path, data)
        elapsed = util.timer() - start
        self.ui.debug("response received in %0.2f sec\n" % elapsed,
                      component="commitcloud")

        if "error" in response:
            raise ccerror.ServiceError(self.ui, response["error"])

        data = response["ref"]
        rc = response["rc"]
        newversion = data["version"]

        if rc != 0:
            self.ui.debug(
                "'update_references' rejected update, current version %d is old, "
                "client needs to sync to version %d first\n" %
                (version, newversion),
                component="commitcloud",
            )
            return False, self._makereferences(data)

        self.ui.debug(
            "'update_references' accepted update, old version is %d, new version is %d\n"
            % (version, newversion),
            component="commitcloud",
        )

        return (
            True,
            baseservice.References(newversion, None, None, None, None, None,
                                   None),
        )