Example No. 1
    def rebalance(self, request: AllocateRequest) -> AllocateResponse:
        # No rebalancing is performed; the current CPU and its existing
        # workload allocations are returned unchanged.
        log.info("Ignoring attempt to rebalance workloads: '{}'".format(
            request.get_workloads()))
        return AllocateResponse(
            request.get_cpu(),
            get_workload_allocations(request.get_cpu(),
                                     list(request.get_workloads().values())),
            self.get_name())
Example No. 2
    def __process(self, request: AllocateRequest, req_type: str,
                  is_delete: bool) -> AllocateResponse:
        req_wid = ''
        if isinstance(request, AllocateThreadsRequest):
            req_wid = request.get_workload_id()
        req = self.__build_base_req(request.get_cpu())
        req.metadata[
            REQ_TYPE_METADATA_KEY] = req_type  # for logging purposes server side

        for wid, w in request.get_workloads().items():
            req.task_to_job_id[wid] = w.get_job_id()
            if is_delete and wid == req_wid:
                # skip the workload being deleted so it is not re-placed
                continue
            req.tasks_to_place.append(wid)

        try:
            log.info("remote %s (tasks_to_place=%s)", req_type,
                     req.tasks_to_place)
            response = self.__stub.ComputeIsolation(
                req, timeout=self.__call_timeout_secs)
        except grpc.RpcError as e:
            log.error("remote %s failed (tasks_to_place=%s):\n%s", req_type,
                      req.tasks_to_place, repr(e))
            raise e

        try:
            return self.__deser(response)
        except Exception as e:
            log.error("failed to deseralize response for remote %s of %s:\n%s",
                      req_type, req_wid, repr(e))
            raise e
Example No. 3
    def rebalance(self, request: AllocateRequest) -> AllocateResponse:
        cpu = request.get_cpu()
        workloads = request.get_workloads()

        metadata = {}
        cpu = rebalance(cpu, workloads, self.__free_thread_provider, metadata)
        return AllocateResponse(
            cpu, get_workload_allocations(cpu, workloads.values()),
            self.get_name(), metadata)
Example No. 4
    def rebalance(self, request: AllocateRequest) -> AllocateResponse:
        self.__call_meta = {}
        cpu = request.get_cpu()
        cpu_usage = request.get_cpu_usage()
        workloads = request.get_workloads()
        self.__cnt_rebalance_calls += 1

        if len(workloads) == 0:
            log.warning("Ignoring rebalance of empty CPU.")
            self.__call_meta['rebalance_empty'] = 1
            return AllocateResponse(cpu, self.get_name(), self.__call_meta)

        log.info("Rebalancing with predictions...")
        curr_ids_per_workload = cpu.get_workload_ids_to_thread_ids()

        return AllocateResponse(
            self.__compute_allocation(cpu, None, workloads, curr_ids_per_workload, cpu_usage, None),
            self.get_name(),
            self.__call_meta)
Example No. 5
    def rebalance(self, request: AllocateRequest) -> AllocateResponse:
        return AllocateResponse(
            request.get_cpu(),
            get_workload_allocations(request.get_cpu(),
                                     request.get_workloads().values()),
            self.get_name())
Example No. 6
    def rebalance(self, request: AllocateRequest) -> AllocateResponse:
        cpu = request.get_cpu()
        workloads = request.get_workloads()

        cpu = rebalance(cpu, workloads, self.__free_thread_provider)
        return AllocateResponse(cpu, self.get_name())
Example No. 7
    def rebalance(self, request: AllocateRequest) -> AllocateResponse:
        # Pass-through: return the CPU as-is with no new workload placements.
        return AllocateResponse(request.get_cpu(), self.get_name())
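
The examples above all follow the same contract: rebalance receives an AllocateRequest, may recompute the CPU's thread placement, and wraps the (possibly unchanged) CPU and the allocator's name in an AllocateResponse. The sketch below restates that shape in a self-contained form. StubCpu, StubRequest, StubResponse, and PassthroughAllocator are illustrative stand-ins, not the real types used above, whose AllocateResponse constructor signatures also vary between examples.

class StubCpu:
    """Illustrative stand-in for the CPU model the allocators operate on."""

    def __init__(self, threads):
        self.threads = threads


class StubRequest:
    """Illustrative stand-in for AllocateRequest."""

    def __init__(self, cpu, workloads):
        self.__cpu = cpu
        self.__workloads = workloads

    def get_cpu(self):
        return self.__cpu

    def get_workloads(self):
        return self.__workloads


class StubResponse:
    """Illustrative stand-in for AllocateResponse."""

    def __init__(self, cpu, allocator_name, metadata=None):
        self.cpu = cpu
        self.allocator_name = allocator_name
        self.metadata = metadata or {}


class PassthroughAllocator:
    """Returns the CPU unchanged, mirroring the simplest examples above."""

    def get_name(self) -> str:
        return self.__class__.__name__

    def rebalance(self, request: StubRequest) -> StubResponse:
        # No placement is recomputed; the incoming CPU is echoed back.
        return StubResponse(request.get_cpu(), self.get_name())


if __name__ == '__main__':
    request = StubRequest(StubCpu(threads=list(range(8))), workloads={})
    response = PassthroughAllocator().rebalance(request)
    print(response.allocator_name, response.cpu.threads)

Running the sketch prints the allocator name and the untouched thread list, which is exactly the behaviour of the pass-through examples above.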