def rebalance(self, request: AllocateRequest) -> AllocateResponse:
    """No-op rebalance: log the request and echo the CPU back unchanged."""
    log.info("Ignoring attempt to rebalance workloads: '{}'".format(
        request.get_workloads()))
    cpu = request.get_cpu()
    allocations = get_workload_allocations(
        cpu, list(request.get_workloads().values()))
    return AllocateResponse(cpu, allocations, self.get_name())
def rebalance(self, request: AllocateRequest) -> AllocateResponse:
    """Rebalance workloads by delegating to the module-level rebalance() helper.

    The helper may mutate *metadata* with diagnostics, which are forwarded
    on the response alongside the rebalanced CPU and per-workload allocations.
    """
    workloads = request.get_workloads()
    metadata = {}
    balanced_cpu = rebalance(
        request.get_cpu(), workloads, self.__free_thread_provider, metadata)
    return AllocateResponse(
        balanced_cpu,
        get_workload_allocations(balanced_cpu, workloads.values()),
        self.get_name(),
        metadata)
def free_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """No-op free: log the request and echo the CPU back unchanged."""
    log.info("Ignoring attempt to free threads for workload: '{}'".format(
        request.get_workload_id()))
    cpu = request.get_cpu()
    allocations = get_workload_allocations(
        cpu, list(request.get_workloads().values()))
    return AllocateResponse(cpu, allocations, self.get_name())
def free_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """Release every thread claimed by the request's workload."""
    cpu = request.get_cpu()
    workloads = request.get_workloads()
    # Look the workload up (KeyError if absent) and free its id everywhere.
    target_id = workloads[request.get_workload_id()].get_id()
    for thread in cpu.get_threads():
        thread.free(target_id)
    return AllocateResponse(
        cpu,
        get_workload_allocations(cpu, workloads.values()),
        self.get_name())
def assign_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """Claim threads for the request's workload and report the allocations."""
    cpu = request.get_cpu()
    workloads = request.get_workloads()
    workload = workloads[request.get_workload_id()]
    # The subclass-provided strategy decides which threads to hand out.
    for thread in self._get_assign_threads(cpu, workload.get_thread_count()):
        thread.claim(workload.get_id())
    return AllocateResponse(
        cpu,
        get_workload_allocations(cpu, workloads.values()),
        self.get_name())
def rebalance(self, request: AllocateRequest) -> AllocateResponse:
    """Rebalance all workloads using CPU-usage predictions.

    Short-circuits (returning the CPU untouched) when no workloads are
    present; otherwise recomputes the full placement via
    __compute_allocation.  Per-call diagnostics are accumulated in
    __call_meta and attached to the response.
    """
    self.__call_meta = {}
    cpu = request.get_cpu()
    cpu_usage = request.get_cpu_usage()
    workloads = request.get_workloads()

    self.__cnt_rebalance_calls += 1

    # Idiomatic emptiness test (was: len(workloads) == 0).
    if not workloads:
        log.warning("Ignoring rebalance of empty CPU.")
        self.__call_meta['rebalance_empty'] = 1
        return AllocateResponse(
            cpu,
            get_workload_allocations(cpu, list(workloads.values())),
            self.get_name(),
            self.__call_meta)

    log.info("Rebalancing with predictions...")
    curr_ids_per_workload = cpu.get_workload_ids_to_thread_ids()

    # workload_id=None / is_assign=None signal a whole-CPU rebalance
    # rather than a single-workload assign or free.
    return AllocateResponse(
        self.__compute_allocation(cpu, None, workloads,
                                  curr_ids_per_workload, cpu_usage, None),
        get_workload_allocations(cpu, list(workloads.values())),
        self.get_name(),
        self.__call_meta)
def assign_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """Place the request's workload by recomputing the full allocation."""
    self.__call_meta = {}
    cpu = request.get_cpu()
    cpu_usage = request.get_cpu_usage()
    workloads = request.get_workloads()
    workload_id = request.get_workload_id()

    placed_ids = cpu.get_workload_ids_to_thread_ids()
    # is_assign=True: the named workload is being added to the placement.
    new_cpu = self.__compute_allocation(
        cpu, workload_id, workloads, placed_ids, cpu_usage, True)
    return AllocateResponse(
        new_cpu,
        get_workload_allocations(cpu, list(workloads.values())),
        self.get_name(),
        self.__call_meta)
def free_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """Free a workload's threads, then redistribute the burst pool."""
    cpu = request.get_cpu()
    workloads = request.get_workloads()
    workload_id = request.get_workload_id()

    # Burst workloads float freely, so release them all before refitting.
    release_all_threads(cpu, get_burst_workloads(workloads.values()))

    # Only static workloads hold pinned threads that need explicit freeing.
    if workloads[workload_id].get_type() == STATIC:
        self.__free_threads(cpu, workload_id, workloads)

    workloads.pop(workload_id)
    metadata = {}
    update_burst_workloads(cpu, workloads, self.__free_thread_provider, metadata)

    return AllocateResponse(
        cpu,
        get_workload_allocations(cpu, workloads.values()),
        self.get_name(),
        metadata)
def free_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """Remove a placed workload by recomputing the allocation without it.

    Raises when the workload is not currently placed on the instance.
    """
    self.__call_meta = {}
    cpu = request.get_cpu()
    cpu_usage = request.get_cpu_usage()
    workloads = request.get_workloads()
    workload_id = request.get_workload_id()

    placed_ids = cpu.get_workload_ids_to_thread_ids()
    if workload_id not in placed_ids:
        raise Exception(
            "workload_id=`%s` is not placed on the instance. Cannot free it." % (workload_id, ))

    # is_assign=False: the named workload is being removed from the placement.
    new_cpu = self.__compute_allocation(
        cpu, workload_id, workloads, placed_ids, cpu_usage, False)
    return AllocateResponse(
        new_cpu,
        get_workload_allocations(cpu, list(workloads.values())),
        self.get_name(),
        self.__call_meta)
def rebalance(self, request: AllocateRequest) -> AllocateResponse:
    """No-op rebalance: return the CPU exactly as it arrived."""
    cpu = request.get_cpu()
    allocations = get_workload_allocations(
        cpu, request.get_workloads().values())
    return AllocateResponse(cpu, allocations, self.get_name())