    def free_threads(self,
                     request: AllocateThreadsRequest) -> AllocateResponse:
        url = "{}/free_threads".format(self.__url)
        body = request.to_dict()

        try:
            log.info("freeing threads remotely for workload: %s",
                     request.get_workload_id())
            response = requests.put(url,
                                    json=body,
                                    headers=self.__headers,
                                    timeout=self.__timeout)
        except requests.exceptions.Timeout as e:
            log.error("freeing threads remotely for workload: %s timed out",
                      request.get_workload_id())
            raise e

        if response.status_code == 200:
            log.info(
                "freed threads remotely with response code: %s for workload: %s",
                response.status_code, request.get_workload_id())
            return deserialize_response(response.headers, response.json())

        log.error(
            "failed to free threads remotely for workload: %s with status code: %d",
            request.get_workload_id(), response.status_code)
        raise CpuAllocationException("Failed to free threads: {}".format(
            response.text))
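
For context, here is a minimal, self-contained sketch of the remote call pattern the example above relies on: a PUT with a JSON body, an explicit timeout, and an error raised for any non-200 response. The class, constructor, and exception names below are hypothetical stand-ins (only the requests calls mirror the example); the real client and its CpuAllocationException may differ.

import logging

import requests

log = logging.getLogger(__name__)


class RemoteFreeError(Exception):
    """Hypothetical stand-in for CpuAllocationException."""


class RemoteAllocatorClient:
    """Hypothetical client showing the PUT-with-timeout pattern used above."""

    def __init__(self, url: str, timeout: float = 10.0):
        self.__url = url
        self.__timeout = timeout
        self.__headers = {"Content-Type": "application/json"}

    def free_threads(self, workload_id: str, body: dict) -> dict:
        url = "{}/free_threads".format(self.__url)
        try:
            response = requests.put(
                url, json=body, headers=self.__headers, timeout=self.__timeout)
        except requests.exceptions.Timeout:
            log.error("freeing threads remotely for workload: %s timed out", workload_id)
            raise

        if response.status_code == 200:
            return response.json()

        raise RemoteFreeError("Failed to free threads: {}".format(response.text))
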
Example #2
    def assign_threads(self,
                       request: AllocateThreadsRequest) -> AllocateResponse:
        thread_count = len(request.get_cpu().get_threads())
        thread_ids = list(range(thread_count))

        log.info(
            "Setting cpuset.cpus to ALL cpus: '{}' for workload: '{}'".format(
                thread_ids, request.get_workload_id()))
        self.__cgroup_manager.set_cpuset(request.get_workload_id(), thread_ids)

        return AllocateResponse(request.get_cpu(), self.get_name())
Example #3
    def free_threads(self,
                     request: AllocateThreadsRequest) -> AllocateResponse:
        log.info("Ignoring attempt to free threads for workload: '{}'".format(
            request.get_workload_id()))
        return AllocateResponse(
            request.get_cpu(),
            get_workload_allocations(request.get_cpu(),
                                     list(request.get_workloads().values())),
            self.get_name())
Example #4
    def free_threads(self,
                     request: AllocateThreadsRequest) -> AllocateResponse:
        cpu = request.get_cpu()
        workload = request.get_workloads()[request.get_workload_id()]

        for t in cpu.get_threads():
            t.free(workload.get_id())

        return AllocateResponse(cpu, self.get_name())
Example #5
    def assign_threads(self,
                       request: AllocateThreadsRequest) -> AllocateResponse:
        cpu = request.get_cpu()
        workload = request.get_workloads()[request.get_workload_id()]
        threads = self._get_assign_threads(cpu, workload.get_thread_count())
        for t in threads:
            t.claim(workload.get_id())

        return AllocateResponse(cpu, self.get_name())
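
The claim()/free() calls in Examples #4 and #5 imply that each hardware thread tracks which workload ids are currently placed on it. A minimal, hypothetical Thread model consistent with that usage is sketched below; the real class in the source library may carry more state (package, core, processor-id semantics) than shown here.

class Thread:
    """Hypothetical model of a hardware thread, consistent with claim()/free() above."""

    def __init__(self, processor_id: int):
        self.__processor_id = processor_id
        self.__workload_ids = set()

    def claim(self, workload_id: str):
        # Record that the workload now occupies this thread.
        self.__workload_ids.add(workload_id)

    def free(self, workload_id: str):
        # Release the workload; freeing an id that is not present is a no-op.
        self.__workload_ids.discard(workload_id)

    def get_workload_ids(self) -> set:
        return set(self.__workload_ids)

    def is_claimed(self) -> bool:
        return len(self.__workload_ids) > 0

    def get_processor_id(self) -> int:
        return self.__processor_id
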
Example #6
    def assign_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
        self.__call_meta = {}
        cpu = request.get_cpu()
        cpu_usage = request.get_cpu_usage()
        workloads = request.get_workloads()
        workload_id = request.get_workload_id()
        curr_ids_per_workload = cpu.get_workload_ids_to_thread_ids()

        return AllocateResponse(
            self.__compute_allocation(cpu, workload_id, workloads, curr_ids_per_workload, cpu_usage, True),
            self.get_name(),
            self.__call_meta)
Example #7
    def assign_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
        cpu = request.get_cpu()
        workloads = request.get_workloads()
        workload_id = request.get_workload_id()

        burst_workloads = get_burst_workloads(workloads.values())
        release_all_threads(cpu, burst_workloads)
        if workloads[workload_id].get_type() == STATIC:
            self.__assign_threads(cpu, workloads[workload_id])
        update_burst_workloads(cpu, workloads, self.__free_thread_provider)

        return AllocateResponse(cpu, self.get_name())
Example #8
    def free_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
        try:
            self.__primary_free_threads_call_count += 1
            return self.__primary_allocator.free_threads(request)
        except:
            log.exception(
                "Failed to free threads for workload: '{}' with primary allocator: '{}', falling back to: '{}'".format(
                    request.get_workload_id(),
                    self.__primary_allocator.__class__.__name__,
                    self.__secondary_allocator.__class__.__name__))
            self.__secondary_free_threads_call_count += 1
            return self.__secondary_allocator.free_threads(request)
Example #9
    def free_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
        try:
            self.__primary_free_threads_call_count += 1
            self.__should_fallback_immediately()
            return self.__primary_allocator.free_threads(request)
        except Exception as e:
            log.error(
                "Failed to free threads for workload: '{}' with primary allocator: '{}', falling back to: '{}' because '{}'".format(
                    request.get_workload_id(),
                    self.__primary_allocator.__class__.__name__,
                    self.__secondary_allocator.__class__.__name__,
                    e))
            self.__secondary_free_threads_call_count += 1
            return self.__secondary_allocator.free_threads(request)
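
Examples #8 and #9 wrap two allocators in a primary/secondary fallback: the primary is tried first and, on any exception, the call is retried against the secondary. A self-contained sketch of that pattern follows; the wrapper and the two stub allocators are hypothetical, illustrative stand-ins for the real classes.

import logging

log = logging.getLogger(__name__)


class FallbackAllocator:
    """Hypothetical wrapper: try the primary allocator, fall back to the secondary."""

    def __init__(self, primary, secondary):
        self.__primary = primary
        self.__secondary = secondary

    def free_threads(self, request):
        try:
            return self.__primary.free_threads(request)
        except Exception:
            log.exception("primary allocator '%s' failed, falling back to '%s'",
                          self.__primary.__class__.__name__,
                          self.__secondary.__class__.__name__)
            return self.__secondary.free_threads(request)


class FailingAllocator:
    def free_threads(self, request):
        raise RuntimeError("primary unavailable")


class NoopAllocator:
    def free_threads(self, request):
        return "freed-by-secondary"


# Exercising the fallback path: the primary raises, so the secondary answers.
allocator = FallbackAllocator(FailingAllocator(), NoopAllocator())
assert allocator.free_threads(request=None) == "freed-by-secondary"
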
Example #10
    def free_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
        self.__call_meta = {}
        cpu = request.get_cpu()
        cpu_usage = request.get_cpu_usage()
        workloads = request.get_workloads()
        workload_id = request.get_workload_id()
        curr_ids_per_workload = cpu.get_workload_ids_to_thread_ids()

        if workload_id not in curr_ids_per_workload:
            raise Exception("workload_id=`%s` is not placed on the instance. Cannot free it." % (workload_id,))

        return AllocateResponse(
            self.__compute_allocation(cpu, workload_id, workloads, curr_ids_per_workload, cpu_usage, False),
            self.get_name(),
            self.__call_meta)
Example #11
    def free_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
        cpu = request.get_cpu()
        workloads = request.get_workloads()
        workload_id = request.get_workload_id()

        burst_workloads = get_burst_workloads(workloads.values())
        release_all_threads(cpu, burst_workloads)
        for t in cpu.get_threads():
            if workload_id in t.get_workload_ids():
                t.free(workload_id)

        workloads.pop(workload_id)
        update_burst_workloads(cpu, workloads, self.__free_thread_provider)

        return AllocateResponse(cpu, self.get_name())
Example #12
    def free_threads(self,
                     request: AllocateThreadsRequest) -> AllocateResponse:
        cpu = request.get_cpu()
        workloads = request.get_workloads()
        workload_id = request.get_workload_id()

        burst_workloads = get_burst_workloads(workloads.values())
        release_all_threads(cpu, burst_workloads)
        if workloads[workload_id].get_type() == STATIC:
            self.__free_threads(cpu, workload_id, workloads)
        workloads.pop(workload_id)
        metadata = {}
        update_burst_workloads(cpu, workloads, self.__free_thread_provider,
                               metadata)

        return AllocateResponse(
            cpu, get_workload_allocations(cpu, workloads.values()),
            self.get_name(), metadata)
Example #13
    def assign_threads(self,
                       request: AllocateThreadsRequest) -> AllocateResponse:
        log.info("Ignoring attempt to assign threads to workload: '{}'".format(
            request.get_workload_id()))
        return AllocateResponse(request.get_cpu(), self.get_name())