def free_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """Free a workload's threads via the remote allocator service.

    Issues a PUT to the /free_threads endpoint and deserializes the
    response body on success.

    Raises:
        requests.exceptions.Timeout: if the remote call times out.
        CpuAllocationException: if the service returns a non-200 status.
    """
    endpoint = "{}/free_threads".format(self.__url)
    payload = request.to_dict()

    try:
        log.info("freeing threads remotely for workload: %s", request.get_workload_id())
        response = requests.put(
            endpoint,
            json=payload,
            headers=self.__headers,
            timeout=self.__timeout)
    except requests.exceptions.Timeout as e:
        log.error("freeing threads remotely for workload: %s timed out", request.get_workload_id())
        raise e

    # Guard clause: anything other than 200 is a hard failure.
    if response.status_code != 200:
        log.error(
            "failed to free threads remotely for workload: %s with status code: %d",
            request.get_workload_id(), response.status_code)
        raise CpuAllocationException("Failed to free threads: {}".format(
            response.text))

    log.info(
        "freed threads remotely with response code: %s for workload: %s",
        response.status_code, request.get_workload_id())
    return deserialize_response(response.headers, response.json())
def test_shared_core_violation(self):
    """Sharing a core only happens once the cpu is nearly full."""
    allocator = IntegerProgramCpuAllocator()

    # Claim every thread but one; this fits without doubling up on a core.
    cpu = get_cpu()
    first = get_test_workload(uuid.uuid4(), len(cpu.get_threads()) - 1, STATIC)
    workload_map = {first.get_id(): first}
    request = AllocateThreadsRequest(
        cpu, first.get_id(), workload_map, {}, DEFAULT_TEST_REQUEST_METADATA)
    cpu = allocator.assign_threads(request).get_cpu()
    log.info("{}".format(cpu))
    violations = get_shared_core_violations(cpu)
    log.info("shared core violations: {}".format(violations))
    self.assertEqual(0, len(violations))

    # One more single-thread workload now has to share a core.
    second = get_test_workload(uuid.uuid4(), 1, STATIC)
    workload_map[second.get_id()] = second
    request = AllocateThreadsRequest(
        cpu, second.get_id(), workload_map, {}, DEFAULT_TEST_REQUEST_METADATA)
    cpu = allocator.assign_threads(request).get_cpu()
    log.info("{}".format(cpu))
    violations = get_shared_core_violations(cpu)
    log.info("shared core violations: {}".format(violations))
    self.assertEqual(1, len(violations))
def test_assign_more_than_available_threads_with_two_workloads(self):
    """Over-allocating allocators cap claims at the cpu's thread count."""
    for alloc in OVER_ALLOCATORS:
        cpu = get_cpu()
        filler = get_test_workload("fill", DEFAULT_TOTAL_THREAD_COUNT, STATIC)
        oversized = get_test_workload("extra", DEFAULT_TOTAL_THREAD_COUNT * 1.5, STATIC)

        # First workload consumes every thread on the cpu.
        request = AllocateThreadsRequest(
            cpu, filler.get_id(), {filler.get_id(): filler}, {},
            DEFAULT_TEST_REQUEST_METADATA)
        cpu = alloc.assign_threads(request).get_cpu()
        log.info(cpu)
        self.assertEqual(DEFAULT_TOTAL_THREAD_COUNT, len(cpu.get_claimed_threads()))
        self.assertEqual([filler.get_id()],
                         list(cpu.get_workload_ids_to_thread_ids().keys()))

        # The oversized second workload still gets placed, but the claimed
        # thread count cannot exceed the cpu's capacity.
        request = AllocateThreadsRequest(
            cpu, oversized.get_id(),
            {filler.get_id(): filler, oversized.get_id(): oversized}, {},
            DEFAULT_TEST_REQUEST_METADATA)
        cpu = alloc.assign_threads(request).get_cpu()
        log.info(cpu)
        self.assertEqual(DEFAULT_TOTAL_THREAD_COUNT, len(cpu.get_claimed_threads()))
        self.assertEqual(
            sorted([filler.get_id(), oversized.get_id()]),
            sorted(list(cpu.get_workload_ids_to_thread_ids().keys())))
def test_forecast_threshold_no_usage(self):
    """With no usage data, a static then a burst workload pack the cores fully."""
    allocator = ForecastIPCpuAllocator(
        TestCpuUsagePredictorManager(),
        ConfigManager(TestPropertyProvider({})),
        ThresholdFreeThreadProvider(0.1))

    half_threads = DEFAULT_TOTAL_THREAD_COUNT / 2
    cpu = get_cpu()

    static_workload = get_test_workload(uuid.uuid4(), half_threads, STATIC)
    request = AllocateThreadsRequest(
        cpu, static_workload.get_id(),
        {static_workload.get_id(): static_workload},
        {}, DEFAULT_TEST_REQUEST_METADATA)
    cpu = allocator.assign_threads(request).get_cpu()
    log.info(cpu)

    # Every core should hold exactly one claimed thread (one empty each).
    for core in cpu.get_cores():
        self.assertEqual(1, len(core.get_empty_threads()))

    burst_workload = get_test_workload(uuid.uuid4(), half_threads, BURST)
    request = AllocateThreadsRequest(
        cpu, burst_workload.get_id(),
        {static_workload.get_id(): static_workload,
         burst_workload.get_id(): burst_workload},
        {}, DEFAULT_TEST_REQUEST_METADATA)
    cpu = allocator.assign_threads(request).get_cpu()
    log.info(cpu)

    # Both threads of every core should end up with identical owners.
    for core in cpu.get_cores():
        self.assertEqual(core.get_threads()[0].get_workload_ids(),
                         core.get_threads()[1].get_workload_ids())
def free_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """Release every thread claimed by the request's workload and return the cpu."""
    cpu = request.get_cpu()
    target = request.get_workloads()[request.get_workload_id()]
    target_id = target.get_id()

    # Thread.free() is expected to be a no-op for threads the workload
    # doesn't hold, so freeing across all threads is safe.
    for thread in cpu.get_threads():
        thread.free(target_id)

    return AllocateResponse(cpu, self.get_name())
def free_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """No-op free: log the attempt and echo back the unmodified cpu."""
    log.info("Ignoring attempt to free threads for workload: '{}'".format(
        request.get_workload_id()))
    cpu = request.get_cpu()
    allocations = get_workload_allocations(
        cpu, list(request.get_workloads().values()))
    return AllocateResponse(cpu, allocations, self.get_name())
def assign_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """Claim threads for the request's workload and return the updated cpu."""
    cpu = request.get_cpu()
    workload = request.get_workloads()[request.get_workload_id()]
    workload_id = workload.get_id()

    # Thread selection strategy is delegated to the subclass/implementation.
    for thread in self._get_assign_threads(cpu, workload.get_thread_count()):
        thread.claim(workload_id)

    return AllocateResponse(cpu, self.get_name())
def assign_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """Reset the workload's cpuset to every thread on the cpu (no isolation)."""
    all_thread_ids = list(range(len(request.get_cpu().get_threads())))
    log.info(
        "Setting cpuset.cpus to ALL cpus: '{}' for workload: '{}'".format(
            all_thread_ids, request.get_workload_id()))
    self.__cgroup_manager.set_cpuset(request.get_workload_id(), all_thread_ids)
    return AllocateResponse(request.get_cpu(), self.get_name())
def assign_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """Place a workload, then recompute burst placements.

    Burst claims are released first so the static placement happens on a
    cpu free of burst workloads; burst workloads are then re-expanded over
    whatever the free-thread provider allows.
    """
    cpu = request.get_cpu()
    workload_map = request.get_workloads()
    target_id = request.get_workload_id()

    release_all_threads(cpu, get_burst_workloads(workload_map.values()))
    # Only static workloads get explicit placement; burst workloads are
    # handled by the update step below.
    if workload_map[target_id].get_type() == STATIC:
        self.__assign_threads(cpu, workload_map[target_id])
    update_burst_workloads(cpu, workload_map, self.__free_thread_provider)

    return AllocateResponse(cpu, self.get_name())
def assign_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """Delegate placement to the allocation solver (assign mode)."""
    self.__call_meta = {}
    cpu = request.get_cpu()
    cpu_usage = request.get_cpu_usage()
    workloads = request.get_workloads()
    workload_id = request.get_workload_id()
    current_placement = cpu.get_workload_ids_to_thread_ids()

    placement = self.__compute_allocation(
        cpu, workload_id, workloads, current_placement, cpu_usage, True)
    return AllocateResponse(placement, self.get_name(), self.__call_meta)
def test_assign_free_threads(self):
    """Local allocator and HTTP endpoint must produce identical cpus for
    both assign_threads and free_threads."""
    cpu = get_cpu()
    workload = get_test_workload("a", 2, STATIC)
    workload_map = {workload.get_id(): workload}
    cpu_allocator = GreedyCpuAllocator()
    self.__set_cpu_allocator(cpu_allocator)

    def via_http(endpoint, cpu_arg):
        # Round-trip the same request through the HTTP endpoint.
        body = AllocateThreadsRequest(
            cpu_arg, workload.get_id(), workload_map, {},
            DEFAULT_TEST_REQUEST_METADATA).to_dict()
        raw = self.client.put(
            endpoint, data=json.dumps(body), content_type='application/json')
        return deserialize_response(raw.headers, raw.json).get_cpu()

    # Assign threads
    log.info("Assign threads")
    request = AllocateThreadsRequest(
        copy.deepcopy(cpu), workload.get_id(), workload_map, {},
        DEFAULT_TEST_REQUEST_METADATA)
    cpu_out_0 = cpu_allocator.assign_threads(request).get_cpu()
    cpu_out_1 = via_http("/assign_threads", copy.deepcopy(cpu))
    log.info("cpu_out_0: {}".format(cpu_out_0))
    log.info("cpu_out_1: {}".format(cpu_out_1))
    self.assertEqual(cpu_out_0.to_dict(), cpu_out_1.to_dict())

    # Free threads
    log.info("Free threads")
    request = AllocateThreadsRequest(
        copy.deepcopy(cpu_out_0), workload.get_id(), workload_map, {},
        DEFAULT_TEST_REQUEST_METADATA)
    cpu_out_0 = cpu_allocator.free_threads(request).get_cpu()
    cpu_out_1 = via_http("/free_threads", copy.deepcopy(cpu_out_1))
    log.info("cpu_out_0: {}".format(cpu_out_0))
    log.info("cpu_out_1: {}".format(cpu_out_1))
    self.assertEqual(cpu_out_0.to_dict(), cpu_out_1.to_dict())
def free_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """Free a workload's threads, then recompute burst placements."""
    cpu = request.get_cpu()
    workload_map = request.get_workloads()
    target_id = request.get_workload_id()

    release_all_threads(cpu, get_burst_workloads(workload_map.values()))

    # Drop the target's claims from every thread that holds them.
    for thread in cpu.get_threads():
        if target_id in thread.get_workload_ids():
            thread.free(target_id)

    # The freed workload no longer participates in burst re-expansion.
    workload_map.pop(target_id)
    update_burst_workloads(cpu, workload_map, self.__free_thread_provider)

    return AllocateResponse(cpu, self.get_name())
def test_no_change_populated_cpu(self):
    """Two identical placements of the same workload are not 'better' than
    each other."""
    workload = get_test_workload(uuid.uuid4(), 4, STATIC)
    workload_map = {workload.get_id(): workload}

    def freshly_placed_cpu():
        # A fresh cpu and a fresh allocator per call, same workload.
        request = AllocateThreadsRequest(
            get_cpu(), workload.get_id(), workload_map, {},
            DEFAULT_TEST_REQUEST_METADATA)
        return GreedyCpuAllocator().assign_threads(request).get_cpu()

    cur_cpu = freshly_placed_cpu()
    new_cpu = freshly_placed_cpu()
    self.assertFalse(has_better_isolation(cur_cpu, new_cpu))
def free_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """Free a workload via the allocation solver (free mode).

    Raises:
        Exception: if the workload is not currently placed on the instance.
    """
    self.__call_meta = {}
    cpu = request.get_cpu()
    target_id = request.get_workload_id()
    current_placement = cpu.get_workload_ids_to_thread_ids()

    # Freeing something that was never placed indicates a caller bug.
    if target_id not in current_placement:
        raise Exception("workload_id=`%s` is not placed on the instance. Cannot free it." % (target_id,))

    placement = self.__compute_allocation(
        cpu, target_id, request.get_workloads(), current_placement,
        request.get_cpu_usage(), False)
    return AllocateResponse(placement, self.get_name(), self.__call_meta)
def test_external_cpu_manipulation(self):
    """Externally-claimed threads don't violate until an allocator must share."""
    cpu = get_cpu()
    violations = get_shared_core_violations(cpu)
    log.info("shared core violations: {}".format(violations))
    self.assertEqual(0, len(violations))

    # Claim one thread on every core from "outside" the allocator.
    dummy_workload_id = uuid.uuid4()
    for package in cpu.get_packages():
        for core in package.get_cores():
            core.get_threads()[0].claim(dummy_workload_id)

    violations = get_shared_core_violations(cpu)
    log.info("shared core violations: {}".format(violations))
    self.assertEqual(0, len(violations))

    # A real 2-thread assignment now has to double up on cores.
    allocator = GreedyCpuAllocator()
    workload = get_test_workload(uuid.uuid4(), 2, STATIC)
    request = AllocateThreadsRequest(
        cpu, workload.get_id(), {workload.get_id(): workload}, {},
        DEFAULT_TEST_REQUEST_METADATA)
    cpu = allocator.assign_threads(request).get_cpu()

    violations = get_shared_core_violations(cpu)
    log.info("shared core violations: {}".format(violations))
    self.assertEqual(2, len(violations))
def test_fill_cpu(self):
    """Exactly fill a 16-thread cpu with workloads of 8 + 4 + 2 + 1 + 1 threads,
    one assignment at a time, for every allocator."""
    sizes = [("a", 8), ("b", 4), ("c", 2), ("d", 1), ("e", 1)]
    for allocator in ALLOCATORS:
        cpu = get_cpu()
        workload_map = {}
        expected_claimed = 0
        for name, thread_count in sizes:
            w = get_test_workload(name, thread_count, STATIC)
            workload_map[w.get_id()] = w
            request = AllocateThreadsRequest(
                cpu, w.get_id(), workload_map, {},
                DEFAULT_TEST_REQUEST_METADATA)
            cpu = allocator.assign_threads(request).get_cpu()
            log.debug("{}".format(cpu))
            expected_claimed += w.get_thread_count()

        self.assertEqual(expected_claimed, len(cpu.get_claimed_threads()))
def __get_threads_request(self, workload_id, workload_map, request_type):
    """Build an AllocateThreadsRequest snapshot for the given workload."""
    cpu_snapshot = self.get_cpu_copy()
    usage = self.__get_cpu_usage()
    metadata = self.__get_request_metadata(request_type)
    return AllocateThreadsRequest(
        cpu_snapshot, workload_id, workload_map, usage, metadata)
def test_assign_ten_threads_empty_cpu_ip(self):
    """
    Workload 0: 10 threads --> (p:0 c:[0-7] t:[0-9])
    | 1 | 1 | 1 | 1 |
    | 1 | 1 |   |   |
    -----------------
    | 1 | 1 | 1 | 1 |
    |   |   |   |   |
    """
    for allocator in [IntegerProgramCpuAllocator(), forecast_ip_alloc_simple]:
        cpu = get_cpu()
        workload = get_test_workload(uuid.uuid4(), 10, STATIC)
        request = AllocateThreadsRequest(
            cpu, workload.get_id(), {workload.get_id(): workload}, {},
            DEFAULT_TEST_REQUEST_METADATA)
        cpu = allocator.assign_threads(request).get_cpu()
        self.assertEqual(10, len(cpu.get_claimed_threads()))

        # The ten claimed threads should be split evenly across sockets.
        claimed_per_socket = [
            sum(1
                for core in package.get_cores()
                for thread in core.get_threads()
                if thread.is_claimed())
            for package in cpu.get_packages()
        ]
        self.assertEqual(5, claimed_per_socket[0])
        self.assertEqual(5, claimed_per_socket[1])
def __get_threads_request(self, workload_id, workload_map, request_type):
    """Assemble an AllocateThreadsRequest including current resource usage."""
    resource_usage = self.__wmm.get_resource_usage(workload_map.keys())
    log.debug("resource_usage: %s", json.dumps(resource_usage.serialize()))

    def usage_or_empty(getter):
        # Fall back to an empty mapping when a usage type is unavailable.
        return self.__get_optional_default(getter, {})

    return AllocateThreadsRequest(
        cpu=self.get_cpu_copy(),
        workload_id=workload_id,
        workloads=workload_map,
        resource_usage=resource_usage,
        cpu_usage=usage_or_empty(resource_usage.get_cpu_usage),
        mem_usage=usage_or_empty(resource_usage.get_mem_usage),
        net_recv_usage=usage_or_empty(resource_usage.get_net_recv_usage),
        net_trans_usage=usage_or_empty(resource_usage.get_net_trans_usage),
        disk_usage=usage_or_empty(resource_usage.get_disk_usage),
        metadata=self.__get_request_metadata(request_type))
def test_forecast_ip_big_burst_pool_if_empty_instance(self):
    """A lone burst workload takes a big pool; a static arrival shrinks it;
    freeing + rebalancing restores it."""
    cpu = get_cpu()
    allocator = forecast_ip_alloc_simple

    burst_workload = get_test_workload("a", 1, BURST)
    request = AllocateThreadsRequest(
        cpu, "a", {"a": burst_workload}, {}, DEFAULT_TEST_REQUEST_METADATA)
    cpu = allocator.assign_threads(request).get_cpu()
    original_burst_claim_sz = len(cpu.get_claimed_threads())
    # Should at least consume one thread on every core (half the threads).
    self.assertLessEqual(
        len(cpu.get_threads()) / 2, original_burst_claim_sz)

    static_workload = get_test_workload("b", 3, STATIC)
    request = AllocateThreadsRequest(
        cpu, "b", {"a": burst_workload, "b": static_workload}, {},
        DEFAULT_TEST_REQUEST_METADATA)
    cpu = allocator.assign_threads(request).get_cpu()

    # NOTE(review): this measures threads of the *static* workload ("b");
    # possibly the intent was the burst workload's shrunken pool — verify.
    new_burst_claim_sz = len(get_threads_with_workload(cpu, static_workload.get_id()))
    self.assertLess(new_burst_claim_sz, original_burst_claim_sz)

    total_claim_sz = len(cpu.get_claimed_threads())
    self.assertLessEqual(3 + 1, total_claim_sz)
    self.assertLessEqual(1, new_burst_claim_sz)

    # There shouldn't be an empty core.
    for package in cpu.get_packages():
        for core in package.get_cores():
            self.assertLess(0, sum(t.is_claimed() for t in core.get_threads()))

    request = AllocateThreadsRequest(
        cpu, "b", {"a": burst_workload, "b": static_workload}, {},
        DEFAULT_TEST_REQUEST_METADATA)
    cpu = allocator.free_threads(request).get_cpu()
    request = AllocateRequest(
        cpu, {"a": burst_workload}, {}, DEFAULT_TEST_REQUEST_METADATA)
    cpu = allocator.rebalance(request).get_cpu()
    # Burst pool should regain its original size once the static workload leaves.
    self.assertEqual(original_burst_claim_sz, len(cpu.get_claimed_threads()))
def free_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """Free a static workload's threads and rebalance burst workloads."""
    cpu = request.get_cpu()
    workload_map = request.get_workloads()
    target_id = request.get_workload_id()

    release_all_threads(cpu, get_burst_workloads(workload_map.values()))
    # Only static workloads have explicit claims to release here; burst
    # claims were already dropped above.
    if workload_map[target_id].get_type() == STATIC:
        self.__free_threads(cpu, target_id, workload_map)

    workload_map.pop(target_id)
    burst_metadata = {}
    update_burst_workloads(
        cpu, workload_map, self.__free_thread_provider, burst_metadata)

    return AllocateResponse(
        cpu,
        get_workload_allocations(cpu, workload_map.values()),
        self.get_name(),
        burst_metadata)
def test_forecast_threshold_usage(self):
    """With predicted usage, static cores stay unshared while burst packs
    cores only with itself."""
    allocator = ForecastIPCpuAllocator(
        TestCpuUsagePredictorManager(TestCpuUsagePredictor(10)),
        ConfigManager(TestPropertyProvider({})),
        ThresholdFreeThreadProvider(0.05))

    quarter_threads = DEFAULT_TOTAL_THREAD_COUNT / 4
    cpu = get_cpu()

    static_workload = get_test_workload(uuid.uuid4(), quarter_threads, STATIC)
    log.info(static_workload)
    request = AllocateThreadsRequest(
        cpu, static_workload.get_id(),
        {static_workload.get_id(): static_workload},
        {}, DEFAULT_TEST_REQUEST_METADATA)
    cpu = allocator.assign_threads(request).get_cpu()
    log.info(cpu)

    # Placement spreads out: every core keeps one or two empty threads.
    for core in cpu.get_cores():
        self.assertTrue(len(core.get_empty_threads()) in (1, 2))

    burst_workload = get_test_workload(uuid.uuid4(), quarter_threads, BURST)
    log.info(burst_workload)
    request = AllocateThreadsRequest(
        cpu, burst_workload.get_id(),
        {static_workload.get_id(): static_workload,
         burst_workload.get_id(): burst_workload},
        {}, DEFAULT_TEST_REQUEST_METADATA)
    cpu = allocator.assign_threads(request).get_cpu()
    log.info(cpu)

    for core in cpu.get_cores():
        empty_count = len(core.get_empty_threads())
        if empty_count == 1:
            # Cores half-used must belong solely to the static workload.
            for thread in core.get_threads():
                if thread.is_claimed():
                    self.assertEqual([static_workload.get_id()],
                                     thread.get_workload_ids())
        if empty_count == 0:
            # Fully-packed cores are shared only by the burst workload itself.
            self.assertEqual(core.get_threads()[0].get_workload_ids(),
                             core.get_threads()[1].get_workload_ids())
            self.assertEqual([burst_workload.get_id()],
                             core.get_threads()[1].get_workload_ids())
def test_filling_holes_ip(self):
    """
    Initialize with fragmented placement, then fill the instance. Result
    should be less fragmented, with the first workload completely filling
    a socket.

    | a |   | a |   |
    |   | a |   | a |
    -----------------
    |   | a |   | a |
    | a |   | a |   |
    """
    cpu = get_cpu()
    allocator = IntegerProgramCpuAllocator()

    # Initialize fragmented workload: 8 threads in a checkerboard pattern.
    wa = get_test_workload(uuid.uuid4(), 8, STATIC)
    p0 = cpu.get_packages()[0]
    p0.get_cores()[0].get_threads()[0].claim(wa.get_id())
    p0.get_cores()[1].get_threads()[1].claim(wa.get_id())
    p0.get_cores()[2].get_threads()[0].claim(wa.get_id())
    p0.get_cores()[3].get_threads()[1].claim(wa.get_id())
    p1 = cpu.get_packages()[1]
    p1.get_cores()[0].get_threads()[1].claim(wa.get_id())
    p1.get_cores()[1].get_threads()[0].claim(wa.get_id())
    p1.get_cores()[2].get_threads()[1].claim(wa.get_id())
    p1.get_cores()[3].get_threads()[0].claim(wa.get_id())
    self.assertEqual(8, len(cpu.get_empty_threads()))

    # Fill the rest of the CPU with 2 + 3 + 1 + 2 = 8 more threads.
    w0 = get_test_workload(uuid.uuid4(), 2, STATIC)
    w1 = get_test_workload(uuid.uuid4(), 3, STATIC)
    w2 = get_test_workload(uuid.uuid4(), 1, STATIC)
    w3 = get_test_workload(uuid.uuid4(), 2, STATIC)
    workload_map = {wa.get_id(): wa}
    for w in [w0, w1, w2, w3]:
        workload_map[w.get_id()] = w
        request = AllocateThreadsRequest(
            cpu, w.get_id(), workload_map, {},
            DEFAULT_TEST_REQUEST_METADATA)
        cpu = allocator.assign_threads(request).get_cpu()

    self.assertEqual(0, len(cpu.get_empty_threads()))

    # The first workload should completely fill one socket to avoid a
    # cross-socket job layout.
    # BUG FIX: the original guard compared get_workload_ids() (a list) to
    # a bare workload id, which never matched, so the final assertion was
    # dead code and the check could never fail.
    filled_socket_found = False
    for package in cpu.get_packages():
        thread_owners = [
            t.get_workload_ids()
            for core in package.get_cores()
            for t in core.get_threads()
        ]
        if thread_owners == [[wa.get_id()]] * 8:
            filled_socket_found = True
    self.assertTrue(filled_socket_found)
def test_one_cross_package_violation(self):
    """A 9-thread workload cannot fit within a single package, so exactly
    one cross-package violation is expected."""
    cpu = get_cpu()
    allocator = IntegerProgramCpuAllocator()

    workload = get_test_workload(uuid.uuid4(), 9, STATIC)
    request = AllocateThreadsRequest(
        cpu, workload.get_id(), {workload.get_id(): workload}, {},
        DEFAULT_TEST_REQUEST_METADATA)
    cpu = allocator.assign_threads(request).get_cpu()

    violations = get_cross_package_violations(cpu)
    self.assertEqual(1, len(violations))
def get_no_usage_threads_request(cpu: Cpu, workloads: List[Workload]):
    """Build an AllocateThreadsRequest with empty usage data, targeting the
    last workload in the list."""
    target = workloads[-1]
    # Distinct empty dicts for each usage channel.
    empty_usages = dict(
        cpu_usage={},
        mem_usage={},
        net_recv_usage={},
        net_trans_usage={},
        disk_usage={})
    return AllocateThreadsRequest(
        cpu=cpu,
        workload_id=target.get_id(),
        workloads=__workloads_list_to_map(workloads),
        resource_usage=GlobalResourceUsage({}),
        metadata=DEFAULT_TEST_REQUEST_METADATA,
        **empty_usages)
def assign_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """Assign threads via the remote allocator service.

    Issues a PUT to the /assign_threads endpoint and deserializes the
    response body on success.

    Raises:
        requests.exceptions.Timeout: if the remote call times out.
        CpuAllocationException: if the service returns a non-200 status.
    """
    url = "{}/assign_threads".format(self.__url)
    body = request.to_dict()
    log.debug("url: {}, body: {}".format(url, body))
    try:
        response = requests.put(url, json=body, headers=self.__headers, timeout=self.__timeout)
    except requests.exceptions.Timeout as e:
        # Consistency fix: free_threads() logs timeouts explicitly before
        # propagating; assign_threads now does the same.
        log.error("assigning threads remotely for workload: %s timed out", request.get_workload_id())
        raise e
    log.debug("assign_threads response code: {}".format(response.status_code))
    if response.status_code == 200:
        return deserialize_response(response.headers, response.json())
    raise CpuAllocationException("Failed to assign threads: {}".format(response.text))
def test_assign_two_workloads_empty_cpu_ip(self):
    """
    Workload 0: 2 threads --> (p:0 c:0 t:0) (p:0 c:1 t:0)
    Workload 1: 1 thread  --> (p:1 c:0 t:0)
    """
    for allocator in [IntegerProgramCpuAllocator(), forecast_ip_alloc_simple]:
        cpu = get_cpu()
        w0 = get_test_workload(uuid.uuid4(), 2, STATIC)
        w1 = get_test_workload(uuid.uuid4(), 1, STATIC)

        request0 = AllocateThreadsRequest(
            cpu, w0.get_id(), {w0.get_id(): w0}, {},
            DEFAULT_TEST_REQUEST_METADATA)
        cpu = allocator.assign_threads(request0).get_cpu()

        request1 = AllocateThreadsRequest(
            cpu, w1.get_id(), {w0.get_id(): w0, w1.get_id(): w1}, {},
            DEFAULT_TEST_REQUEST_METADATA)
        cpu = allocator.assign_threads(request1).get_cpu()

        self.assertEqual(3, len(cpu.get_claimed_threads()))

        # Collect (workload_id, core_id) for each claimed thread, per socket.
        ids_per_socket = []
        for package in cpu.get_packages():
            claimed = []
            for core in package.get_cores():
                for thread in core.get_threads():
                    if thread.is_claimed():
                        self.assertEqual(1, len(thread.get_workload_ids()))
                        claimed.append(
                            (thread.get_workload_ids()[0], core.get_id()))
            ids_per_socket.append(claimed)

        for claimed in ids_per_socket:
            # Each workload should be on a different socket.
            self.assertEqual(1, len({pair[0] for pair in claimed}))
            # Assigned threads should be on different cores.
            core_ids = [pair[1] for pair in claimed]
            self.assertEqual(len(set(core_ids)), len(core_ids))
def test_assign_threads(self):
    """Noop-reset assignment updates the container once, with every thread."""
    cpu = get_cpu()
    cgroup_manager = MockCgroupManager()
    cpu_allocator = NoopResetCpuAllocator("", cgroup_manager)

    workload = get_test_workload(uuid.uuid4(), 1, STATIC)
    request = AllocateThreadsRequest(
        cpu, workload.get_id(), {workload.get_id(): workload}, {},
        DEFAULT_TEST_REQUEST_METADATA)
    cpu_allocator.assign_threads(request)

    wid = workload.get_id()
    self.assertEqual(1, cgroup_manager.container_update_counts[wid])
    self.assertEqual(len(cpu.get_threads()),
                     len(cgroup_manager.container_update_map[wid]))
def free_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:
    """Free threads with the primary allocator, falling back to the secondary
    on failure.

    Call counters are incremented before each attempt so the counts reflect
    attempts, not successes.

    Returns:
        The AllocateResponse from whichever allocator succeeded.
    """
    try:
        self.__primary_free_threads_call_count += 1
        return self.__primary_allocator.free_threads(request)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; only genuine allocator failures should trigger
        # the fallback.
        log.exception(
            "Failed to free threads for workload: '{}' with primary allocator: '{}', falling back to: '{}'".format(
                request.get_workload_id(),
                self.__primary_allocator.__class__.__name__,
                self.__secondary_allocator.__class__.__name__))
        self.__secondary_free_threads_call_count += 1
        return self.__secondary_allocator.free_threads(request)
def test_balance_forecast_ip(self):
    """Rebalancing keeps static placement fixed and the burst pool at size
    >= 4, even when repeated many times."""
    cpu = get_cpu()
    static_w = get_test_workload("a", 2, STATIC)
    burst_w = get_test_workload("b", 4, BURST)
    allocator = forecast_ip_alloc_simple

    request = AllocateThreadsRequest(
        cpu, "a", {"a": static_w}, {}, DEFAULT_TEST_REQUEST_METADATA)
    cpu = allocator.assign_threads(request).get_cpu()

    request = AllocateThreadsRequest(
        cpu, "b", {"a": static_w, "b": burst_w}, {},
        DEFAULT_TEST_REQUEST_METADATA)
    cpu = allocator.assign_threads(request).get_cpu()

    request = AllocateRequest(
        cpu, {"a": static_w, "b": burst_w}, {}, DEFAULT_TEST_REQUEST_METADATA)
    cpu = allocator.rebalance(request).get_cpu()

    self.assertLessEqual(2 + 4, len(cpu.get_claimed_threads()))
    placements = cpu.get_workload_ids_to_thread_ids()
    self.assertEqual(2, len(placements["a"]))
    self.assertLessEqual(4, len(placements["b"]))  # burst got at least 4

    # Repeated rebalancing must keep the placement stable.
    for _ in range(20):
        request = AllocateRequest(
            cpu, {"a": static_w, "b": burst_w}, {},
            DEFAULT_TEST_REQUEST_METADATA)
        cpu = allocator.rebalance(request).get_cpu()
        placements = cpu.get_workload_ids_to_thread_ids()
        self.assertEqual(2, len(placements["a"]))
        self.assertLessEqual(4, len(placements["b"]))