def test_assign_two_workloads_empty_cpu_greedy(self):
    """
    Workload 0: 2 threads --> (p:0 c:0 t:0) (p:0 c:0 t:1)
    Workload 1: 1 thread --> (p:1 c:0 t:0)
    """
    allocator = GreedyCpuAllocator()
    workload_a = get_test_workload(uuid.uuid4(), 2, STATIC)
    workload_b = get_test_workload(uuid.uuid4(), 1, STATIC)

    # Place the two-thread workload first, then add the single-thread workload.
    cpu = get_cpu()
    cpu = allocator.assign_threads(get_no_usage_threads_request(cpu, [workload_a])).get_cpu()
    cpu = allocator.assign_threads(get_no_usage_threads_request(cpu, [workload_a, workload_b])).get_cpu()
    self.assertEqual(3, len(cpu.get_claimed_threads()))

    packages = cpu.get_packages()

    # Workload A should claim both hyper-threads of package 0, core 0 (ids 0 and 8).
    first_core = packages[0].get_cores()[0]
    for expected_id, thread in zip([0, 8], first_core.get_threads()):
        self.assertEqual(expected_id, thread.get_id())
        self.assertTrue(thread.is_claimed())

    # Workload B should claim the first thread (id 4) of package 1, core 0.
    claimed = packages[1].get_cores()[0].get_threads()[0]
    self.assertEqual(4, claimed.get_id())
    self.assertTrue(claimed.is_claimed())
def test_external_cpu_manipulation(self):
    """Externally claiming one thread per core causes no sharing violations;
    a later static assignment that forces co-residency does."""
    cpu = get_cpu()

    def assert_violation_count(expected, c):
        # Log and check the shared-core violation count in one place.
        violations = get_shared_core_violations(c)
        log.info("shared core violations: {}".format(violations))
        self.assertEqual(expected, len(violations))

    # A fresh CPU has no shared-core violations.
    assert_violation_count(0, cpu)

    # Claim 1 thread on every core: still one workload per core, so no sharing.
    dummy_workload_id = uuid.uuid4()
    for package in cpu.get_packages():
        for core in package.get_cores():
            core.get_threads()[0].claim(dummy_workload_id)
    assert_violation_count(0, cpu)

    # Assign another workload which will force core sharing.
    allocator = GreedyCpuAllocator()
    workload = get_test_workload(uuid.uuid4(), 2, STATIC)
    request = AllocateThreadsRequest(
        cpu, workload.get_id(), {workload.get_id(): workload}, {}, DEFAULT_TEST_REQUEST_METADATA)
    cpu = allocator.assign_threads(request).get_cpu()
    assert_violation_count(2, cpu)
def test_override_previous_assignment(self):
    """
    Workload 0: 1 thread --> (p:0 c:0 t:0)
    """
    w_single = get_test_workload(uuid.uuid4(), 1, STATIC)
    w_double = get_test_workload(uuid.uuid4(), 2, STATIC)

    cpu = get_cpu()
    self.assertEqual(DEFAULT_TOTAL_THREAD_COUNT, len(cpu.get_empty_threads()))

    # First placement via the greedy allocator claims exactly one thread.
    greedy_allocator = GreedyCpuAllocator()
    cpu = greedy_allocator.assign_threads(
        get_no_usage_threads_request(cpu, [w_single])).get_cpu()
    log.info(cpu)
    self.assertEqual(DEFAULT_TOTAL_THREAD_COUNT - 1, len(cpu.get_empty_threads()))
    self.assertEqual(1, len(cpu.get_claimed_threads()))

    # Re-assign with the module-level noop-reset allocator, which discards the
    # previous placement and claims every thread for both workloads.
    cpu = noop_reset_allocator.assign_threads(
        get_no_usage_threads_request(cpu, [w_single, w_double])).get_cpu()
    log.info(cpu)
    self.assertEqual(0, len(cpu.get_empty_threads()))
    self.assertEqual(DEFAULT_TOTAL_THREAD_COUNT, len(cpu.get_claimed_threads()))

    # Every thread now belongs to both workloads.
    for thread in cpu.get_threads():
        workload_ids = thread.get_workload_ids()
        self.assertEqual(2, len(workload_ids))
        self.assertTrue(w_single.get_id() in workload_ids)
        self.assertTrue(w_double.get_id() in workload_ids)
def test_allocators_that_are_none(self):
    """FallbackCpuAllocator must reject a None primary or secondary allocator."""
    invalid_pairs = [
        (GreedyCpuAllocator(), None),
        (None, GreedyCpuAllocator()),
        (None, None),
    ]
    for primary, secondary in invalid_pairs:
        with self.assertRaises(ValueError):
            FallbackCpuAllocator(primary, secondary)
def test_assign_free_threads(self):
    """Direct allocator calls and the HTTP endpoints must produce identical CPUs
    for both assignment and freeing."""
    cpu = get_cpu()
    workload = get_test_workload("a", 2, STATIC)
    workloads = {workload.get_id(): workload}
    cpu_allocator = GreedyCpuAllocator()
    self.__set_cpu_allocator(cpu_allocator)

    def via_server(endpoint, cpu_arg):
        # Round-trip the equivalent request through the web endpoint.
        body = AllocateThreadsRequest(
            cpu_arg, workload.get_id(), workloads, {}, DEFAULT_TEST_REQUEST_METADATA).to_dict()
        response = self.client.put(endpoint, data=json.dumps(body), content_type='application/json')
        return deserialize_response(response.headers, response.json).get_cpu()

    # Assign threads
    log.info("Assign threads")
    request = AllocateThreadsRequest(
        copy.deepcopy(cpu), workload.get_id(), workloads, {}, DEFAULT_TEST_REQUEST_METADATA)
    cpu_out_0 = cpu_allocator.assign_threads(request).get_cpu()
    cpu_out_1 = via_server("/assign_threads", copy.deepcopy(cpu))
    log.info("cpu_out_0: {}".format(cpu_out_0))
    log.info("cpu_out_1: {}".format(cpu_out_1))
    self.assertEqual(cpu_out_0.to_dict(), cpu_out_1.to_dict())

    # Free threads
    log.info("Free threads")
    request = AllocateThreadsRequest(
        copy.deepcopy(cpu_out_0), workload.get_id(), workloads, {}, DEFAULT_TEST_REQUEST_METADATA)
    cpu_out_0 = cpu_allocator.free_threads(request).get_cpu()
    cpu_out_1 = via_server("/free_threads", copy.deepcopy(cpu_out_1))
    log.info("cpu_out_0: {}".format(cpu_out_0))
    log.info("cpu_out_1: {}".format(cpu_out_1))
    self.assertEqual(cpu_out_0.to_dict(), cpu_out_1.to_dict())
def test_thread_allocation_computation(self):
    """Verify allocation-size gauges for a static + burst workload pair,
    before and after the burst workload oversubscribes the static threads."""
    for allocator in [IntegerProgramCpuAllocator(), GreedyCpuAllocator()]:
        static_thread_count = 2
        burst_thread_count = 4
        w_static = get_test_workload("s", static_thread_count, STATIC)
        w_burst = get_test_workload("b", burst_thread_count, BURST)

        registry = Registry()
        workload_manager = WorkloadManager(get_cpu(), MockCgroupManager(), allocator)
        workload_manager.set_registry(registry, {})
        workload_manager.add_workload(w_static)
        workload_manager.add_workload(w_burst)
        workload_manager.report_metrics({})

        total_thread_count = len(workload_manager.get_cpu().get_threads())
        # The burst workload receives everything the static workload didn't take.
        expected_burst_allocation_size = total_thread_count - static_thread_count

        expectations = {
            ALLOCATED_SIZE_KEY: total_thread_count,
            UNALLOCATED_SIZE_KEY: 0,
            STATIC_ALLOCATED_SIZE_KEY: static_thread_count,
            BURST_ALLOCATED_SIZE_KEY: expected_burst_allocation_size,
            BURST_REQUESTED_SIZE_KEY: burst_thread_count,
            OVERSUBSCRIBED_THREADS_KEY: 0,
        }
        for key, expected in expectations.items():
            self.assertTrue(gauge_value_equals(registry, key, expected))

        # Claim every thread for the burst workload which will oversubscribe the static threads
        for t in workload_manager.get_cpu().get_threads():
            t.claim(w_burst.get_id())
        workload_manager.report_metrics({})

        # Only the burst allocation and oversubscription gauges change.
        expectations.update({
            BURST_ALLOCATED_SIZE_KEY: total_thread_count,
            OVERSUBSCRIBED_THREADS_KEY: static_thread_count,
        })
        for key, expected in expectations.items():
            self.assertTrue(gauge_value_equals(registry, key, expected))
def test_assign_two_threads_empty_cpu_greedy(self):
    """
    Workload 0: 2 threads --> (p:0 c:0 t:0) (p:0 c:0 t:1)

    NOTE(review): the original docstring claimed placement at (p:0 c:1 t:1),
    but the assertions below check both threads of package 0, core 0
    (thread ids 0 and 8), so the docstring has been corrected.
    """
    cpu = get_cpu()
    # Legacy API: the allocator wraps the CPU and mutates it in place.
    allocator = GreedyCpuAllocator(cpu)
    w = Workload(uuid.uuid4(), 2, STATIC)

    allocator.assign_threads(w)
    self.assertEqual(2, len(cpu.get_claimed_threads()))

    # Expected core and threads
    core00 = cpu.get_packages()[0].get_cores()[0]
    thread0 = core00.get_threads()[0]
    self.assertEqual(0, thread0.get_id())
    self.assertTrue(thread0.is_claimed())
    # Sibling hyper-thread of the same core; id 8 in this test topology.
    thread1 = core00.get_threads()[1]
    self.assertEqual(8, thread1.get_id())
    self.assertTrue(thread1.is_claimed())
def test_assign_free_threads(self):
    """Allocator invoked directly and via the HTTP API must agree, for both
    assigning and freeing threads."""
    cpu = get_cpu()
    workload = get_test_workload("a", 2, STATIC)
    cpu_allocator = GreedyCpuAllocator()
    self.__set_cpu_allocator(cpu_allocator)

    def via_server(endpoint, source_cpu):
        # Round-trip the equivalent request through the web endpoint.
        request = get_no_usage_threads_request(source_cpu, [workload])
        response = self.client.put(
            endpoint,
            data=json.dumps(request.to_dict()),
            content_type='application/json')
        return deserialize_response(response.headers, response.json).get_cpu()

    # Assign threads
    log.info("Assign threads")
    cpu_out_0 = cpu_allocator.assign_threads(
        get_no_usage_threads_request(copy.deepcopy(cpu), [workload])).get_cpu()
    cpu_out_1 = via_server("/assign_threads", copy.deepcopy(cpu))
    log.info("cpu_out_0: {}".format(cpu_out_0))
    log.info("cpu_out_1: {}".format(cpu_out_1))
    self.assertEqual(cpu_out_0.to_dict(), cpu_out_1.to_dict())

    # Free threads
    log.info("Free threads")
    cpu_out_0 = cpu_allocator.free_threads(
        get_no_usage_threads_request(copy.deepcopy(cpu_out_0), [workload])).get_cpu()
    cpu_out_1 = via_server("/free_threads", copy.deepcopy(cpu_out_1))
    log.info("cpu_out_0: {}".format(cpu_out_0))
    log.info("cpu_out_1: {}".format(cpu_out_1))
    self.assertEqual(cpu_out_0.to_dict(), cpu_out_1.to_dict())
def test_no_change_populated_cpu(self):
    """Two identical greedy placements should not differ in isolation quality."""
    workload = Workload(uuid.uuid4(), 4, STATIC)

    # Legacy API: each allocator wraps its CPU and mutates it in place.
    cur_cpu = get_cpu()
    GreedyCpuAllocator(cur_cpu).assign_threads(workload)

    new_cpu = get_cpu()
    GreedyCpuAllocator(new_cpu).assign_threads(workload)

    self.assertFalse(has_better_isolation(cur_cpu, new_cpu))
def test_is_isolated(self):
    """A workload is isolated only after a real allocator has placed it;
    the noop allocator treats every workload as isolated."""
    real_allocators = [GreedyCpuAllocator(), IntegerProgramCpuAllocator()]

    # Unknown workload ids are never reported isolated by real allocators.
    for allocator in real_allocators:
        manager = WorkloadManager(get_cpu(), MockCgroupManager(), allocator)
        self.assertFalse(manager.is_isolated(uuid.uuid4()))

    # Once added, a workload spanning the whole CPU is reported isolated.
    for allocator in real_allocators:
        workload = get_test_workload(uuid.uuid4(), DEFAULT_TOTAL_THREAD_COUNT, STATIC)
        manager = WorkloadManager(get_cpu(), MockCgroupManager(), allocator)
        manager.add_workload(workload)
        self.assertTrue(manager.is_isolated(workload.get_id()))

    # The noop allocator reports any workload id as isolated.
    noop_manager = WorkloadManager(get_cpu(), MockCgroupManager(), NoopCpuAllocator())
    self.assertTrue(noop_manager.is_isolated(uuid.uuid4()))
def test_no_change_populated_cpu(self):
    """Identical placements on fresh CPUs must have equal isolation quality."""
    workload = get_test_workload(uuid.uuid4(), 4, STATIC)
    workloads = {workload.get_id(): workload}

    def place_on_fresh_cpu(cpu):
        # A brand-new allocator per placement mirrors two independent runs.
        request = AllocateThreadsRequest(
            cpu, workload.get_id(), workloads, {}, DEFAULT_TEST_REQUEST_METADATA)
        return GreedyCpuAllocator().assign_threads(request).get_cpu()

    cur_cpu = place_on_fresh_cpu(get_cpu())
    new_cpu = place_on_fresh_cpu(get_cpu())
    self.assertFalse(has_better_isolation(cur_cpu, new_cpu))
def test_no_change_populated_cpu(self):
    """Identical placements on fresh CPUs must have equal isolation quality."""
    workload = get_test_workload(uuid.uuid4(), 4, STATIC)

    def place_on_fresh_cpu(cpu):
        # A brand-new allocator per placement mirrors two independent runs.
        request = get_no_usage_threads_request(cpu, [workload])
        return GreedyCpuAllocator().assign_threads(request).get_cpu()

    cur_cpu = place_on_fresh_cpu(get_cpu())
    new_cpu = place_on_fresh_cpu(get_cpu())
    self.assertFalse(has_better_isolation(cur_cpu, new_cpu))
def set_pod(self, pod: V1Pod):
    # Test stub setter: stores the single pod returned by every get_pod() call.
    # NOTE(review): these two methods belong to a stub class whose header is
    # outside this chunk — confirm indentation against the full file.
    self.pod = pod

def get_pod(self, pod_name: str) -> Optional[V1Pod]:
    # Test stub: ignores pod_name and returns whatever was set via set_pod().
    return self.pod


# Module-level allocator fixtures shared by the tests below.
forecast_ip_alloc_simple = ForecastIPCpuAllocator(
    TestCpuUsagePredictorManager(),
    ConfigManager(TestPropertyProvider({})),
    OversubscribeFreeThreadProvider(0.1))

ALLOCATORS = [
    NaiveCpuAllocator(),
    IntegerProgramCpuAllocator(),
    GreedyCpuAllocator(),
    forecast_ip_alloc_simple
]
# Allocators that may hand the same thread to more than one workload.
OVER_ALLOCATORS = [NaiveCpuAllocator(), forecast_ip_alloc_simple]

set_workload_monitor_manager(TestWorkloadMonitorManager())


class TestCpu(unittest.TestCase):

    def test_assign_one_thread_empty_cpu(self):
        """
        Workload 0: 1 thread --> (p:0 c:0 t:0)
        """
        # NOTE(review): this method appears truncated at the chunk boundary;
        # the visible body only checks the precondition of an empty CPU.
        for allocator in ALLOCATORS:
            cpu = get_cpu()
            self.assertEqual(DEFAULT_TOTAL_THREAD_COUNT, len(cpu.get_empty_threads()))
OVERSUBSCRIBED_THREADS_KEY, STATIC_ALLOCATED_SIZE_KEY, BURST_ALLOCATED_SIZE_KEY, \
    BURST_REQUESTED_SIZE_KEY, ALLOCATED_SIZE_KEY, UNALLOCATED_SIZE_KEY
# NOTE(review): the names above complete a "from ... import" statement whose
# opening line is outside this chunk.
from titus_isolate.model.processor.config import get_cpu
from titus_isolate.model.processor.utils import DEFAULT_TOTAL_THREAD_COUNT, is_cpu_full
from titus_isolate.monitor.oversubscribe_free_thread_provider import OversubscribeFreeThreadProvider
from titus_isolate.utils import set_config_manager, set_workload_monitor_manager

# Global test setup: debug logging plus config/monitor singletons.
config_logs(logging.DEBUG)
set_config_manager(ConfigManager(TestPropertyProvider({})))
set_workload_monitor_manager(TestWorkloadMonitorManager())

forecast_ip_alloc_simple = ForecastIPCpuAllocator(
    TestCpuUsagePredictorManager(),
    ConfigManager(TestPropertyProvider({})),
    OversubscribeFreeThreadProvider(DEFAULT_TOTAL_THRESHOLD))

# Allocator groupings exercised by the tests below.
LEGACY_ALLOCATORS = [IntegerProgramCpuAllocator(), GreedyCpuAllocator()]
OVERSUBSCRIBING_ALLOCATORS = [forecast_ip_alloc_simple]
ALLOCATORS = LEGACY_ALLOCATORS + OVERSUBSCRIBING_ALLOCATORS


class TestWorkloadManager(unittest.TestCase):

    def test_single_static_workload_lifecycle(self):
        # NOTE(review): this method is truncated at the chunk boundary; the
        # remainder of the lifecycle checks continues outside this view.
        for allocator in ALLOCATORS:
            thread_count = 2
            workload = get_test_workload(uuid.uuid4(), thread_count, STATIC)

            cgroup_manager = MockCgroupManager()
            workload_manager = WorkloadManager(get_cpu(), cgroup_manager, allocator)

            # Add workload