def test_single_burst_workload_lifecycle(self):
    for allocator_class in [GreedyCpuAllocator, IntegerProgramCpuAllocator]:
        thread_count = 2
        workload = Workload(uuid.uuid4(), thread_count, BURST)

        cgroup_manager = MockCgroupManager()
        workload_manager = WorkloadManager(get_cpu(), cgroup_manager, allocator_class=allocator_class)

        # Add workload
        workload_manager.add_workload(workload)
        self.assertEqual(
            2, cgroup_manager.container_update_counts[workload.get_id()])

        # All threads should have been assigned to the only burst workload.
        self.assertEqual(
            DEFAULT_TOTAL_THREAD_COUNT,
            len(cgroup_manager.container_update_map[workload.get_id()]))

        # No threads should have been consumed from the cpu model perspective.
        self.assertEqual(
            DEFAULT_TOTAL_THREAD_COUNT,
            len(workload_manager.get_cpu().get_empty_threads()))

        # Remove workload
        workload_manager.remove_workload(workload.get_id())
        self.assertEqual(
            DEFAULT_TOTAL_THREAD_COUNT,
            len(workload_manager.get_cpu().get_empty_threads()))
def test_single_static_workload_lifecycle(self):
    for allocator_class in [GreedyCpuAllocator, IntegerProgramCpuAllocator]:
        thread_count = 2
        workload = Workload(uuid.uuid4(), thread_count, STATIC)

        cgroup_manager = MockCgroupManager()
        workload_manager = WorkloadManager(get_cpu(), cgroup_manager, allocator_class=allocator_class)

        # Add workload
        workload_manager.add_workload(workload)
        self.assertEqual(
            DEFAULT_TOTAL_THREAD_COUNT - thread_count,
            len(workload_manager.get_cpu().get_empty_threads()))
        self.assertEqual(
            1, cgroup_manager.container_update_counts[workload.get_id()])

        # Remove workload
        workload_manager.remove_workload(workload.get_id())
        self.assertEqual(
            DEFAULT_TOTAL_THREAD_COUNT,
            len(workload_manager.get_cpu().get_empty_threads()))
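# Taken together, the two lifecycle tests above pin down the semantics the rest of this file
# assumes: a STATIC workload claims exactly its requested thread count in the CPU model, while a
# BURST workload is handed every currently-empty thread in its cpuset without claiming any of
# them. A condensed sketch of that distinction, using the same helpers and constants as the
# tests above (illustrative only, not an additional test case):
wm = WorkloadManager(get_cpu(), MockCgroupManager(), allocator_class=GreedyCpuAllocator)

wm.add_workload(Workload("static_example", 2, STATIC))
assert len(wm.get_cpu().get_empty_threads()) == DEFAULT_TOTAL_THREAD_COUNT - 2

wm.add_workload(Workload("burst_example", 2, BURST))
# The burst workload consumes no threads from the CPU model's perspective.
assert len(wm.get_cpu().get_empty_threads()) == DEFAULT_TOTAL_THREAD_COUNT - 2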
def predict(self, workload: Workload, cpu_usage_last_hour: np.array, pred_env: PredEnvironment) -> float:
    if workload.get_id() == 'static_a':
        return workload.get_thread_count() * 0.8
    elif workload.get_id() == 'static_b':
        return workload.get_thread_count() * 0.01
    elif workload.get_id() == 'burst_c':
        return workload.get_thread_count() * 0.9
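# Worked values for the stub above, assuming the 2-thread workloads used elsewhere in these
# tests (illustrative only; the workload ids are the hard-coded ones the stub matches on):
#   'static_a' with 2 threads -> 2 * 0.8  = 1.6 predicted CPUs
#   'static_b' with 2 threads -> 2 * 0.01 = 0.02 predicted CPUs
#   'burst_c'  with 2 threads -> 2 * 0.9  = 1.8 predicted CPUs
# Any other workload id falls through every branch and the stub returns None.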
def test_assign_threads(self):
    cpu = get_cpu()
    cgroup_manager = MockCgroupManager()
    cpu_allocator = NoopResetCpuAllocator(cpu)
    cpu_allocator.set_cgroup_manager(cgroup_manager)

    w = Workload(uuid.uuid4(), 1, STATIC)
    cpu_allocator.assign_threads(w)
    self.assertEqual(1, cgroup_manager.container_update_counts[w.get_id()])
    self.assertEqual(len(cpu.get_threads()), len(cgroup_manager.container_update_map[w.get_id()]))
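# The tests in this file only rely on MockCgroupManager recording, per container, how many times
# its cpuset was updated (container_update_counts) and the thread ids of the most recent update
# (container_update_map). A minimal sketch of such a mock is shown below; only those two
# attributes are taken from the assertions above, and the set_cpuset() entry point name is an
# assumption, not something visible in this excerpt.
from collections import defaultdict


class SketchCgroupManager:
    """Illustrative mock: records cpuset updates the way these tests inspect them."""

    def __init__(self):
        # Number of cpuset updates applied to each container, keyed by container name.
        self.container_update_counts = defaultdict(int)
        # Thread ids from the most recent cpuset update, keyed by container name.
        self.container_update_map = {}

    def set_cpuset(self, container_name, thread_ids):
        # Assumed entry point: record the update rather than touching real cgroups.
        self.container_update_counts[container_name] += 1
        self.container_update_map[container_name] = list(thread_ids)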
def handle(self, event):
    if not self.__relevant(event):
        return

    name = get_container_name(event)
    cpus = get_cpu_count(event)
    workload_type = get_workload_type(event)
    workload = Workload(name, cpus, workload_type)

    self.handling_event(event, "adding workload: '{}'".format(workload.get_id()))
    self.workload_manager.add_workload(workload)
    self.handled_event(event, "added workload: '{}'".format(workload.get_id()))
def test_remove_unknown_workload(self):
    for allocator_class in [GreedyCpuAllocator, IntegerProgramCpuAllocator]:
        unknown_workload_id = "unknown"
        thread_count = 2
        workload = Workload(uuid.uuid4(), thread_count, STATIC)

        workload_manager = WorkloadManager(get_cpu(), MockCgroupManager(), allocator_class=allocator_class)

        # Remove from empty set
        workload_manager.remove_workload(unknown_workload_id)

        # Add workload
        workload_manager.add_workload(workload)
        self.assertEqual(
            DEFAULT_TOTAL_THREAD_COUNT - thread_count,
            len(workload_manager.get_cpu().get_empty_threads()))

        # Removal of an unknown workload should have no effect
        workload_manager.remove_workload(unknown_workload_id)
        self.assertEqual(
            DEFAULT_TOTAL_THREAD_COUNT - thread_count,
            len(workload_manager.get_cpu().get_empty_threads()))

        # Removing the unknown workload again, then the real workload, should free all threads
        workload_manager.remove_workload(unknown_workload_id)
        workload_manager.remove_workload(workload.get_id())
        self.assertEqual(
            DEFAULT_TOTAL_THREAD_COUNT,
            len(workload_manager.get_cpu().get_empty_threads()))
def test_construction(self):
    identifier = uuid.uuid4()
    thread_count = 2
    workload_type = STATIC

    w = Workload(identifier, thread_count, workload_type)
    self.assertEqual(identifier, w.get_id())
    self.assertEqual(thread_count, w.get_thread_count())
    self.assertEqual(workload_type, w.get_type())
def test_free_cpu(self):
    for allocator_class in ALLOCATORS:
        cpu = get_cpu()
        allocator = allocator_class(cpu)
        self.assertEqual(DEFAULT_TOTAL_THREAD_COUNT, len(cpu.get_empty_threads()))

        w = Workload(uuid.uuid4(), 3, STATIC)
        allocator.assign_threads(w)
        self.assertEqual(
            DEFAULT_TOTAL_THREAD_COUNT - w.get_thread_count(),
            len(cpu.get_empty_threads()))

        allocator.free_threads(w.get_id())
        self.assertEqual(DEFAULT_TOTAL_THREAD_COUNT, len(cpu.get_empty_threads()))
def test_filling_holes_ip(self):
    """
    Initialize with a fragmented placement, then fill the instance. The result should be less
    fragmented, with the first workload completely filling a socket.

    | a |   | a |   |
    |   | a |   | a |
    | ------------- |
    |   | a |   | a |
    | a |   | a |   |
    """
    cpu = get_cpu()
    allocator = IntegerProgramCpuAllocator(cpu)

    # Initialize fragmented workload
    wa = Workload(uuid.uuid4(), 8, STATIC)

    p0 = cpu.get_packages()[0]
    p0.get_cores()[0].get_threads()[0].claim(wa.get_id())
    p0.get_cores()[1].get_threads()[1].claim(wa.get_id())
    p0.get_cores()[2].get_threads()[0].claim(wa.get_id())
    p0.get_cores()[3].get_threads()[1].claim(wa.get_id())

    p1 = cpu.get_packages()[1]
    p1.get_cores()[0].get_threads()[1].claim(wa.get_id())
    p1.get_cores()[1].get_threads()[0].claim(wa.get_id())
    p1.get_cores()[2].get_threads()[1].claim(wa.get_id())
    p1.get_cores()[3].get_threads()[0].claim(wa.get_id())

    self.assertEqual(8, len(cpu.get_empty_threads()))

    # Fill the rest of the CPU
    w0 = Workload(uuid.uuid4(), 2, STATIC)
    w1 = Workload(uuid.uuid4(), 3, STATIC)
    w2 = Workload(uuid.uuid4(), 1, STATIC)
    w3 = Workload(uuid.uuid4(), 2, STATIC)

    workloads = [wa, w0, w1, w2, w3]
    for w in workloads:
        allocator.assign_threads(w)

    self.assertEqual(0, len(cpu.get_empty_threads()))

    # The first workload should completely fill one socket to avoid a cross-socket job layout.
    for package in cpu.get_packages():
        if package.get_cores()[0].get_threads()[0].get_workload_id() != wa.get_id():
            continue
        ids = [t.get_workload_id() for core in package.get_cores() for t in core.get_threads()]
        self.assertListEqual(ids, [wa.get_id()] * 8)
def test_free_cpu_3_workloads(self):
    # Add 3 workloads sequentially, and then remove the 2nd one added.
    for allocator_class in ALLOCATORS:
        cpu = get_cpu()
        allocator = allocator_class(cpu)

        w0 = Workload(123, 3, STATIC)
        w1 = Workload(456, 2, STATIC)
        w2 = Workload(789, 4, STATIC)
        allocator.assign_threads(w0)
        allocator.assign_threads(w1)
        allocator.assign_threads(w2)
        self.assertEqual(3 + 4 + 2, len(cpu.get_claimed_threads()))

        allocator.free_threads(w1.get_id())
        self.assertEqual(3 + 4, len(cpu.get_claimed_threads()))

        workload_ids_left = set()
        for t in cpu.get_threads():
            if t.is_claimed():
                workload_ids_left.add(t.get_workload_id())

        self.assertListEqual(sorted(list(workload_ids_left)), [123, 789])
def test_alternating_static_burst_workloads(self):
    for allocator_class in [GreedyCpuAllocator, IntegerProgramCpuAllocator]:
        thread_count = 2

        burst0 = Workload("burst0", thread_count, BURST)
        burst1 = Workload("burst1", thread_count, BURST)
        static0 = Workload("static0", thread_count, STATIC)
        static1 = Workload("static1", thread_count, STATIC)

        cgroup_manager = MockCgroupManager()
        workload_manager = WorkloadManager(get_cpu(), cgroup_manager, allocator_class=allocator_class)

        # Add static workload
        log.info("ADDING STATIC0")
        workload_manager.add_workload(static0)

        self.assertTrue(static0.get_id() in cgroup_manager.container_update_map)
        self.assertEqual(
            thread_count,
            len(cgroup_manager.container_update_map[static0.get_id()]))
        self.assertEqual(
            1, cgroup_manager.container_update_counts[static0.get_id()])
        expected_free_thread_count = DEFAULT_TOTAL_THREAD_COUNT - thread_count
        self.assertEqual(
            expected_free_thread_count,
            len(workload_manager.get_cpu().get_empty_threads()))

        # Add burst workload
        log.info("ADDING BURST0")
        workload_manager.add_workload(burst0)

        self.assertEqual(
            expected_free_thread_count,
            len(cgroup_manager.container_update_map[burst0.get_id()]))
        self.assertEqual(
            2, cgroup_manager.container_update_counts[burst0.get_id()])
        # No change in empty threads expected
        self.assertEqual(
            expected_free_thread_count,
            len(workload_manager.get_cpu().get_empty_threads()))

        # Add static workload
        log.info("ADDING STATIC1")
        workload_manager.add_workload(static1)

        self.assertEqual(
            thread_count,
            len(cgroup_manager.container_update_map[static1.get_id()]))
        self.assertEqual(
            1, cgroup_manager.container_update_counts[static1.get_id()])
        expected_free_thread_count = expected_free_thread_count - thread_count
        self.assertEqual(
            expected_free_thread_count,
            len(workload_manager.get_cpu().get_empty_threads()))

        # The burst0 container should be updated again because the burst footprint changed after
        # the addition of a static workload
        self.assertEqual(
            3, cgroup_manager.container_update_counts[burst0.get_id()])
        self.assertEqual(
            expected_free_thread_count,
            len(cgroup_manager.container_update_map[burst0.get_id()]))

        # Add burst workload
        log.info("ADDING BURST1")
        workload_manager.add_workload(burst1)

        self.assertEqual(
            4, cgroup_manager.container_update_counts[burst0.get_id()])
        self.assertEqual(
            2, cgroup_manager.container_update_counts[burst1.get_id()])
        self.assertEqual(
            expected_free_thread_count,
            len(cgroup_manager.container_update_map[burst1.get_id()]))
        # No change in empty threads expected
        self.assertEqual(
            expected_free_thread_count,
            len(workload_manager.get_cpu().get_empty_threads()))

        # Remove static workload
        log.info("REMOVING STATIC0")
        workload_manager.remove_workload(static0.get_id())

        self.assertEqual(
            5, cgroup_manager.container_update_counts[burst0.get_id()])
        self.assertEqual(
            3, cgroup_manager.container_update_counts[burst1.get_id()])

        # Empty threads should have increased
        expected_free_thread_count = expected_free_thread_count + thread_count
        self.assertEqual(
            expected_free_thread_count,
            len(workload_manager.get_cpu().get_empty_threads()))
        self.assertEqual(
            expected_free_thread_count,
            len(cgroup_manager.container_update_map[burst0.get_id()]))
        self.assertEqual(
            expected_free_thread_count,
            len(cgroup_manager.container_update_map[burst1.get_id()]))