def test_high_static_usage(self):
        # Oversubscribe
        free_threads = self.__test_uniform_usage(
            TEST_THRESHOLD_USAGE + 0.001,
            OversubscribeFreeThreadProvider(DEFAULT_TOTAL_THRESHOLD))
        self.assertEqual(TEST_WORKLOAD_THREAD_COUNT * 2, len(free_threads))

    def test_forecast_threshold_no_usage(self):
        allocator = ForecastIPCpuAllocator(
            TestCpuUsagePredictorManager(),
            ConfigManager(TestPropertyProvider({})),
            OversubscribeFreeThreadProvider(0.1))

        thread_count = DEFAULT_TOTAL_THREAD_COUNT // 2
        cpu = get_cpu()

        w0 = get_test_workload(uuid.uuid4(), thread_count, STATIC)

        request = get_no_usage_threads_request(cpu, [w0])
        cpu = allocator.assign_threads(request).get_cpu()
        log.info(cpu)

        # All cores should be occupied: one claimed thread and one empty thread each
        for c in cpu.get_cores():
            self.assertEqual(1, len(c.get_empty_threads()))

        w1 = get_test_workload(uuid.uuid4(), thread_count, BURST)
        request = get_no_usage_threads_request(cpu, [w0, w1])
        cpu = allocator.assign_threads(request).get_cpu()
        log.info(cpu)

        # No core should be shared across workloads: both threads on each core
        # must carry the same workload ids
        for c in cpu.get_cores():
            self.assertEqual(c.get_threads()[0].get_workload_ids(),
                             c.get_threads()[1].get_workload_ids())

    def test_forecast_ip_burst_pool_with_usage(self):
        class UsagePredictorWithBurst:
            def __init__(self):
                self.__model = TestPredictor()

            def predict(self, workload: Workload,
                        cpu_usage_last_hour: np.ndarray,
                        pred_env: PredEnvironment) -> float:
                if workload.get_id() == 'static_a':
                    return workload.get_thread_count() * 0.8
                elif workload.get_id() == 'static_b':
                    return workload.get_thread_count() * 0.01
                elif workload.get_id() == 'burst_c':
                    return workload.get_thread_count() * 0.9

            def get_model(self):
                return self.__model

        upm = TestCpuUsagePredictorManager(UsagePredictorWithBurst())
        cm = ConfigManager(
            TestPropertyProvider({BURST_CORE_COLLOC_USAGE_THRESH: 0.9}))
        allocator = ForecastIPCpuAllocator(
            upm, cm, OversubscribeFreeThreadProvider(0.1))

        cpu = get_cpu(package_count=2, cores_per_package=16)
        w_a = get_test_workload("static_a", 14, STATIC)
        w_b = get_test_workload("static_b", 14, STATIC)
        w_c = get_test_workload("burst_c", 2, BURST)

        request = get_no_usage_threads_request(cpu, [w_a])
        cpu = allocator.assign_threads(request).get_cpu()

        request = get_no_usage_threads_request(cpu, [w_a, w_c])
        cpu = allocator.assign_threads(request).get_cpu()
        # With aggressive burst pool expansion, the burst workload should be collocated with static workloads on cores:
        self.assertLess(40, len(cpu.get_claimed_threads()))
        num_burst_1 = len(cpu.get_workload_ids_to_thread_ids()[w_c.get_id()])

        request = get_no_usage_threads_request(cpu, [w_a, w_c, w_b])
        cpu = allocator.assign_threads(request).get_cpu()
        # The burst pool should retract, and prefer collocation with the lightly used static_b over the heavily used static_a:
        num_burst_2 = len(cpu.get_workload_ids_to_thread_ids()[w_c.get_id()])
        self.assertLessEqual(num_burst_2, num_burst_1)

        colloc_a = 0
        colloc_b = 0
        for p in cpu.get_packages():
            for c in p.get_cores():
                t1 = c.get_threads()[0]
                t2 = c.get_threads()[1]
                if t1.is_claimed() and t2.is_claimed():
                    wt1 = t1.get_workload_ids()[0]
                    wt2 = t2.get_workload_ids()[0]
                    if (wt1 == w_a.get_id() and wt2 == w_c.get_id()) or (
                            wt1 == w_c.get_id() and wt2 == w_a.get_id()):
                        colloc_a += 1
                    elif (wt1 == w_b.get_id() and wt2 == w_c.get_id()) or (
                            wt1 == w_c.get_id() and wt2 == w_b.get_id()):
                        colloc_b += 1
        self.assertLessEqual(colloc_a, colloc_b)
def get_free_thread_provider(
        config_manager: ConfigManager) -> FreeThreadProvider:
    free_thread_provider_str = config_manager.get_str(
        FREE_THREAD_PROVIDER, DEFAULT_FREE_THREAD_PROVIDER)
    free_thread_provider = None

    total_threshold = config_manager.get_float(TOTAL_THRESHOLD,
                                               DEFAULT_TOTAL_THRESHOLD)

    if free_thread_provider_str == EMPTY:
        free_thread_provider = EmptyFreeThreadProvider()
    elif free_thread_provider_str == OVERSUBSCRIBE:
        free_thread_provider = OversubscribeFreeThreadProvider(total_threshold)
    else:
        # Unknown value: log it loudly, since we fall through and return None.
        log.error("Unknown free thread provider: '{}'".format(
            free_thread_provider_str))

    log.debug("Free thread provider: '{}'".format(
        free_thread_provider.__class__.__name__))
    return free_thread_provider
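
# Usage sketch (illustrative, not part of the original file): driving the
# factory above through configuration. Assumes TestPropertyProvider accepts a
# plain dict of property overrides, as in the tests in this file, and that
# FREE_THREAD_PROVIDER / OVERSUBSCRIBE are the constants the factory checks.
cm = ConfigManager(TestPropertyProvider({FREE_THREAD_PROVIDER: OVERSUBSCRIBE}))
provider = get_free_thread_provider(cm)
assert isinstance(provider, OversubscribeFreeThreadProvider)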

    def test_forecast_threshold_usage(self):
        allocator = ForecastIPCpuAllocator(
            TestCpuUsagePredictorManager(TestCpuUsagePredictor(10)),
            ConfigManager(TestPropertyProvider({})),
            OversubscribeFreeThreadProvider(0.05))

        thread_count = DEFAULT_TOTAL_THREAD_COUNT // 4
        cpu = get_cpu()

        w0 = get_test_workload("s", thread_count, STATIC)
        log.info(w0)

        request = get_no_usage_threads_request(cpu, [w0])
        cpu = allocator.assign_threads(request).get_cpu()
        log.info(cpu)

        # No core should have both threads claimed; every core keeps at least one empty thread
        for c in cpu.get_cores():
            self.assertTrue(
                len(c.get_empty_threads()) == 1
                or len(c.get_empty_threads()) == 2)

        w1 = get_test_workload("b", thread_count, BURST)
        log.info(w1)
        request = get_no_usage_threads_request(cpu, [w0, w1])
        cpu = allocator.assign_threads(request).get_cpu()
        log.info(cpu)

        for c in cpu.get_cores():
            # Static workload should have unshared cores
            if len(c.get_empty_threads()) == 1:
                for t in c.get_threads():
                    if t.is_claimed():
                        self.assertEqual([w0.get_id()], t.get_workload_ids())
            # Burst workload should have shared cores only with itself
            if len(c.get_empty_threads()) == 0:
                self.assertEqual(c.get_threads()[0].get_workload_ids(),
                                 c.get_threads()[1].get_workload_ids())
                self.assertEqual([w1.get_id()],
                                 c.get_threads()[1].get_workload_ids())

class TestPodManager:
    """Minimal stub: get_pod ignores the requested name and returns the last pod set."""
    def __init__(self):
        self.pod = None

    def set_pod(self, pod: V1Pod):
        self.pod = pod

    def get_pod(self, pod_name: str) -> Optional[V1Pod]:
        return self.pod
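

# Usage sketch (illustrative only) for the stub above. V1Pod is the kubernetes
# client model these tests assume; its constructor takes only optional kwargs.
pod_manager = TestPodManager()
pod_manager.set_pod(V1Pod())
assert pod_manager.get_pod("any-pod-name") is not None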


forecast_ip_alloc_simple = ForecastIPCpuAllocator(
    TestCpuUsagePredictorManager(), ConfigManager(TestPropertyProvider({})),
    OversubscribeFreeThreadProvider(0.1))

ALLOCATORS = [
    NaiveCpuAllocator(),
    IntegerProgramCpuAllocator(),
    GreedyCpuAllocator(),
    forecast_ip_alloc_simple,
]
OVER_ALLOCATORS = [NaiveCpuAllocator(), forecast_ip_alloc_simple]

set_workload_monitor_manager(TestWorkloadMonitorManager())
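
# Illustrative sketch (not in the original file): the fixture lists above are
# consumed in loops like this one, mirroring the assign_threads/get_cpu
# pattern used throughout these tests. It assumes every allocator in the list
# implements the same assign_threads interface the tests above rely on.
for allocator in OVER_ALLOCATORS:
    cpu = get_cpu()
    w = get_test_workload(uuid.uuid4(), 2, STATIC)
    request = get_no_usage_threads_request(cpu, [w])
    cpu = allocator.assign_threads(request).get_cpu()
    assert w.get_id() in cpu.get_workload_ids_to_thread_ids()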


class TestCpu(unittest.TestCase):
    def test_assign_one_thread_empty_cpu(self):
        """
        Workload 0: 1 thread --> (p:0 c:0 t:0)
        """

from titus_isolate.metrics.constants import RUNNING, ADDED_KEY, REMOVED_KEY, SUCCEEDED_KEY, FAILED_KEY, \
    WORKLOAD_COUNT_KEY, PACKAGE_VIOLATIONS_KEY, CORE_VIOLATIONS_KEY, IP_ALLOCATOR_TIMEBOUND_COUNT, \
    OVERSUBSCRIBED_THREADS_KEY, STATIC_ALLOCATED_SIZE_KEY, BURST_ALLOCATED_SIZE_KEY, \
    BURST_REQUESTED_SIZE_KEY, ALLOCATED_SIZE_KEY, UNALLOCATED_SIZE_KEY
from titus_isolate.model.processor.config import get_cpu
from titus_isolate.model.processor.utils import DEFAULT_TOTAL_THREAD_COUNT, is_cpu_full
from titus_isolate.monitor.oversubscribe_free_thread_provider import OversubscribeFreeThreadProvider
from titus_isolate.utils import set_config_manager, set_workload_monitor_manager

config_logs(logging.DEBUG)
set_config_manager(ConfigManager(TestPropertyProvider({})))
set_workload_monitor_manager(TestWorkloadMonitorManager())

forecast_ip_alloc_simple = ForecastIPCpuAllocator(
    TestCpuUsagePredictorManager(), ConfigManager(TestPropertyProvider({})),
    OversubscribeFreeThreadProvider(DEFAULT_TOTAL_THRESHOLD))

LEGACY_ALLOCATORS = [IntegerProgramCpuAllocator(), GreedyCpuAllocator()]
OVERSUBSCRIBING_ALLOCATORS = [forecast_ip_alloc_simple]
ALLOCATORS = LEGACY_ALLOCATORS + OVERSUBSCRIBING_ALLOCATORS


class TestWorkloadManager(unittest.TestCase):
    def test_single_static_workload_lifecycle(self):
        for allocator in ALLOCATORS:
            thread_count = 2
            workload = get_test_workload(uuid.uuid4(), thread_count, STATIC)

            cgroup_manager = MockCgroupManager()
            workload_manager = WorkloadManager(get_cpu(), cgroup_manager,
                                               allocator)

    def test_low_static_usage(self):
        # Oversubscribe
        free_threads = self.__test_uniform_usage(
            TEST_THRESHOLD_USAGE,
            OversubscribeFreeThreadProvider(DEFAULT_TOTAL_THRESHOLD))
        self.assertEqual(DEFAULT_TOTAL_THREAD_COUNT, len(free_threads))