Example #1
0
    def test_empty_usage_all_threads_claimed(self):
        """When a single workload claims every thread on the CPU, the
        threshold provider must report no free threads at all."""
        # Saturate the CPU with one static workload spanning all threads.
        target_cpu = get_cpu()
        w = get_test_workload("a", len(target_cpu.get_threads()), STATIC)
        target_cpu = self.__assign_workload(target_cpu, w)

        provider = ThresholdFreeThreadProvider(
            total_threshold=DEFAULT_TOTAL_THRESHOLD)
        result = provider.get_free_threads(target_cpu, {}, {w.get_id(): w})
        self.assertEqual([], result)
Example #2
0
    def test_forecast_threshold_no_usage(self):
        """A static workload sized at half the CPU leaves one empty thread per
        core; a burst workload of the same size then fills those threads, so
        both threads of every core end up with identical workload-id sets."""
        cpu_allocator = ForecastIPCpuAllocator(
            TestCpuUsagePredictorManager(),
            ConfigManager(TestPropertyProvider({})),
            ThresholdFreeThreadProvider(0.1))

        requested_threads = DEFAULT_TOTAL_THREAD_COUNT / 2
        cpu = get_cpu()

        static_workload = get_test_workload(uuid.uuid4(), requested_threads,
                                            STATIC)

        req = AllocateThreadsRequest(
            cpu, static_workload.get_id(),
            {static_workload.get_id(): static_workload}, {},
            DEFAULT_TEST_REQUEST_METADATA)
        cpu = cpu_allocator.assign_threads(req).get_cpu()
        log.info(cpu)

        # Every core should hold exactly one claimed and one empty thread.
        for core in cpu.get_cores():
            self.assertEqual(1, len(core.get_empty_threads()))

        burst_workload = get_test_workload(uuid.uuid4(), requested_threads,
                                           BURST)
        req = AllocateThreadsRequest(
            cpu, burst_workload.get_id(), {
                static_workload.get_id(): static_workload,
                burst_workload.get_id(): burst_workload
            }, {}, DEFAULT_TEST_REQUEST_METADATA)
        cpu = cpu_allocator.assign_threads(req).get_cpu()
        log.info(cpu)

        # Both threads of a core must carry the same workload-id set.
        for core in cpu.get_cores():
            first, second = core.get_threads()[0], core.get_threads()[1]
            self.assertEqual(first.get_workload_ids(),
                             second.get_workload_ids())
Example #3
0
    def test_high_static_usage(self):
        """Usage just above the threshold frees both hyperthreads of every
        workload core, for both free-thread provider implementations."""
        usage = TEST_THRESHOLD_USAGE + 0.001
        expected_free = TEST_WORKLOAD_THREAD_COUNT * 2

        # Threshold provider first, then the oversubscribe provider.
        for provider in (
                ThresholdFreeThreadProvider(DEFAULT_TOTAL_THRESHOLD),
                OversubscribeFreeThreadProvider(DEFAULT_TOTAL_THRESHOLD)):
            free_threads = self.__test_uniform_usage(usage, provider)
            self.assertEqual(expected_free, len(free_threads))
Example #4
0
    def test_forecast_threshold_usage(self):
        """With a usage predictor in play, the static workload keeps unshared
        cores while the burst workload shares fully-claimed cores only with
        itself."""
        cpu_allocator = ForecastIPCpuAllocator(
            TestCpuUsagePredictorManager(TestCpuUsagePredictor(10)),
            ConfigManager(TestPropertyProvider({})),
            ThresholdFreeThreadProvider(0.05))

        requested_threads = DEFAULT_TOTAL_THREAD_COUNT / 4
        cpu = get_cpu()

        static_w = get_test_workload(uuid.uuid4(), requested_threads, STATIC)
        log.info(static_w)

        req = AllocateThreadsRequest(
            cpu, static_w.get_id(), {static_w.get_id(): static_w}, {},
            DEFAULT_TEST_REQUEST_METADATA)
        cpu = cpu_allocator.assign_threads(req).get_cpu()
        log.info(cpu)

        # After placing the static workload each core has 1 or 2 empty threads.
        for core in cpu.get_cores():
            self.assertIn(len(core.get_empty_threads()), (1, 2))

        burst_w = get_test_workload(uuid.uuid4(), requested_threads, BURST)
        log.info(burst_w)
        req = AllocateThreadsRequest(
            cpu, burst_w.get_id(),
            {static_w.get_id(): static_w, burst_w.get_id(): burst_w},
            {}, DEFAULT_TEST_REQUEST_METADATA)
        cpu = cpu_allocator.assign_threads(req).get_cpu()
        log.info(cpu)

        for core in cpu.get_cores():
            empty_count = len(core.get_empty_threads())
            if empty_count == 1:
                # Static workload should have unshared cores.
                for thread in core.get_threads():
                    if thread.is_claimed():
                        self.assertEqual([static_w.get_id()],
                                         thread.get_workload_ids())
            elif empty_count == 0:
                # Burst workload should share cores only with itself.
                first, second = core.get_threads()[0], core.get_threads()[1]
                self.assertEqual(first.get_workload_ids(),
                                 second.get_workload_ids())
                self.assertEqual([burst_w.get_id()],
                                 second.get_workload_ids())
Example #5
0
def get_free_thread_provider(
        config_manager: ConfigManager) -> FreeThreadProvider:
    """Build the FreeThreadProvider selected by configuration.

    Reads the provider name and the total-usage threshold from the config
    manager. Returns None when the configured name matches no known provider
    (the debug log then reports 'NoneType').
    """
    provider_name = config_manager.get_str(
        FREE_THREAD_PROVIDER, DEFAULT_FREE_THREAD_PROVIDER)
    threshold = config_manager.get_float(TOTAL_THRESHOLD,
                                         DEFAULT_TOTAL_THRESHOLD)

    # Map each provider name to a zero-argument factory.
    factories = {
        EMPTY: lambda: EmptyFreeThreadProvider(),
        THRESHOLD: lambda: ThresholdFreeThreadProvider(threshold),
        OVERSUBSCRIBE: lambda: OversubscribeFreeThreadProvider(threshold),
    }
    factory = factories.get(provider_name)
    free_thread_provider = factory() if factory is not None else None

    log.debug("Free thread provider: '{}'".format(
        free_thread_provider.__class__.__name__))
    return free_thread_provider
Example #6
0
    def test_forecast_ip_burst_pool_with_usage(self):
        """Burst-pool behavior under predicted usage.

        The burst workload should first expand aggressively (collocating with
        the high-usage static workload), then retract when a low-usage static
        workload arrives, preferring collocation with the low-usage one.
        """
        class UsagePredictorWithBurst:
            # Canned predictor: per-thread usage of 0.8 for static_a,
            # 0.01 for static_b, 0.9 for burst_c; any other id returns None.
            def __init__(self):
                self.__model = TestPredictor()

            def predict(self, workload: Workload,
                        cpu_usage_last_hour: np.ndarray,
                        pred_env: PredEnvironment) -> float:
                if workload.get_id() == 'static_a':
                    return workload.get_thread_count() * 0.8
                elif workload.get_id() == 'static_b':
                    return workload.get_thread_count() * 0.01
                elif workload.get_id() == 'burst_c':
                    return workload.get_thread_count() * 0.9

            def get_model(self):
                return self.__model

        upm = TestCpuUsagePredictorManager(UsagePredictorWithBurst())
        cm = ConfigManager(
            TestPropertyProvider({BURST_CORE_COLLOC_USAGE_THRESH: 0.9}))
        allocator = ForecastIPCpuAllocator(upm, cm,
                                           ThresholdFreeThreadProvider(0.1))

        # 2 packages x 16 cores; two static workloads and one burst workload.
        cpu = get_cpu(package_count=2, cores_per_package=16)
        w_a = get_test_workload("static_a", 14, STATIC)
        w_b = get_test_workload("static_b", 14, STATIC)
        w_c = get_test_workload("burst_c", 2, BURST)

        # Place static_a first, then the burst workload.
        request = AllocateThreadsRequest(cpu, "static_a", {"static_a": w_a},
                                         {}, DEFAULT_TEST_REQUEST_METADATA)
        cpu = allocator.assign_threads(request).get_cpu()

        request = AllocateThreadsRequest(cpu, "burst_c", {
            "static_a": w_a,
            "burst_c": w_c
        }, {}, DEFAULT_TEST_REQUEST_METADATA)
        cpu = allocator.assign_threads(request).get_cpu()
        # with an aggressive burst pool expansion, burst should be collocated with static on cores:
        self.assertLess(40, len(cpu.get_claimed_threads()))
        num_burst_1 = len(cpu.get_workload_ids_to_thread_ids()["burst_c"])

        request = AllocateThreadsRequest(cpu, "static_b", {
            "static_a": w_a,
            "static_b": w_b,
            "burst_c": w_c
        }, {}, DEFAULT_TEST_REQUEST_METADATA)
        cpu = allocator.assign_threads(request).get_cpu()
        # burst should retract, and prefer collocation with b over a:
        num_burst_2 = len(cpu.get_workload_ids_to_thread_ids()["burst_c"])
        self.assertLessEqual(num_burst_2, num_burst_1)

        # Count fully-claimed cores where burst_c shares with static_a
        # versus cores where it shares with static_b.
        colloc_a = 0
        colloc_b = 0
        for p in cpu.get_packages():
            for c in p.get_cores():
                t1 = c.get_threads()[0]
                t2 = c.get_threads()[1]
                if t1.is_claimed() and t2.is_claimed():
                    # NOTE(review): only the first workload id per thread is
                    # inspected — assumes one workload per thread here.
                    wt1 = t1.get_workload_ids()[0]
                    wt2 = t2.get_workload_ids()[0]
                    if (wt1 == 'static_a'
                            and wt2 == 'burst_c') or (wt1 == 'burst_c'
                                                      and wt2 == 'static_a'):
                        colloc_a += 1
                    elif (wt1 == 'static_b'
                          and wt2 == 'burst_c') or (wt1 == 'burst_c'
                                                    and wt2 == 'static_b'):
                        colloc_b += 1
        # Burst must collocate with low-usage static_b at least as often.
        self.assertLessEqual(colloc_a, colloc_b)
Example #7
0
    def set_predictor(self, predictor):
        # Test hook: swap in a replacement predictor for this manager.
        self.__predictor = predictor


class TestWorkloadMonitorManager(CpuUsageProvider):
    """Stub CpuUsageProvider returning a fixed cpu-usage mapping.

    Fix: the original used a mutable default argument (`cpu_usage={}`), so
    every instance constructed without an argument shared one dict — any
    mutation would leak across instances. Use a None sentinel instead.
    """

    def __init__(self, cpu_usage=None):
        # Each instance gets its own empty dict unless one is supplied.
        self.__cpu_usage = {} if cpu_usage is None else cpu_usage

    def get_cpu_usage(self, seconds: int, agg_granularity_secs: int) -> dict:
        """Return the canned usage mapping; both arguments are ignored."""
        return self.__cpu_usage


# Shared forecast allocator fixture used in the allocator lists below.
forecast_ip_alloc_simple = ForecastIPCpuAllocator(
    TestCpuUsagePredictorManager(), ConfigManager(TestPropertyProvider({})),
    ThresholdFreeThreadProvider(0.1))

# Allocators exercised by the tests in this module.
ALLOCATORS = [
    NaiveCpuAllocator(),
    IntegerProgramCpuAllocator(),
    GreedyCpuAllocator(), forecast_ip_alloc_simple
]
# Allocators exercised for over-allocation scenarios.
OVER_ALLOCATORS = [NaiveCpuAllocator()]

# Install the stub monitor manager as the module-wide singleton.
set_workload_monitor_manager(TestWorkloadMonitorManager())


class TestCpu(unittest.TestCase):
    def test_assign_one_thread_empty_cpu(self):
        """
        Workload 0: 1 thread --> (p:0 c:0 t:0)
from titus_isolate.metrics.constants import RUNNING, ADDED_KEY, REMOVED_KEY, SUCCEEDED_KEY, FAILED_KEY, \
    WORKLOAD_COUNT_KEY, PACKAGE_VIOLATIONS_KEY, CORE_VIOLATIONS_KEY, IP_ALLOCATOR_TIMEBOUND_COUNT, \
    OVERSUBSCRIBED_THREADS_KEY, STATIC_ALLOCATED_SIZE_KEY, BURST_ALLOCATED_SIZE_KEY, \
    BURST_REQUESTED_SIZE_KEY, ALLOCATED_SIZE_KEY, UNALLOCATED_SIZE_KEY
from titus_isolate.model.processor.config import get_cpu
from titus_isolate.model.processor.utils import DEFAULT_TOTAL_THREAD_COUNT, is_cpu_full
from titus_isolate.monitor.threshold_free_thread_provider import ThresholdFreeThreadProvider
from titus_isolate.utils import set_config_manager, set_workload_monitor_manager

# Module-level test setup: debug logging plus global config/monitor singletons.
config_logs(logging.DEBUG)
set_config_manager(ConfigManager(TestPropertyProvider({})))
set_workload_monitor_manager(TestWorkloadMonitorManager())

# Forecast allocator fixture using the default free-thread threshold.
forecast_ip_alloc_simple = ForecastIPCpuAllocator(
    TestCpuUsagePredictorManager(), ConfigManager(TestPropertyProvider({})),
    ThresholdFreeThreadProvider(DEFAULT_TOTAL_THRESHOLD))

# Allocators exercised by each test in this module.
ALLOCATORS = [
    IntegerProgramCpuAllocator(),
    GreedyCpuAllocator(), forecast_ip_alloc_simple
]


class TestWorkloadManager(unittest.TestCase):
    def test_single_static_workload_lifecycle(self):
        for allocator in ALLOCATORS:
            thread_count = 2
            workload = get_test_workload(uuid.uuid4(), thread_count, STATIC)

            cgroup_manager = MockCgroupManager()
            workload_manager = WorkloadManager(get_cpu(), cgroup_manager,