Example #1
    def test_publish_window(self):
        set_config_manager(ConfigManager(TestPropertyProvider({})))
        set_workload_monitor_manager(TestWorkloadMonitorManager())
        window_publisher = TestOpportunisticWindowPublisher(
            get_current_end_func=lambda: datetime.utcnow() - timedelta(minutes=1),
            add_window_func=lambda: None,
        )

        w_id = str(uuid.uuid4())
        workload = get_test_workload(w_id, 1, STATIC)

        set_cpu_usage_predictor_manager(
            TestCpuUsagePredictorManager(
                TestSimpleCpuPredictor(
                    {w_id: DEFAULT_TOTAL_THRESHOLD - 0.001})))

        oeh = OversubscribeEventHandler(TestWorkloadManager([workload]),
                                        window_publisher)
        oeh._handle(json.loads(OVERSUBSCRIBE_EVENT.decode("utf-8")))

        self.assertEqual(0, oeh.get_skip_count())
        self.assertEqual(1, oeh.get_success_count())
        self.assertEqual(1, window_publisher.get_current_end_count)
        self.assertEqual(1, window_publisher.add_window_count)
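
The TestOpportunisticWindowPublisher double asserted on above is not reproduced on this page. Below is a minimal sketch consistent with its use in Examples #1-#3: injected stub functions plus per-method call counters. The method signatures (in particular add_window's parameters) are placeholders, not the real titus-isolate interface.

# Hypothetical sketch of the test double used above: each injected stub
# is wrapped so the matching call counter is incremented before the
# stubbed behavior runs. Attribute names mirror the counters asserted
# on in the examples; the real class may differ.
class TestOpportunisticWindowPublisher:

    def __init__(self, is_window_active_func=None, get_current_end_func=None,
                 add_window_func=None, cleanup_func=None):
        self.__is_window_active_func = is_window_active_func
        self.__get_current_end_func = get_current_end_func
        self.__add_window_func = add_window_func
        self.__cleanup_func = cleanup_func

        self.is_window_active_count = 0
        self.get_current_end_count = 0
        self.add_window_count = 0
        self.cleanup_count = 0

    def is_window_active(self):
        self.is_window_active_count += 1
        return self.__is_window_active_func()

    def get_current_end(self):
        self.get_current_end_count += 1
        return self.__get_current_end_func()

    def add_window(self, *args, **kwargs):
        self.add_window_count += 1
        return self.__add_window_func()

    def cleanup(self):
        self.cleanup_count += 1
        return self.__cleanup_func()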
Example #2
    def test_skip_active_window(self):
        set_config_manager(ConfigManager(TestPropertyProvider({})))
        window_publisher = TestOpportunisticWindowPublisher(
            is_window_active_func=lambda: True,
            add_window_func=lambda: None,
            cleanup_func=lambda: None)

        oeh = OversubscribeEventHandler(TestWorkloadManager([]),
                                        window_publisher)
        oeh._handle(json.loads(OVERSUBSCRIBE_EVENT.decode("utf-8")))

        self.assertEqual(1, oeh.get_skip_count())
        self.assertEqual(1, window_publisher.is_window_active_count)
Example #3
    def test_skip_active_window(self):
        set_config_manager(ConfigManager(TestPropertyProvider({})))
        window_publisher = TestOpportunisticWindowPublisher(
            get_current_end_func=lambda: datetime.utcnow() + timedelta(minutes=5),
            add_window_func=lambda: None,
        )

        oeh = OversubscribeEventHandler(TestWorkloadManager([]),
                                        window_publisher)
        oeh._handle(json.loads(OVERSUBSCRIBE_EVENT.decode("utf-8")))

        self.assertEqual(1, oeh.get_skip_count())
        self.assertEqual(1, window_publisher.get_current_end_count)
Example #4
    def test_get_workloads_endpoint(self):
        set_config_manager(ConfigManager(TestPropertyProvider({})))

        thread_count = 2
        workload_id = str(uuid.uuid4())
        workload = get_test_workload(workload_id, thread_count, STATIC)

        workload_manager = self.__get_default_workload_manager()
        set_workload_manager(workload_manager)

        workloads = json.loads(get_workloads())
        self.assertEqual(0, len(workloads))

        workload_manager.add_workload(workload)

        workloads = json.loads(get_workloads())
        self.assertEqual(workload_id, workloads[0]["id"])
        self.assertEqual(STATIC, workloads[0]["type"])
        self.assertEqual(thread_count, workloads[0]["thread_count"])
Example #5
    def test_single_workload_memory_settings(self):
        for allocator in ALLOCATORS:
            thread_count = 2
            workload = get_test_workload(str(uuid.uuid4()), thread_count,
                                         STATIC)

            cgroup_manager = MockCgroupManager()
            workload_manager = WorkloadManager(get_cpu(), cgroup_manager,
                                               allocator)

            # With an empty configuration we should expect default False behavior
            # for all memory flags
            set_config_manager(ConfigManager(TestPropertyProvider({})))

            workload_manager.add_workload(workload)
            self.assertFalse(
                cgroup_manager.get_memory_migrate(workload.get_id()))
            self.assertFalse(
                cgroup_manager.get_memory_spread_page(workload.get_id()))
            self.assertFalse(
                cgroup_manager.get_memory_spread_slab(workload.get_id()))
            workload_manager.remove_workload(workload.get_id())

            # With all memory configuration options set to True we should expect all memory
            # flags to be set to True
            set_config_manager(
                ConfigManager(
                    TestPropertyProvider({
                        TITUS_ISOLATE_MEMORY_MIGRATE: True,
                        TITUS_ISOLATE_MEMORY_SPREAD_PAGE: True,
                        TITUS_ISOLATE_MEMORY_SPREAD_SLAB: True,
                    })))

            workload_manager.add_workload(workload)
            self.assertTrue(
                cgroup_manager.get_memory_migrate(workload.get_id()))
            self.assertTrue(
                cgroup_manager.get_memory_spread_page(workload.get_id()))
            self.assertTrue(
                cgroup_manager.get_memory_spread_slab(workload.get_id()))
            workload_manager.remove_workload(workload.get_id())
Example #6
    log.info("Isolating currently running workloads...")
    for workload in get_current_workloads(docker.from_env()):
        try:
            workload_manager.add_workload(workload)
        except Exception:
            log.exception(
                "Failed to add currently running workload '{}'; it may have exited."
                .format(workload.get_id()))

    log.info("Isolated currently running workloads.")
    # Start processing events after adding running workloads to avoid processing a die event before we add a workload
    event_manager.start_processing_events()


if __name__ != '__main__' and not is_testing():
    set_config_manager(ConfigManager(EnvPropertyProvider))
    log.info("Configuring logging...")
    gunicorn_logger = logging.getLogger('gunicorn.error')
    app.logger.handlers = gunicorn_logger.handlers
    app.logger.setLevel(gunicorn_logger.level)

    # Set the schedule library's logging level higher so it doesn't spam messages every time it schedules a task
    logging.getLogger('schedule').setLevel(logging.WARN)

    exit_handler = RealExitHandler()

    if is_kubernetes():
        log.info("Setting pod manager...")
        pod_manager = PodManager()
        pod_manager.start()
        set_pod_manager(pod_manager)
Example #7
        self.__reg.gauge(SOLVER_ASSIGN_THREADS_FAILURE, tags).set(assign_threads_failure_count)
        self.__reg.gauge(SOLVER_FREE_THREADS_SUCCESS, tags).set(free_threads_success_count)
        self.__reg.gauge(SOLVER_FREE_THREADS_FAILURE, tags).set(free_threads_failure_count)
        self.__reg.gauge(SOLVER_REBALANCE_SUCCESS, tags).set(rebalance_success_count)
        self.__reg.gauge(SOLVER_REBALANCE_FAILURE, tags).set(rebalance_failure_count)


if __name__ != '__main__' and not is_testing():
    log.info("Configuring logging...")
    gunicorn_logger = logging.getLogger('gunicorn.error')
    app.logger.handlers = gunicorn_logger.handlers
    app.logger.setLevel(gunicorn_logger.level)

    log.info("Setting config manager...")
    config_manager = get_config_manager(EnvPropertyProvider())
    set_config_manager(config_manager)

    log.info("Setting event log manager...")
    set_event_log_manager(KeystoneEventLogManager())

    log.info("Setting up the cpu usage predictor manager...")
    set_cpu_usage_predictor_manager(ConfigurableCpuUsagePredictorManager())

    log.info("Setting cpu_allocators...")

    assign_alloc_str = config_manager.get_str(REMOTE_ASSIGN_ALLOCATOR)
    free_alloc_str = config_manager.get_str(REMOTE_FREE_ALLOCATOR)
    rebalance_alloc_str = config_manager.get_str(REMOTE_REBALANCE_ALLOCATOR)
    log.info("Setting cpu_allocators to assign: {}, free: {}, rebalance: {}".format(
        assign_alloc_str, free_alloc_str, rebalance_alloc_str))
Example #8
DEFAULT_TEST_CPU = 1
DEFAULT_TEST_MEM = 256
DEFAULT_TEST_DISK = 512
DEFAULT_TEST_NETWORK = 1024
DEFAULT_TEST_APP_NAME = 'test_app_name'
DEFAULT_TEST_OWNER_EMAIL = '*****@*****.**'
DEFAULT_TEST_IMAGE = 'test_image'
DEFAULT_TEST_CMD = 'test_cmd'
DEFAULT_TEST_ENTRYPOINT = 'test_entrypoint'
DEFAULT_TEST_JOB_TYPE = 'SERVICE'
DEFAULT_TEST_WORKLOAD_TYPE = 'static'
DEFAULT_TEST_INSTANCE_ID = 'test_instance_id'
DEFAULT_TEST_REQUEST_METADATA = {INSTANCE_ID: DEFAULT_TEST_INSTANCE_ID}
DEFAULT_TEST_OPPORTUNISTIC_THREAD_COUNT = 0

set_config_manager(ConfigManager(TestPropertyProvider({})))


def wait_until(func, timeout=DEFAULT_TIMEOUT_SECONDS, period=0.01):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if func():
            return
        time.sleep(period)

    raise TimeoutError(
        "Function did not succeed within timeout: '{}'.".format(timeout))


def counter_value_equals(registry, key, expected_value, tags={}):
    value = registry.counter(key, tags).count()
    return value == expected_value
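
counter_value_equals returns a boolean, so it pairs naturally with the wait_until helper defined above: a test can poll the metric registry until a counter reaches an expected value. A hypothetical usage, where registry and WORKLOAD_COUNT are placeholder names standing in for a real registry object and metric key:

# Hypothetical usage: block (up to the default timeout) until the
# counter reaches the expected value, raising TimeoutError otherwise.
wait_until(lambda: counter_value_equals(registry, WORKLOAD_COUNT, 1))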
Example #9
    def test_parse_from_file(self):
        set_config_manager(ConfigManager(TestPropertyProvider({})))
        dir = os.path.dirname(os.path.abspath(__file__))
        self.assertEqual(
            expected_path,
            get_cgroup_path_from_file(dir + "/test_cgroup_file", CPUSET))
Example #10
    def test_wait_for_file_to_exist(self):
        set_config_manager(ConfigManager(TestPropertyProvider({})))
        with self.assertRaises(TimeoutError):
            _wait_for_file_to_exist("/tmp/foo", 0.1)

        _wait_for_file_to_exist(__file__, 0.1)
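
The _wait_for_file_to_exist helper under test is not reproduced here. A minimal sketch consistent with the assertions above (raise TimeoutError if the path never appears within the timeout, return once it exists) might look like the following; check_period_sec is an assumed parameter, and the real helper may poll differently.

import os
import time


# Minimal sketch, assuming a simple polling loop modeled on the
# wait_until helper from Example #8.
def _wait_for_file_to_exist(path, timeout, check_period_sec=0.01):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if os.path.exists(path):
            return
        time.sleep(check_period_sec)

    raise TimeoutError(
        "File did not exist within timeout: '{}'.".format(path))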