def mhod(state_config, otf, window_sizes, bruteforce_step, learning_steps,
         time_step, migration_time, utilization, state):
    """ The MHOD algorithm returning whether the host is overloaded.

    :param state_config: The state configuration.
     :type state_config: list(float)

    :param otf: The OTF parameter.
     :type otf: float,>0

    :param window_sizes: A list of window sizes.
     :type window_sizes: list(int)

    :param bruteforce_step: The step of the bruteforce algorithm.
     :type bruteforce_step: float

    :param learning_steps: The minimum number of utilization values that
                           must be accumulated before the policy is
                           optimized.
     :type learning_steps: int,>=0

    :param time_step: The length of the simulation time step in seconds.
     :type time_step: int,>=0

    :param migration_time: The VM migration time in time seconds.
     :type migration_time: float,>=0

    :param utilization: The history of the host's CPU utilization.
     :type utilization: list(float)

    :param state: The state of the algorithm.
     :type state: dict

    :return: The updated state and decision of the algorithm.
     :rtype: tuple(bool, dict)
    """
    utilization_length = len(utilization)
    number_of_states = len(state_config) + 1
    previous_state = 0

    # The multisize sliding window estimates of the Markov chain are
    # rebuilt from the full utilization history on every invocation.
    state['request_windows'] = estimation.init_request_windows(
        number_of_states, max(window_sizes))
    state['estimate_windows'] = estimation.init_deque_structure(
        window_sizes, number_of_states)
    state['variances'] = estimation.init_variances(
        window_sizes, number_of_states)
    state['acceptable_variances'] = estimation.init_variances(
        window_sizes, number_of_states)

    # Replay every state transition derived from the utilization history,
    # updating the request windows, probability estimates and variances.
    for current_state in utilization_to_states(state_config, utilization):
        state['request_windows'] = estimation.update_request_windows(
            state['request_windows'], previous_state, current_state)
        state['estimate_windows'] = estimation.update_estimate_windows(
            state['estimate_windows'], state['request_windows'],
            previous_state)
        state['variances'] = estimation.update_variances(
            state['variances'], state['estimate_windows'], previous_state)
        state['acceptable_variances'] = estimation.update_acceptable_variances(
            state['acceptable_variances'], state['estimate_windows'],
            previous_state)
        previous_state = current_state

    # Pick, per transition, the window size whose variance is acceptable,
    # and take the corresponding best probability estimates.
    selected_windows = estimation.select_window(
        state['variances'], state['acceptable_variances'], window_sizes)
    p = estimation.select_best_estimates(
        state['estimate_windows'], selected_windows)
    # These two are saved for testing purposes
    state['selected_windows'] = selected_windows
    state['p'] = p

    state_vector = build_state_vector(state_config, utilization)
    current_state = get_current_state(state_vector)
    state['previous_state'] = current_state
    # state_n is the index of the overload state (the last state).
    state_n = len(state_config)

    state['time_in_states'] += 1
    if current_state == state_n:
        state['time_in_state_n'] += 1

    if log.isEnabledFor(logging.DEBUG):
        log.debug('MHOD utilization:' + str(utilization))
        log.debug('MHOD time_in_states:' + str(state['time_in_states']))
        log.debug('MHOD time_in_state_n:' + str(state['time_in_state_n']))
        log.debug('MHOD p:' + str(p))
        log.debug('MHOD current_state:' + str(current_state))
        log.debug('MHOD p[current_state]:' + str(p[current_state]))

    if utilization_length >= learning_steps:
        # Optimize the policy only while in the overload state and only if
        # the estimated probability of staying in it is non-zero.
        if current_state == state_n and p[state_n][state_n] > 0:
            # NOTE(review): `ls` is not defined in this function --
            # presumably a file-level import of the L-function module used
            # by the bruteforce optimizer; confirm against the module's
            # imports, otherwise this raises NameError.
            policy = bruteforce.optimize(
                bruteforce_step, 1.0, otf, (migration_time / time_step),
                ls, p, state_vector,
                state['time_in_states'], state['time_in_state_n'])
            # This is saved for testing purposes
            state['policy'] = policy
            if log.isEnabledFor(logging.DEBUG):
                log.debug('MHOD policy:' + str(policy))
            command = issue_command_deterministic(policy)
            if log.isEnabledFor(logging.DEBUG):
                log.debug('MHOD command:' + str(command))
            return command, state
    return False, state
def mhod(
    state_config, otf, window_sizes, bruteforce_step, learning_steps,
    time_step, migration_time, utilization, state
):
    """ The MHOD algorithm returning whether the host is overloaded.

    NOTE(review): this is a second definition of ``mhod`` in the same
    module and shadows the earlier one at import time -- confirm which of
    the two definitions is intended to survive.

    :param state_config: The state configuration.
     :type state_config: list(float)

    :param otf: The OTF parameter.
     :type otf: float,>0

    :param window_sizes: A list of window sizes.
     :type window_sizes: list(int)

    :param bruteforce_step: The step of the bruteforce algorithm.
     :type bruteforce_step: float

    :param learning_steps: The minimum number of utilization values that
                           must be accumulated before the policy is
                           optimized.
     :type learning_steps: int,>=0

    :param time_step: The length of the simulation time step in seconds.
     :type time_step: int,>=0

    :param migration_time: The VM migration time in time seconds.
     :type migration_time: float,>=0

    :param utilization: The history of the host's CPU utilization.
     :type utilization: list(float)

    :param state: The state of the algorithm.
     :type state: dict

    :return: The updated state and decision of the algorithm.
     :rtype: tuple(bool, dict)
    """
    utilization_length = len(utilization)
    number_of_states = len(state_config) + 1
    previous_state = 0

    # The multisize sliding window estimates of the Markov chain are
    # rebuilt from the full utilization history on every invocation.
    state["request_windows"] = estimation.init_request_windows(
        number_of_states, max(window_sizes))
    state["estimate_windows"] = estimation.init_deque_structure(
        window_sizes, number_of_states)
    state["variances"] = estimation.init_variances(
        window_sizes, number_of_states)
    state["acceptable_variances"] = estimation.init_variances(
        window_sizes, number_of_states)

    # Replay every state transition derived from the utilization history,
    # updating the request windows, probability estimates and variances.
    for current_state in utilization_to_states(state_config, utilization):
        state["request_windows"] = estimation.update_request_windows(
            state["request_windows"], previous_state, current_state
        )
        state["estimate_windows"] = estimation.update_estimate_windows(
            state["estimate_windows"], state["request_windows"], previous_state
        )
        state["variances"] = estimation.update_variances(
            state["variances"], state["estimate_windows"], previous_state
        )
        state["acceptable_variances"] = estimation.update_acceptable_variances(
            state["acceptable_variances"], state["estimate_windows"], previous_state
        )
        previous_state = current_state

    # Pick, per transition, the window size whose variance is acceptable,
    # and take the corresponding best probability estimates.
    selected_windows = estimation.select_window(
        state["variances"], state["acceptable_variances"], window_sizes
    )
    p = estimation.select_best_estimates(state["estimate_windows"], selected_windows)
    # These two are saved for testing purposes
    state["selected_windows"] = selected_windows
    state["p"] = p

    state_vector = build_state_vector(state_config, utilization)
    current_state = get_current_state(state_vector)
    state["previous_state"] = current_state
    # state_n is the index of the overload state (the last state).
    state_n = len(state_config)

    state["time_in_states"] += 1
    if current_state == state_n:
        state["time_in_state_n"] += 1

    if log.isEnabledFor(logging.DEBUG):
        log.debug("MHOD utilization:" + str(utilization))
        log.debug("MHOD time_in_states:" + str(state["time_in_states"]))
        log.debug("MHOD time_in_state_n:" + str(state["time_in_state_n"]))
        log.debug("MHOD p:" + str(p))
        log.debug("MHOD current_state:" + str(current_state))
        log.debug("MHOD p[current_state]:" + str(p[current_state]))

    if utilization_length >= learning_steps:
        # Optimize the policy only while in the overload state and only if
        # the estimated probability of staying in it is non-zero.
        if current_state == state_n and p[state_n][state_n] > 0:
            # NOTE(review): `ls` is not defined in this function --
            # presumably a file-level import of the L-function module used
            # by the bruteforce optimizer; confirm against the module's
            # imports, otherwise this raises NameError.
            policy = bruteforce.optimize(
                bruteforce_step,
                1.0,
                otf,
                (migration_time / time_step),
                ls,
                p,
                state_vector,
                state["time_in_states"],
                state["time_in_state_n"],
            )
            # This is saved for testing purposes
            state["policy"] = policy
            if log.isEnabledFor(logging.DEBUG):
                log.debug("MHOD policy:" + str(policy))
            command = issue_command_deterministic(policy)
            if log.isEnabledFor(logging.DEBUG):
                log.debug("MHOD command:" + str(command))
            return command, state
    return False, state
def test_update_acceptable_variances(self):
    """Verify update_acceptable_variances for 2-state and 3-state chains.

    ``m`` is the module under test and ``c`` is a copy helper, both bound
    elsewhere in this file -- presumably the estimation module and a deep
    copy; confirm against the test module's imports.
    """
    # 2-state chain: estimate windows keyed by window size (2 and 4),
    # one dict per transition (from-state x to-state).
    est_win = [[{2: deque([0, 0.5], 2), 4: deque([1, 0, 0, 0], 4)},
                {2: deque([1.0, 0.5], 2), 4: deque([0, 1, 1, 1], 4)}],
               [{2: deque([0.5, 0.25], 2), 4: deque([0.25, 0.25, 0.5, 0.5], 4)},
                {2: deque([0.5, 0.75], 2), 4: deque([0.75, 0.75, 0.5, 0.5], 4)}]]
    acc_variances = [[{2: 0, 4: 0}, {2: 0, 4: 0}],
                     [{2: 0, 4: 0}, {2: 0, 4: 0}]]
    # Only the row of the given previous state (arg 3) is updated; the
    # other rows must stay untouched.
    self.assertEqual(
        m.update_acceptable_variances(c(acc_variances), c(est_win), 0),
        [[{2: 0.125, 4: 0.0}, {2: 0.125, 4: 0.0}],
         [{2: 0, 4: 0}, {2: 0, 4: 0}]])
    self.assertEqual(
        m.update_acceptable_variances(c(acc_variances), c(est_win), 1),
        [[{2: 0, 4: 0}, {2: 0, 4: 0}],
         [{2: 0.09375, 4: 0.0625}, {2: 0.09375, 4: 0.0625}]])
    # Applying the update twice with the same estimates is idempotent.
    self.assertEqual(
        m.update_acceptable_variances(
            m.update_acceptable_variances(
                c(acc_variances), c(est_win), 0),
            c(est_win), 0),
        [[{2: 0.125, 4: 0.0}, {2: 0.125, 4: 0.0}],
         [{2: 0, 4: 0}, {2: 0, 4: 0}]])

    # 3-state chain: same structure, one row/column per state.
    est_win = [[{2: deque([0, 0], 2), 4: deque([1, 0, 0, 0], 4)},
                {2: deque([1, 1], 2), 4: deque([0, 0, 1, 1], 4)},
                {2: deque([0, 0], 2), 4: deque([0, 1, 0, 0], 4)}],
               [{2: deque([0.5, 0.25], 2), 4: deque([0.25, 0.05, 0.5, 0.25], 4)},
                {2: deque([0.25, 0.5], 2), 4: deque([0.4, 0.55, 0.25, 0.5], 4)},
                {2: deque([0.25, 0.25], 2), 4: deque([0.35, 0.4, 0.25, 0.25], 4)}],
               [{2: deque([1, 0], 2), 4: deque([1, 0, 1, 0], 4)},
                {2: deque([0, 1], 2), 4: deque([0, 0, 0, 1], 4)},
                {2: deque([0, 0], 2), 4: deque([0, 1, 0, 0], 4)}]]
    acc_variances = [[{2: 0, 4: 0}, {2: 0, 4: 0}, {2: 0, 4: 0}],
                     [{2: 0, 4: 0}, {2: 0, 4: 0}, {2: 0, 4: 0}],
                     [{2: 0, 4: 0}, {2: 0, 4: 0}, {2: 0, 4: 0}]]
    self.assertEqual(
        m.update_acceptable_variances(c(acc_variances), c(est_win), 0),
        [[{2: 0.0, 4: 0.0}, {2: 0.0, 4: 0.0}, {2: 0.0, 4: 0.0}],
         [{2: 0, 4: 0}, {2: 0, 4: 0}, {2: 0, 4: 0}],
         [{2: 0, 4: 0}, {2: 0, 4: 0}, {2: 0, 4: 0}]])
    self.assertEqual(
        m.update_acceptable_variances(c(acc_variances), c(est_win), 1),
        [[{2: 0, 4: 0}, {2: 0, 4: 0}, {2: 0, 4: 0}],
         [{2: 0.09375, 4: 0.046875}, {2: 0.125, 4: 0.0625},
          {2: 0.09375, 4: 0.046875}],
         [{2: 0, 4: 0}, {2: 0, 4: 0}, {2: 0, 4: 0}]])
    self.assertEqual(
        m.update_acceptable_variances(c(acc_variances), c(est_win), 2),
        [[{2: 0, 4: 0}, {2: 0, 4: 0}, {2: 0, 4: 0}],
         [{2: 0, 4: 0}, {2: 0, 4: 0}, {2: 0, 4: 0}],
         [{2: 0.0, 4: 0.0}, {2: 0.0, 4: 0.0}, {2: 0.0, 4: 0.0}]])