def test_optimize(self):
     """Verify that optimize() delegates to its collaborators correctly.

     Expected call sequence inside optimize():
       1. nlp.build_objective(ls, state_vector, p) -> objective
       2. nlp.build_constraint(otf, migration_time, ls, state_vector,
                               p, time_in_states, time_in_state_n) -> constraint
       3. solve2(objective, constraint, step, limit) -> solution
     and optimize() must return that solution unchanged.
     """
     # NOTE(review): MockTransaction / mock / expect look like a
     # mocktest-style DSL; each .once() pins exactly one call with
     # exactly these arguments -- confirm against the mocking library.
     with MockTransaction:
         # Plain scalar/list inputs forwarded to the mocked collaborators;
         # their exact values only matter for argument matching.
         step = 0.1
         limit = 1
         otf = 0.3
         migration_time = 20.
         ls = [lambda x: x, lambda x: x]
         p = [[0, 1]]
         state_vector = [0, 1]
         time_in_states = 10
         time_in_state_n = 5
         # Sentinel objects returned by the mocked builders so we can
         # check they are passed through to solve2 untouched.
         objective = mock('objective')
         constraint = mock('constraint')
         solution = [1, 2, 3]
         expect(nlp).build_objective(ls, state_vector, p). \
             and_return(objective).once()
         expect(nlp).build_constraint(
             otf, migration_time, ls, state_vector,
             p, time_in_states, time_in_state_n). \
             and_return(constraint).once()
         expect(b).solve2(objective, constraint, step, limit). \
             and_return(solution).once()
         # optimize() must return solve2's result as-is.
         self.assertEqual(
             b.optimize(step, limit, otf, migration_time, ls,
                        p, state_vector, time_in_states, time_in_state_n),
             solution)
# Example #2
# 0
def mhod(state_config, otf, window_sizes, bruteforce_step, learning_steps,
         time_step, migration_time, utilization, state):
    """ The MHOD algorithm returning whether the host is overloaded.

    :param state_config: The state configuration.
     :type state_config: list(float)

    :param otf: The OTF parameter.
     :type otf: float,>0

    :param window_sizes: A list of window sizes.
     :type window_sizes: list(int)

    :param bruteforce_step: The step of the bruteforce algorithm.
     :type bruteforce_step: float

    :param learning_steps: The minimum length of the utilization history
                           required before the policy optimization is applied.
     :type learning_steps: int,>=0

    :param time_step: The length of the simulation time step in seconds.
     :type time_step: int,>=0

    :param migration_time: The VM migration time in seconds.
     :type migration_time: float,>=0

    :param utilization: The history of the host's CPU utilization.
     :type utilization: list(float)

    :param state: The state of the algorithm.
     :type state: dict

    :return: The decision of the algorithm and its updated state.
     :rtype: tuple(bool, dict)
    """
    utilization_length = len(utilization)
#    if utilization_length == state['time_in_states'] and \
#      utilization == state['previous_utilization']:
#        # No new utilization values
#        return False, state

    # One state per configured threshold boundary, plus the top state.
    number_of_states = len(state_config) + 1
    previous_state = 0
#    state['previous_utilization'] = utilization
    # (Re)initialize the Markov-chain estimation structures from scratch
    # on every call -- the whole utilization history is replayed below.
    state['request_windows'] = estimation.init_request_windows(
        number_of_states, max(window_sizes))
    state['estimate_windows'] = estimation.init_deque_structure(
        window_sizes, number_of_states)
    state['variances'] = estimation.init_variances(
        window_sizes, number_of_states)
    state['acceptable_variances'] = estimation.init_variances(
        window_sizes, number_of_states)

    # Replay the discretized utilization history, updating the transition
    # request counts, probability estimates, and their variances per step.
    for i, current_state in enumerate(utilization_to_states(state_config, utilization)):
        state['request_windows'] = estimation.update_request_windows(
            state['request_windows'],
            previous_state,
            current_state)
        state['estimate_windows'] = estimation.update_estimate_windows(
            state['estimate_windows'],
            state['request_windows'],
            previous_state)
        state['variances'] = estimation.update_variances(
            state['variances'],
            state['estimate_windows'],
            previous_state)
        state['acceptable_variances'] = estimation.update_acceptable_variances(
            state['acceptable_variances'],
            state['estimate_windows'],
            previous_state)
        previous_state = current_state

    # Pick, per transition, the window size whose variance is acceptable,
    # then extract the best transition probability estimates p.
    selected_windows = estimation.select_window(
        state['variances'],
        state['acceptable_variances'],
        window_sizes)
    p = estimation.select_best_estimates(
        state['estimate_windows'],
        selected_windows)
    # These two are saved for testing purposes
    state['selected_windows'] = selected_windows
    state['p'] = p

    state_vector = build_state_vector(state_config, utilization)
    current_state = get_current_state(state_vector)
    state['previous_state'] = current_state

    # Index of the highest (overload) state.
    state_n = len(state_config)
#    if utilization_length > state['time_in_states'] + 1:
#        for s in utilization_to_states(
#                state_config,
#                utilization[-(utilization_length - state['time_in_states']):]):
#            state['time_in_states'] += 1
#            if s == state_n:
#                state['time_in_state_n'] += 1
#    else:
    # Advance the time counters by one step; also count time spent in
    # the overload state n.
    state['time_in_states'] += 1
    if current_state == state_n:
        state['time_in_state_n'] += 1

    if log.isEnabledFor(logging.DEBUG):
        log.debug('MHOD utilization:' + str(utilization))
        log.debug('MHOD time_in_states:' + str(state['time_in_states']))
        log.debug('MHOD time_in_state_n:' + str(state['time_in_state_n']))
        log.debug('MHOD p:' + str(p))
        log.debug('MHOD current_state:' + str(current_state))
        log.debug('MHOD p[current_state]:' + str(p[current_state]))

    # Only optimize once enough history has accumulated (learning phase),
    # and only when currently in the overload state with a non-zero
    # probability of staying there.
    if utilization_length >= learning_steps:
        if current_state == state_n and p[state_n][state_n] > 0:
        # if p[current_state][state_n] > 0:
            # NOTE(review): `ls` is not defined in this function --
            # presumably a module-level list of L-functions; confirm.
            # migration_time / time_step converts seconds to time steps.
            policy = bruteforce.optimize(
                bruteforce_step, 1.0, otf, (migration_time / time_step), ls, p,
                state_vector, state['time_in_states'], state['time_in_state_n'])
            # This is saved for testing purposes
            state['policy'] = policy
            if log.isEnabledFor(logging.DEBUG):
                log.debug('MHOD policy:' + str(policy))
            command = issue_command_deterministic(policy)
            if log.isEnabledFor(logging.DEBUG):
                log.debug('MHOD command:' + str(command))
            return command, state
    return False, state