Example #1
def init_state(history_size, window_sizes, number_of_states):
    """Initialize the state dictionary of the MHOD algorithm.

    :param history_size: The number of last system states to store.
    :param window_sizes: The required window sizes.
    :param number_of_states: The number of states.
    :return: The initialized state dictionary.
    """
    return {
        'previous_state': 0,
        'previous_utilization': [],
        'time_in_states': 0,
        'time_in_state_n': 0,
        'request_windows': estimation.init_request_windows(
            number_of_states, max(window_sizes)),
        'estimate_windows': estimation.init_deque_structure(
            window_sizes, number_of_states),
        'variances': estimation.init_variances(
            window_sizes, number_of_states),
        'acceptable_variances': estimation.init_variances(
            window_sizes, number_of_states),
    }
Example #2
def init_state(history_size, window_sizes, number_of_states):
    """Initialize the state dictionary of the MHOD algorithm.

    :param history_size: The number of last system states to store.
    :param window_sizes: The required window sizes.
    :param number_of_states: The number of states.
    :return: The initialized state dictionary.
    """
    return {
        'previous_state': 0,
        'previous_utilization': [],
        'time_in_states': 0,
        'time_in_state_n': 0,
        'request_windows': estimation.init_request_windows(
            number_of_states, max(window_sizes)),
        'estimate_windows': estimation.init_deque_structure(
            window_sizes, number_of_states),
        'variances': estimation.init_variances(
            window_sizes, number_of_states),
        'acceptable_variances': estimation.init_variances(
            window_sizes, number_of_states)}
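
For context, here is a minimal usage sketch of init_state. The estimation helpers below are hypothetical stand-ins (the real ones live in the estimation module and are not shown in this section); they only mimic the shapes the initializers plausibly return, so the call can be exercised in isolation.

from collections import deque


class estimation:
    # Hypothetical stubs for the real estimation module, for illustration only.

    @staticmethod
    def init_request_windows(number_of_states, max_window_size):
        # One bounded request window per state.
        return [deque([], max_window_size) for _ in range(number_of_states)]

    @staticmethod
    def init_deque_structure(window_sizes, number_of_states):
        # For each (state, state) pair, one bounded deque per window size.
        return [[{size: deque([], size) for size in window_sizes}
                 for _ in range(number_of_states)]
                for _ in range(number_of_states)]

    @staticmethod
    def init_variances(window_sizes, number_of_states):
        # A zero variance for each (state, state, window size) combination.
        return [[{size: 0.0 for size in window_sizes}
                 for _ in range(number_of_states)]
                for _ in range(number_of_states)]


state = init_state(history_size=100,
                   window_sizes=[30, 40, 50],
                   number_of_states=2)
assert state['previous_state'] == 0
assert state['time_in_states'] == 0
assert len(state['request_windows']) == 2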
Example #3
def mhod(state_config, otf, window_sizes, bruteforce_step, learning_steps,
         time_step, migration_time, utilization, state):
    """The MHOD algorithm returning whether the host is overloaded.

    :param state_config: The state configuration.
    :param otf: The OTF (overload time fraction) parameter.
    :param window_sizes: A list of window sizes.
    :param bruteforce_step: The step of the bruteforce algorithm.
    :param learning_steps: The minimum length of the utilization history
        required before the algorithm starts issuing decisions.
    :param time_step: The length of the simulation time step in seconds.
    :param migration_time: The VM migration time in seconds.
    :param utilization: The history of the host's CPU utilization.
    :param state: The state of the algorithm.
    :return: The decision of the algorithm and the updated state.
    """
    utilization_length = len(utilization)
    #    if utilization_length == state['time_in_states'] and \
    #      utilization == state['previous_utilization']:
    #        # No new utilization values
    #        return False, state

    number_of_states = len(state_config) + 1
    previous_state = 0
    #    state['previous_utilization'] = utilization
    state['request_windows'] = estimation.init_request_windows(
        number_of_states, max(window_sizes))
    state['estimate_windows'] = estimation.init_deque_structure(
        window_sizes, number_of_states)
    state['variances'] = estimation.init_variances(
        window_sizes, number_of_states)
    state['acceptable_variances'] = estimation.init_variances(
        window_sizes, number_of_states)

    for current_state in utilization_to_states(state_config, utilization):
        state['request_windows'] = estimation.update_request_windows(
            state['request_windows'],
            previous_state,
            current_state)
        state['estimate_windows'] = estimation.update_estimate_windows(
            state['estimate_windows'],
            state['request_windows'],
            previous_state)
        state['variances'] = estimation.update_variances(
            state['variances'],
            state['estimate_windows'],
            previous_state)
        state['acceptable_variances'] = estimation.update_acceptable_variances(
            state['acceptable_variances'],
            state['estimate_windows'],
            previous_state)
        previous_state = current_state

    selected_windows = estimation.select_window(
        state['variances'],
        state['acceptable_variances'],
        window_sizes)
    p = estimation.select_best_estimates(
        state['estimate_windows'],
        selected_windows)
    # These two are saved for testing purposes
    state['selected_windows'] = selected_windows
    state['p'] = p

    state_vector = build_state_vector(state_config, utilization)
    current_state = get_current_state(state_vector)
    state['previous_state'] = current_state

    state_n = len(state_config)
    #    if utilization_length > state['time_in_states'] + 1:
    #        for s in utilization_to_states(
    #                state_config,
    #                utilization[-(utilization_length -
    # state['time_in_states']):]):
    #            state['time_in_states'] += 1
    #            if s == state_n:
    #                state['time_in_state_n'] += 1
    #    else:
    state['time_in_states'] += 1
    if current_state == state_n:
        state['time_in_state_n'] += 1

    LOG.debug('MHOD utilization: %s', utilization)
    LOG.debug('MHOD time_in_states: %s', state['time_in_states'])
    LOG.debug('MHOD time_in_state_n: %s', state['time_in_state_n'])
    LOG.debug('MHOD p: %s', p)
    LOG.debug('MHOD current_state: %s', current_state)
    LOG.debug('MHOD p[current_state]: %s', p[current_state])

    if utilization_length >= learning_steps:
        if current_state == state_n and p[state_n][state_n] > 0:
            # if p[current_state][state_n] > 0:
            policy = bruteforce.optimize(
                bruteforce_step, 1.0, otf, (migration_time / time_step),
                learning_steps, p, state_vector, state['time_in_states'],
                state['time_in_state_n'])
            # This is saved for testing purposes
            state['policy'] = policy
            LOG.debug('MHOD policy: %s', policy)
            command = issue_command_deterministic(policy)
            LOG.debug('MHOD command: %s', command)
            return command, state
    return False, state
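
The loop above converts the utilization history into a sequence of discrete states via utilization_to_states, whose definition is not shown in this section. A plausible sketch of that mapping, assuming state_config is a sorted list of utilization thresholds (so number_of_states = len(state_config) + 1):

def utilization_to_states(state_config, utilization):
    # Hypothetical sketch: map each utilization value to the index of the
    # highest threshold it reaches; values above all thresholds fall into
    # the last state (the overload state n).
    states = []
    for u in utilization:
        state = 0
        for threshold in state_config:
            if u >= threshold:
                state += 1
        states.append(state)
    return states


# With a single threshold at 0.8, there are two states: 0 (not overloaded)
# and 1 (overloaded).
assert utilization_to_states([0.8], [0.2, 0.9, 0.5]) == [0, 1, 0]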
Example #4
def mhod(state_config, otf, window_sizes, bruteforce_step, learning_steps,
         time_step, migration_time, utilization, state):
    """The MHOD algorithm returning whether the host is overloaded.

    :param state_config: The state configuration.
    :param otf: The OTF (overload time fraction) parameter.
    :param window_sizes: A list of window sizes.
    :param bruteforce_step: The step of the bruteforce algorithm.
    :param learning_steps: The minimum length of the utilization history
        required before the algorithm starts issuing decisions.
    :param time_step: The length of the simulation time step in seconds.
    :param migration_time: The VM migration time in seconds.
    :param utilization: The history of the host's CPU utilization.
    :param state: The state of the algorithm.
    :return: The decision of the algorithm and the updated state.
    """
    utilization_length = len(utilization)
    #    if utilization_length == state['time_in_states'] and \
    #      utilization == state['previous_utilization']:
    #        # No new utilization values
    #        return False, state

    number_of_states = len(state_config) + 1
    previous_state = 0
    #    state['previous_utilization'] = utilization
    state['request_windows'] = estimation.init_request_windows(
        number_of_states, max(window_sizes))
    state['estimate_windows'] = estimation.init_deque_structure(
        window_sizes, number_of_states)
    state['variances'] = estimation.init_variances(window_sizes,
                                                   number_of_states)
    state['acceptable_variances'] = estimation.init_variances(
        window_sizes, number_of_states)

    for current_state in utilization_to_states(state_config, utilization):
        state['request_windows'] = estimation.update_request_windows(
            state['request_windows'], previous_state, current_state)
        state['estimate_windows'] = estimation.update_estimate_windows(
            state['estimate_windows'], state['request_windows'],
            previous_state)
        state['variances'] = estimation.update_variances(
            state['variances'], state['estimate_windows'], previous_state)
        state['acceptable_variances'] = estimation.update_acceptable_variances(
            state['acceptable_variances'], state['estimate_windows'],
            previous_state)
        previous_state = current_state

    selected_windows = estimation.select_window(state['variances'],
                                                state['acceptable_variances'],
                                                window_sizes)
    p = estimation.select_best_estimates(state['estimate_windows'],
                                         selected_windows)
    # These two are saved for testing purposes
    state['selected_windows'] = selected_windows
    state['p'] = p

    state_vector = build_state_vector(state_config, utilization)
    current_state = get_current_state(state_vector)
    state['previous_state'] = current_state

    state_n = len(state_config)
    #    if utilization_length > state['time_in_states'] + 1:
    #        for s in utilization_to_states(
    #                state_config,
    #                utilization[-(utilization_length -
    # state['time_in_states']):]):
    #            state['time_in_states'] += 1
    #            if s == state_n:
    #                state['time_in_state_n'] += 1
    #    else:
    state['time_in_states'] += 1
    if current_state == state_n:
        state['time_in_state_n'] += 1

    LOG.debug('MHOD utilization: %s', utilization)
    LOG.debug('MHOD time_in_states: %s', state['time_in_states'])
    LOG.debug('MHOD time_in_state_n: %s', state['time_in_state_n'])
    LOG.debug('MHOD p: %s', p)
    LOG.debug('MHOD current_state: %s', current_state)
    LOG.debug('MHOD p[current_state]: %s', p[current_state])

    if utilization_length >= learning_steps:
        if current_state == state_n and p[state_n][state_n] > 0:
            # if p[current_state][state_n] > 0:
            policy = bruteforce.optimize(bruteforce_step, 1.0, otf,
                                         (migration_time / time_step),
                                         learning_steps, p, state_vector,
                                         state['time_in_states'],
                                         state['time_in_state_n'])
            # This is saved for testing purposes
            state['policy'] = policy
            LOG.debug('MHOD policy: %s', policy)
            command = issue_command_deterministic(policy)
            LOG.debug('MHOD command: %s', command)
            return command, state
    return False, state
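
Finally, a hypothetical end-to-end invocation, assuming init_state and mhod above plus their estimation and bruteforce dependencies are importable; all parameter values are illustrative, not recommended settings.

# Illustrative parameters: one threshold at 80% CPU gives two states.
state_config = [0.8]
window_sizes = [30, 40, 50, 60]
state = init_state(history_size=100,
                   window_sizes=window_sizes,
                   number_of_states=len(state_config) + 1)

utilization_history = [0.25, 0.30, 0.62, 0.59, 0.71, 0.85, 0.92, 0.94]

decision, state = mhod(state_config=state_config,
                       otf=0.1,                # allowed overload time fraction
                       window_sizes=window_sizes,
                       bruteforce_step=0.1,
                       learning_steps=5,
                       time_step=300,          # 5-minute simulation steps
                       migration_time=30,
                       utilization=utilization_history,
                       state=state)
if decision:
    print('Host detected as overloaded; a VM migration should be initiated.')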