Example #1
def example1(policy_id):
    params = Params(
        mu=3,
        lambda1=4,
        lambda2=4,
        servers_number=5,
        fragments_numbers=[2, 3],
        queues_capacities=[3, 3],
    )

    all_states = get_all_states(params)
    states_with_policy = get_policed_states(all_states, params)
    policies = get_all_possible_policies(states_with_policy)
    # print("All possible policies number: ", len(policies))

    # Pick the policy vector at index policy_id from the enumeration
    # (falls back to the last enumerated policy if policy_id is out of range).
    selected_policy_vector = None
    for idx, p in enumerate(policies):
        selected_policy_vector = p
        if idx == policy_id:
            break

    print("Current policy:")
    policy_function = Policy(
        selected_policy_vector, states_with_policy, params
    ).get_action_for_state
    for state in states_with_policy:
        print(f"    state = {state}, action = {policy_function(state)}")

    bar = ConsoleProgressBar("Progress: ")
    model = SplitMergeSystem(params, bar, policy_function)
    simulation_time = 10_000
    statistics = model.run(simulation_time)

    print(statistics)
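
If the file is executed directly, a small driver such as the one below could launch this example; the __main__ guard and the policy index 0 are assumptions for illustration, not taken from the repository.

if __name__ == "__main__":
    # Assumed entry point: simulate the system under an arbitrarily chosen policy.
    example1(policy_id=0)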
Example #2
def example1(policy_id):
    params = Params(
        mu=3,
        lambda1=4,
        lambda2=4,
        servers_number=5,
        fragments_numbers=[2, 3],
        queues_capacities=[3, 3],
    )

    all_states = get_all_states(params)
    states_with_policy = get_policed_states(all_states, params)
    policies = get_all_possible_policies(states_with_policy)
    # print("All possible policies number: ", len(policies))

    # Pick the policy vector at index policy_id (or the last one if out of range).
    selected_policy_vector = None
    for idx, p in enumerate(policies):
        selected_policy_vector = p
        if idx == policy_id:
            break

    print("Current policy:")
    policy = Policy(selected_policy_vector, states_with_policy, params)
    for state in states_with_policy:
        print(
            f"    state = {state}, action = {policy.get_action_for_state(state)}"
        )

    calculations = Calculations(params)
    calculations.calculate(policy)
    performance_measures = calculations.performance_measures
    print(performance_measures, "\n")
Example #3
def example3(queue_id):
    params = Params(
        mu=3,
        lambda1=4,
        lambda2=4,
        servers_number=6,
        fragments_numbers=[3, 2],
        queues_capacities=[3, 3],
    )

    all_states = get_all_states(params)
    states_with_policy = get_policed_states(all_states, params)

    # Constant policy: take action `queue_id` in every state that admits a choice.
    selected_policy_vector = [queue_id] * len(states_with_policy)

    print("Current policy:")
    policy = Policy(selected_policy_vector, states_with_policy, params)
    for state in states_with_policy:
        print(
            f"    state = {state}, action = {policy.get_action_for_state(state)}"
        )

    calculations = Calculations(params)
    calculations.calculate(policy)
    performance_measures = calculations.performance_measures
    print(performance_measures, "\n")
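
To compare the two constant policies analytically, the example can be called once per queue index; the assumption that the admissible actions are 0 and 1 matches the two demand classes configured above.

if __name__ == "__main__":
    # Assumed driver: evaluate the "always choose queue 0" and "always choose
    # queue 1" policies and print their performance measures.
    example3(queue_id=0)
    example3(queue_id=1)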
Example #4
    def __init__(
        self,
        model: SplitMergeSystem,
        state_with_policy: list,
        learning_rate=1,
        reward_decay=0.9,
        e_greedy=0.4,
        progress_bar: ProgressBar = None,
    ):
        self.model = model
        self.state_with_policy = state_with_policy
        self.actions = model.actions
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon = e_greedy
        self._progress_bar = progress_bar

        all_states = get_all_states(model.get_params())
        self._states_with_policy = get_policed_states(all_states, model.get_params())
        self.n_states_with_actions = len(self._states_with_policy)

        self._state_id_map = dict()
        for state_id, state in enumerate(self._states_with_policy):
            self._state_id_map[str(state)] = state_id

        self._n_actions = 2
        self.q_table = np.zeros((self.n_states_with_actions, self._n_actions))
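
For orientation, here is a minimal sketch of an epsilon-greedy step and the textbook Q-update built from the hyperparameters stored above. It is not the repository's implementation: the helper names are hypothetical, and epsilon is treated here as the exploration probability (the project may interpret e_greedy the other way around).

import numpy as np

def choose_action(q_table, state_id_map, state, epsilon, rng=None):
    # Hypothetical helper: epsilon-greedy choice over one q_table row,
    # assuming epsilon is the probability of exploring.
    rng = rng or np.random.default_rng()
    row = q_table[state_id_map[str(state)]]
    if rng.random() < epsilon:
        return int(rng.integers(len(row)))  # explore: random action index
    return int(np.argmax(row))              # exploit: best known action

def q_update(q_table, s, a, reward, s_next, lr, gamma):
    # Standard one-step Q-learning update using the agent's lr and gamma.
    target = reward + gamma * q_table[s_next].max()
    q_table[s, a] += lr * (target - q_table[s, a])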
Example #5
    def calculate(self, states_policy) -> None:
        from analytical_calculations.generator import get_stationary_distribution
        from analytical_calculations.logs import log_network_configuration, log_message

        log_network_configuration(self.params)
        states = get_all_states(self.params)

        distribution = get_stationary_distribution(
            states, states_policy, self.params
        )
        log_message(f"Stationary distribution P_i:\n {distribution}")
        log_message(f"Check sum P_i: {sum(distribution)}")
        self.calculate_performance_measures(distribution, states)
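
For a continuous-time Markov chain with generator matrix Q, the stationary distribution solves pi Q = 0 with its entries summing to one. A generic, self-contained numpy sketch of that computation (illustrative only, not necessarily how the project's generator module implements get_stationary_distribution) is:

import numpy as np

def stationary_distribution(Q):
    # Solve pi @ Q = 0 together with sum(pi) = 1 via least squares.
    n = Q.shape[0]
    A = np.vstack([Q.T, np.ones(n)])  # global balance equations plus normalization
    b = np.zeros(n + 1)
    b[-1] = 1.0
    pi, *_ = np.linalg.lstsq(A, b, rcond=None)
    return pi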
Example #6
def example1():
    params = Params(
        mu=3,
        lambda1=3,
        lambda2=3,
        servers_number=6,
        fragments_numbers=[2, 3],
        queues_capacities=[2, 2],
    )

    all_states = get_all_states(params)
    states_with_policy = get_policed_states(all_states, params)
    states_policy = Policy(tuple(), states_with_policy, params)
    states_policy.print_adjacent_states()

    iterative = IterativeMethod(all_states, states_policy, get_state_reward, params)
    iterative.apply(print_iteration_results=True)

    print("executed")
Example #7
def example2():
    params = Params(
        mu=3,
        lambda1=5,
        lambda2=5,
        servers_number=6,
        fragments_numbers=[3, 2],
        queues_capacities=[1, 1],
    )

    all_states = get_all_states(params)
    states_with_policy = get_policed_states(all_states, params)
    print("All states where policy is possible:")
    pprint(states_with_policy)

    strategies = get_all_possible_policies(states_with_policy)
    states_policy = Policy(tuple(), states_with_policy, params)
    states_policy.print_adjacent_states()

    storage = PerformanceMeasuresStorage()
    print()

    # Evaluate every possible policy analytically and record its performance measures.
    for strategy in strategies:
        states_policy.policy_vector = strategy
        print(strategy)
        calculations = Calculations(params)
        calculations.calculate(states_policy)
        performance_measures = calculations.performance_measures
        print(performance_measures, "\n")

        storage.append(strategy, performance_measures)

    print(storage)
    print()
    storage.show_difference()

    print("executed")
Example #8
def example1(lambdas, queue_id):
    storage = PerformanceMeasuresStorage()
    for lambd in lambdas:
        params = Params(
            mu=3,
            lambda1=lambd,
            lambda2=lambd,
            servers_number=5,
            fragments_numbers=[2, 3],
            queues_capacities=[3, 3],
        )

        all_states = get_all_states(params)
        states_with_policy = get_policed_states(all_states, params)

        selected_policy_vector = [queue_id] * len(states_with_policy)

        policy = Policy(selected_policy_vector, states_with_policy, params)

        calculations = Calculations(params)
        calculations.calculate(policy)
        performance_measures = calculations.performance_measures
        print(performance_measures, "\n")

        storage.append(lambd, performance_measures)

    print(storage)
    print()
    storage.show_difference()

    plt.title("Зависимость T от интенсивности входящего потока")
    plt.xlabel("lambdas")
    plt.ylabel("T")
    plt.grid()
    plt.plot(lambdas, storage.response_times, "g", linewidth=2, markersize=12)
    plt.show()

    plt.title("Зависимость T1 и T2 от интенсивности входящего потока")
    plt.xlabel("lambdas")
    plt.ylabel("T")
    plt.grid()
    plt.plot(lambdas,
             storage.response_times1,
             "g",
             label="T1",
             linewidth=2,
             markersize=12)
    plt.plot(lambdas,
             storage.response_times2,
             "b",
             label="T2",
             linewidth=2,
             markersize=12)
    plt.legend()
    plt.show()

    plt.title("Зависимость pf от интенсивности входящего потока")
    plt.xlabel("lambdas")
    plt.ylabel("pf")
    plt.grid()
    plt.plot(lambdas,
             storage.blocked_all_queues_probability,
             "b",
             linewidth=2,
             markersize=12)
    plt.show()

    plt.title("Зависимость pf1 и pf2 от интенсивности входящего потока")
    plt.xlabel("lambdas")
    plt.ylabel("pf")
    plt.grid()
    plt.plot(
        lambdas,
        storage.failure_probabilities1,
        "g",
        label="pf1",
        linewidth=2,
        markersize=12,
    )
    plt.plot(
        lambdas,
        storage.failure_probabilities2,
        "b",
        label="pf2",
        linewidth=2,
        markersize=12,
    )
    plt.legend()
    plt.show()
    # params = Params(mu=3, lambda1=5, lambda2=5,
    #                 servers_number=6,
    #                 fragments_numbers=[3, 3],
    #                 queues_capacities=[2, 2])

    params = Params(
        mu=3,
        lambda1=3,
        lambda2=3,
        servers_number=6,
        fragments_numbers=[2, 3],
        queues_capacities=[2, 2],
    )

    states = get_all_states(params)
    states_with_policy = get_policed_states(states, params)
    print("All states where policy is possible:")
    pprint(states_with_policy)

    states_policy = Policy(tuple(), states_with_policy, params)
    states_policy.print_adjacent_states()

    model = SplitMergeSystem(params=params, reward_policy=reward_function2)
    agent = QLearning(
        model,
        states_with_policy,
        reward_decay=1,
        learning_rate=0.3,
        e_greedy=0.1,
        progress_bar=ConsoleProgressBar("*"),