Code example #1
                states = np.reshape(states, [1, state_size])  # reshape state into a row vector for the network
                action = QN.act(states)  # epsilon-greedy action from the DQN agent
                next_state, rewards, overall_err = sim_env.Assign_Cores(action)  # step the environment
                next_state = np.reshape(next_state, [1, state_size])
                QN.remember(states, action, rewards, next_state)  # store the transition in replay memory
                states = next_state
                if len(QN.memory) > batch_size:
                    QN.replay(batch_size)  # train on a random minibatch from replay memory

            loss_overall = np.append(loss_overall, QN.loss_avg/training)  # record the average training loss of this episode
            QN.loss_avg = 0  # reset the running loss for the next episode

            sim_env.reset()
            states = sim_env.state  # re-read the initial state after the reset (as in the test script below)
            for u in range(testing):
                states = np.reshape(states, [1, state_size])  # reshape state into a row vector for the network
                action = QN.act_test(states)  # greedy action (no exploration) for evaluation
                # print('SNR:', sim_env.SNR[-1])
                # print('action:', sim_env.action[action])
                next_state, rewards, overall_err = sim_env.Assign_Cores(action)  # step the environment
                error = np.append(error, overall_err)  # log the per-step error
                next_state = np.reshape(next_state, [1, state_size])
                states = next_state
            print(e)  # episode index
            print(sim_env.error/testing)  # average accumulated error over the test steps
            error_avg = np.append(error_avg, np.power(10, -sim_env.error/testing))  # store 10^(-mean error) for this episode

        # Save Error and Losses in CSV file, Save weights of networks ####
        parameters = '_DQN_S{}_rho{}_SNR{}_PS{}_lr{}_df{}_sl{}_nhl{}_ef{}'.\
            format(sim_env.S, sim_env.p, sim_env.SNR_avg[0], sim_env.pi, QN.learning_rate,
                    QN.gamma, QN.size_layers, QN.number_hidden_layers, QN.epsilon_decay)
        print(parameters)
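
The excerpts call into a DQN agent object QN whose class is not shown here. For orientation, below is a minimal sketch of an agent exposing the same interface (act, act_test, remember, replay, load and the memory, loss_avg, learning_rate, gamma, size_layers, number_hidden_layers, epsilon_decay attributes used above). The class name DQNAgent, the Keras network layout, the replay update and all default values are assumptions, not the author's implementation.

import random
from collections import deque

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam


class DQNAgent:
    """Minimal sketch of the agent interface used above; layout and defaults are assumptions."""

    def __init__(self, state_size, action_size, learning_rate=0.0001, gamma=0.0,
                 size_layers=24, number_hidden_layers=1, epsilon_decay=0.9):
        self.state_size = state_size
        self.action_size = action_size
        self.memory = deque(maxlen=2000)    # replay memory; its length is compared to batch_size above
        self.learning_rate = learning_rate  # "lr" in the saved file name
        self.gamma = gamma                  # discount factor, "df" in the saved file name
        self.size_layers = size_layers
        self.number_hidden_layers = number_hidden_layers
        self.epsilon = 1.0                  # exploration rate for the epsilon-greedy policy
        self.epsilon_min = 0.01
        self.epsilon_decay = epsilon_decay  # "ef" in the saved file name
        self.loss_avg = 0                   # accumulated training loss, reset once per episode above
        self.model = self._build_model()

    def _build_model(self):
        model = Sequential()
        model.add(Dense(self.size_layers, input_dim=self.state_size, activation='relu'))
        for _ in range(self.number_hidden_layers):
            model.add(Dense(self.size_layers, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse', optimizer=Adam(learning_rate=self.learning_rate))
        return model

    def remember(self, state, action, reward, next_state):
        self.memory.append((state, action, reward, next_state))

    def act(self, state):
        # epsilon-greedy: explore with probability epsilon, otherwise pick the greedy action
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        return int(np.argmax(self.model.predict(state, verbose=0)[0]))

    def act_test(self, state):
        # purely greedy policy for evaluation
        return int(np.argmax(self.model.predict(state, verbose=0)[0]))

    def replay(self, batch_size):
        # fit the network on a random minibatch of stored transitions
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state in minibatch:
            target = reward + self.gamma * np.amax(self.model.predict(next_state, verbose=0)[0])
            target_f = self.model.predict(state, verbose=0)
            target_f[0][action] = target
            history = self.model.fit(state, target_f, epochs=1, verbose=0)
            self.loss_avg += history.history['loss'][0]
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

    def load(self, name):
        self.model.load_weights(name)

    def save(self, name):
        self.model.save_weights(name)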
Code example #2
    print(r)
    parameters = 'old/DQN__S{}_rho{}_SNR{}_PS{}_W4_lr0.0001_df0.0_sl24_nhl1_ef0.9'.format(
        r, 0.9, SNR, sim_env.pi)
    QN.load(sim_env.channel_type + '/' + parameters)  # load pre-trained weights for this configuration
    sim_env = Simulation(number_of_servers=num_of_servers,
                         number_of_users=1,
                         historic_time=hist_timeslots,
                         snr_set=avg_SNR,
                         csi=0,
                         channel=0.9)
    sim_env.reset()
    states = sim_env.state  # get first state
    for u in range(testing):
        states = np.reshape(
            states, [1, state_size])  # reshape state to vector for network
        action = QN.act_test(states)  # get action from DQN agent
        # print('SNR:', sim_env.SNR[-1])
        # print('action:', sim_env.action[action])
        next_state, rewards, overall_err = sim_env.Assign_Cores(
            action, u)  # get next state, reward and error
        error = np.append(error, overall_err)
        next_state = np.reshape(next_state,
                                [1, state_size])  # reshape next state
        states = next_state  # state = next state
    print(r)
    print(np.power(10, -sim_env.error / testing))
    error_avg = np.append(error_avg, np.power(10, -sim_env.error / testing))

# Save Error and Losses in CSV file, Save weights of networks ####
parameters = 'DQN_S{}_rho{}_SNR{}_PS{}_OverMaxBlkL'.format(
    sim_env.S, sim_env.p, sim_env.SNR_avg[0], sim_env.pi)
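
Both excerpts stop right after building the parameters string, before the save step that the "Save Error and Losses" comment announces. A possible sketch of that step, assuming np.savetxt for the CSV output and a QN.save counterpart to the QN.load call used above (the file names and the save method are assumptions):

np.savetxt('error' + parameters + '.csv', error_avg, delimiter=',')   # average error per configuration
# in the training script the episode losses and the network weights would be stored the same way:
# np.savetxt('loss' + parameters + '.csv', loss_overall, delimiter=',')
# QN.save(sim_env.channel_type + '/' + parameters)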