Example #1
def main(limit_episode, limit_step, seed=0, load=False):
    rospy.init_node('dqn_pendulum_myrrbot7')

    pub = rospy.Publisher("/myrrbot7/joint1_cotroller/command",
                          Float64,
                          queue_size=1)

    loop_rate = rospy.Rate(hz)

    result_path = "/home/amsl/ros_catkin_ws/src/deep_actor_critic/actor_critic_for_swingup/test_results/results/dqn_result.txt"
    model_path = "/home/amsl/ros_catkin_ws/src/deep_actor_critic/actor_critic_for_swingup/test_results/models/dqn_"

    init_theta = 0.0
    init_omega = 0.0

    state = get_state(init_theta, init_omega)
    state_dash = get_state(init_theta, init_omega)
    action = 0.0
    reward = 0.0
    action_list = [
        np.array([a], dtype=np.float64) for a in [min_torque, max_torque]
    ]
    n_st = len(state[0])
    n_act = len(action_list)

    Q_list = np.array([])
    max_Q = 0.0
    ave_Q = 0.0
    reward_list = np.array([])
    ave_reward = 0.0
    total_reward = 0.0

    temp_result = np.array([[]])
    test_result = np.array([[]])

    evaluation_flag = False
    wait_flag = True

    agent = Agent(n_st, n_act, seed)

    if load:
        agent.load_model(model_path)

    episode_count = 0
    time = 1
    count = 0
    wait_count = 0

    while not rospy.is_shutdown():
        # wait phase: hold the pendulum at the reset pose for one second between episodes
        if wait_flag:
            wait_count += 1
            #  print "wait_count : ", wait_count

            state = get_state(init_theta, init_omega)
            state_dash = get_state(init_theta, init_omega)
            reset_client(init_theta)
            action = 0.0
            pub.publish(action)

            if wait_count == 0.5 * hz:
                init_theta = uniform(-1 * math.pi, math.pi)

            if wait_count % hz == 0:
                wait_count = 0
                wait_flag = False
                #  print "Please Wait 1 second"
                #  print "state : ", state
                #  print "state_dash : ", state_dash
        else:
            if not evaluation_flag:
                #  print "Now Learning!!!!!"
                #  print "episode : ", episode_count
                #  print "time : ", time
                #  print "state : ", state
                act_i, q = agent.get_action(state, False)
                Q_list = np.append(Q_list, [q])
                #  print "act_i : ", act_i
                action = action_list[act_i]
                #  print "action : ", action
                pub.publish(action)
                theta, omega = get_joint_properties('myrrbot7_joint1')
                #  print "theta : %f, omega : %f" % (theta, omega)
                state_dash = get_state(theta, omega)
                #  print "state_dash : ", state_dash
                reward = get_reward(theta, omega, action)
                reward_list = np.append(reward_list, [reward])
                #  print "reward : ", reward
                ep_end = get_ep_end(theta, time, limit_step)
                #  print "ep_end : ", ep_end
                agent.stock_experience(count, state, act_i, reward, state_dash,
                                       ep_end)
                agent.train(count)
                # move to the next state
                state = state_dash

                time += 1
                count += 1

                if ep_end:
                    max_Q = np.max(Q_list)
                    ave_Q = np.average(Q_list)
                    #  print "max_Q : ", max_Q
                    #  print "ave_Q : ",ave_Q
                    ave_reward = np.average(reward_list)
                    total_reward = np.sum(reward_list)
                    #  print "ave_reward : ", ave_reward
                    #  print "total_reward : ", total_reward

                    print "Episode : %d\t/Reward Sum : %f\tEpsilon : %f\tLoss : %f\t/Average Q : %f\t/Time Step : %d" % (
                        episode_count, total_reward, agent.epsilon, agent.loss,
                        np.sum(Q_list) / float(time), agent.step + 1)
                    Q_list = np.array([])
                    reward_list = np.array([])
                    temp_result = np.array(
                        [[episode_count, max_Q, ave_Q, ave_reward, total_reward]],
                        dtype=np.float32)

                    if episode_count == 0:
                        #  print "test_result : ", test_result
                        test_result = temp_result
                        #  print "test_result : ", test_result
                    else:
                        test_result = np.r_[test_result, temp_result]

                    save_result(result_path, test_result)
                    agent.save_model(model_path)

                    # run a greedy evaluation episode after every learning episode
                    if episode_count % 1 == 0:
                        evaluation_flag = True

                    episode_count += 1
                    time = 0
                    wait_flag = True
            else:
                #  print "Now evaluation!!!"
                #  print "episode : ", episode_count-1
                #  print "time : ", time
                #  print "state : ", state
                act_i, q = agent.get_action(state, True)
                Q_list = np.append(Q_list, [q])
                #  print "act_i : ", act_i
                #  print "Q_list : ", Q_list
                action = action_list[act_i]
                #  print "action : ", action
                pub.publish(action)
                theta, omega = get_joint_properties('myrrbot7_joint1')
                #  print "theta : %f, omega : %f" % (theta, omega)
                state_dash = get_state(theta, omega)
                #  print "state_dash : ", state_dash
                reward = get_reward(theta, omega, action)
                #  print "reward : ", reward
                reward_list = np.append(reward_list, [reward])
                #  print "reward_list : ", reward_list
                ep_end = get_ep_end(theta, time, limit_step)
                #  print "ep_end : ", ep_end
                if ep_end:
                    max_Q = np.max(Q_list)
                    ave_Q = np.average(Q_list)
                    #  print "max_Q : ", max_Q
                    #  print "ave_Q : ",ave_Q
                    ave_reward = np.average(reward_list)
                    total_reward = np.sum(reward_list)
                    #  print "ave_reward : ", ave_reward
                    #  print "total_reward : ", total_reward

                    print "Episode : %d\t/Reward Sum : %f\tEpsilon : %f\tLoss : %f\t/Average Q : %f\t/Time Step : %d" % (
                        episode_count - 1, total_reward, agent.epsilon,
                        agent.loss, np.sum(Q_list) / float(time + 1),
                        agent.step)
                    Q_list = np.array([])
                    reward_list = np.array([])

                    time = 0
                    wait_flag = True
                    evaluation_flag = False

                    temp_result = np.array(
                        [[episode_count - 1, max_Q, ave_Q, ave_reward, total_reward]],
                        dtype=np.float32)

                    if episode_count - 1 == 0:
                        #  print "test_result : ", test_result
                        test_result = temp_result
                        #  print "test_result : ", test_result
                    else:
                        test_result = np.r_[test_result, temp_result]

                    save_result(result_path, test_result)
                    agent.save_model(model_path)

                # move to the next state
                state = state_dash
                time += 1

        loop_rate.sleep()
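
This snippet is only the node's main(); constants and helpers such as hz, min_torque, max_torque, get_state, get_reward, get_ep_end, get_joint_properties, reset_client, save_result and Agent are defined elsewhere in the package. The sketch below shows one plausible shape for the state, reward and termination helpers; the encodings and coefficients are assumptions, not the original implementation.

# Sketch only: plausible definitions for the helpers assumed by main().
# The state encoding, reward shaping and termination rule are guesses.
import math
import numpy as np

hz = 50                # assumed control-loop rate [Hz]
min_torque = -5.0      # assumed torque limits for the two discrete actions
max_torque = 5.0

def get_state(theta, omega):
    # cos/sin encoding avoids the discontinuity of the raw angle at +/- pi
    return np.array([[math.cos(theta), math.sin(theta), omega]], dtype=np.float32)

def get_reward(theta, omega, action):
    # classic swing-up cost (assuming theta = 0 is upright), sign-flipped into a reward
    return -(theta ** 2 + 0.1 * omega ** 2 + 0.001 * float(action) ** 2)

def get_ep_end(theta, time, limit_step):
    # end the episode after a fixed number of steps
    return time >= limit_step
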
Example #2
# Task 4 - DQN
agent = DQNAgent(state_space_dim, n_actions, replay_buffer_size, batch_size,
                 hidden, gamma)

# Training loop
cumulative_rewards = []
for ep in range(num_episodes):
    # Initialize the environment and state
    state = env.reset()
    done = False
    eps = glie_a / (glie_a + ep)
    cum_reward = 0
    while not done:
        # Select and perform an action
        action = agent.get_action(state, eps)
        next_state, reward, done, _ = env.step(action)
        cum_reward += reward

        # Task 1: TODO: Update the Q-values
        #agent.single_update(state,action,next_state,reward,done)

        # Task 2: TODO: Store transition and batch-update Q-values
        #agent.store_transition(state,action,next_state,reward,done)
        #agent.update_estimator()

        # Task 4: Update the DQN
        agent.store_transition(state, action, next_state, reward, done)
        agent.update_network()
        # Move to the next state
        state = next_state
    cumulative_rewards.append(cum_reward)
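
The loop assumes a DQNAgent exposing get_action(state, eps), store_transition(...) and update_network(). Below is a minimal epsilon-greedy sketch of that interface; the Q-network, TD-target computation and optimizer step are omitted, and the class is an illustration rather than the actual assignment code.

# Sketch only: the agent interface assumed by the training loop above.
import random
from collections import deque, namedtuple

import numpy as np

Transition = namedtuple('Transition',
                        ['state', 'action', 'next_state', 'reward', 'done'])

class DQNAgentSketch(object):
    def __init__(self, state_dim, n_actions, buffer_size, batch_size):
        self.n_actions = n_actions
        self.batch_size = batch_size
        self.memory = deque(maxlen=buffer_size)

    def get_action(self, state, eps):
        # epsilon-greedy: explore with probability eps, otherwise act greedily
        if random.random() < eps:
            return random.randrange(self.n_actions)
        return int(np.argmax(self._q_values(state)))

    def store_transition(self, state, action, next_state, reward, done):
        self.memory.append(Transition(state, action, next_state, reward, done))

    def update_network(self):
        # one gradient step on a random minibatch from the replay buffer
        if len(self.memory) < self.batch_size:
            return
        batch = random.sample(self.memory, self.batch_size)
        # ... compute TD targets r + gamma * max_a' Q(s', a') and fit the Q-network

    def _q_values(self, state):
        # placeholder for the Q-network forward pass
        return np.zeros(self.n_actions)
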
Example #3
# Task 4 - DQN
agent = DQNAgent(state_space_dim, n_actions, replay_buffer_size, batch_size,
                 hidden, gamma)

# Training loop
cumulative_rewards = []
for ep in range(num_episodes):
    # Initialize the environment and state
    state = env.reset()
    done = False
    eps = glie_a / (glie_a + ep)
    cum_reward = 0
    while not done:
        # Select and perform an action
        action = agent.get_action(state, eps)
        next_state, reward, done, _ = env.step(action)
        cum_reward += reward

        # Task 1: TODO: Update the Q-values
        #agent.single_update(state,action,next_state,reward,done)
        # Task 2: TODO: Store transition and batch-update Q-values
        #agent.update_estimator()
        # Task 4: Store the transition and update the DQN
        agent.store_transition(state, action, next_state, reward, done)
        agent.update_network()
        # Move to the next state
        state = next_state
    cumulative_rewards.append(cum_reward)
    plot_rewards(cumulative_rewards)
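
The schedule eps = glie_a / (glie_a + ep) decays epsilon smoothly toward zero (a GLIE-style schedule); with glie_a = 50, for example, eps is 1.0 at episode 0 and 0.5 at episode 50. plot_rewards is not defined in the snippet; a minimal matplotlib sketch of such a helper (an assumption, not the supplied code) is:

# Sketch only: a plausible plot_rewards helper.
import matplotlib.pyplot as plt

def plot_rewards(cumulative_rewards):
    # redraw the per-episode cumulative reward curve in one reused figure
    plt.figure('training')
    plt.clf()
    plt.plot(cumulative_rewards)
    plt.xlabel('episode')
    plt.ylabel('cumulative reward')
    plt.pause(0.001)  # let the figure window refresh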