Example #1
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from dqn_agent import Agent 

seed = 42
pong_game = "PongNoFrameskip-v4"
pong_reward_threshold = 19.0
pong_model_filepath = "pong_model.json"
pong_model_weights_filepath = "pong_model.h5"

env = make_atari(pong_game)
env = wrap_deepmind(env, frame_stack=True, scale=True)
env.seed(seed)

pong_agent = Agent(env, reward_threshold=pong_reward_threshold,
                    model_filepath=pong_model_filepath,
                    model_weights_filepath=pong_model_weights_filepath)
pong_agent.train()
pong_agent.run()
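
If the Agent persists its network as a Keras architecture JSON plus an HDF5 weights file, which is an assumption based purely on the two file extensions above, the saved Pong model could later be reloaded for inference along these lines:

from tensorflow.keras.models import model_from_json

with open("pong_model.json") as f:
    model = model_from_json(f.read())    # rebuild the network architecture
model.load_weights("pong_model.h5")      # restore the trained weights
model.summary()
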
Example #2
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from dqn_agent import Agent 

seed = 42

env = make_atari("BreakoutNoFrameskip-v4")
# Warp the frames, grayscale, stack four frames and scale to a smaller ratio
env = wrap_deepmind(env, frame_stack=True, scale=True)
env.seed(seed)

breakout_agent = Agent(env)
breakout_agent.train()
breakout_agent.run()
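
As a quick sanity check (not part of the original example), the wrapped environment should now emit 84x84 grayscale observations with four stacked frames; the snippet below only inspects shapes via the standard gym API:

import numpy as np

obs = env.reset()
print(np.asarray(obs).shape)   # expected: (84, 84, 4) after warping, stacking and scaling
print(env.action_space.n)      # number of discrete Breakout actions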

Example #3
def main(limit_episode, limit_step, seed=0, load=False):
    rospy.init_node('dqn_pendulum_myrrbot7')

    pub = rospy.Publisher("/myrrbot7/joint1_cotroller/command",
                          Float64,
                          queue_size=1)

    loop_rate = rospy.Rate(hz)

    result_path = "/home/amsl/ros_catkin_ws/src/deep_actor_critic/actor_critic_for_swingup/test_results/results/dqn_result.txt"
    model_path = "/home/amsl/ros_catkin_ws/src/deep_actor_critic/actor_critic_for_swingup/test_results/models/dqn_"

    init_theta = 0.0
    init_omega = 0.0

    state = get_state(init_theta, init_omega)
    state_dash = get_state(init_theta, init_omega)
    action = 0.0
    reward = 0.0
    action_list = [
        np.array([a], dtype=np.float64) for a in [min_torque, max_torque]
    ]
    n_st = len(state[0])
    n_act = len(action_list)

    Q_list = np.array([])
    max_Q = 0.0
    ave_Q = 0.0
    reward_list = np.array([])
    ave_reward = 0.0
    total_reward = 0.0

    temp_result = np.array([[]])
    test_result = np.array([[]])

    evaluation_flag = False
    wait_flag = True

    agent = Agent(n_st, n_act, seed)

    if load:
        agent.load_model(model_path)

    episode_count = 0
    time = 1
    count = 0
    wait_count = 0

    while not rospy.is_shutdown():
        if wait_flag:
            wait_count += 1
            #  print "wait_count : ", wait_count

            state = get_state(init_theta, init_omega)
            state_dash = get_state(init_theta, init_omega)
            reset_client(init_theta)
            action = 0.0
            pub.publish(action)

            if wait_count == 0.5 * hz:
                init_theta = uniform(-1 * math.pi, math.pi)

            if wait_count % hz == 0:
                wait_count = 0
                wait_flag = False
                #  print "Please Wait 1 second"
                #  print "state : ", state
                #  print "state_dash : ", state_dash
        else:
            if not evaluation_flag:
                #  print "Now Learning!!!!!"
                #  print "episode : ", episode_count
                #  print "time : ", time
                #  print "state : ", state
                act_i, q = agent.get_action(state, False)
                Q_list = np.append(Q_list, [q])
                #  print "act_i : ", act_i
                action = action_list[act_i]
                #  print "action : ", action
                pub.publish(action)
                theta, omega = get_joint_properties('myrrbot7_joint1')
                #  print "theta : %f, omega : %f" % (theta, omega)
                state_dash = get_state(theta, omega)
                #  print "state_dash : ", state_dash
                reward = get_reward(theta, omega, action)
                reward_list = np.append(reward_list, [reward])
                #  print "reward : ", reward
                ep_end = get_ep_end(theta, time, limit_step)
                #  print "ep_end : ", ep_end
                agent.stock_experience(count, state, act_i, reward, state_dash,
                                       ep_end)
                agent.train(count)

                time += 1
                count += 1

                if ep_end:
                    max_Q = np.max(Q_list)
                    ave_Q = np.average(Q_list)
                    #  print "max_Q : ", max_Q
                    #  print "ave_Q : ",ave_Q
                    ave_reward = np.average(reward_list)
                    total_reward = np.sum(reward_list)
                    #  print "ave_reward : ", ave_reward
                    #  print "total_reward : ", total_reward

                    print "Episode : %d\t/Reward Sum : %f\tEpsilon : %f\tLoss : %f\t/Average Q : %f\t/Time Step : %d" % (
                        episode_count, total_reward, agent.epsilon, agent.loss,
                        np.sum(Q_list) / float(time), agent.step + 1)
                    Q_list = np.array([])
                    reward_list = np.array([])
                    temp_result = np.array(
                        [[episode_count, max_Q, ave_Q, ave_reward, total_reward]],
                        dtype=np.float32)

                    if episode_count == 0:
                        #  print "test_result : ", test_result
                        test_result = temp_result
                        #  print "test_result : ", test_result
                    else:
                        test_result = np.r_[test_result, temp_result]

                    save_result(result_path, test_result)
                    agent.save_model(model_path)

                    if episode_count % 1 == 0:
                        # run an evaluation episode after every learning episode
                        evaluation_flag = True

                    episode_count += 1
                    time = 0
                    wait_flag = True
            else:
                #  print "Now evaluation!!!"
                #  print "episode : ", episode_count-1
                #  print "time : ", time
                #  print "state : ", state
                act_i, q = agent.get_action(state, True)
                Q_list = np.append(Q_list, [q])
                #  print "act_i : ", act_i
                #  print "Q_list : ", Q_list
                action = action_list[act_i]
                #  print "action : ", action
                pub.publish(action)
                theta, omega = get_joint_properties('myrrbot7_joint1')
                #  print "theta : %f, omega : %f" % (theta, omega)
                state_dash = get_state(theta, omega)
                #  print "state_dash : ", state_dash
                reward = get_reward(theta, omega, action)
                #  print "reward : ", reward
                reward_list = np.append(reward_list, [reward])
                #  print "reward_list : ", reward_list
                ep_end = get_ep_end(theta, time, limit_step)
                #  print "ep_end : ", ep_end
                if ep_end:
                    max_Q = np.max(Q_list)
                    ave_Q = np.average(Q_list)
                    #  print "max_Q : ", max_Q
                    #  print "ave_Q : ",ave_Q
                    ave_reward = np.average(reward_list)
                    total_reward = np.sum(reward_list)
                    #  print "ave_reward : ", ave_reward
                    #  print "total_reward : ", total_reward

                    print "Episode : %d\t/Reward Sum : %f\tEpsilon : %f\tLoss : %f\t/Average Q : %f\t/Time Step : %d" % (
                        episode_count - 1, total_reward, agent.epsilon,
                        agent.loss, np.sum(Q_list) / float(time + 1),
                        agent.step)
                    Q_list = np.array([])
                    reward_list = np.array([])

                    time = 0
                    wait_flag = True
                    evaluation_flag = False

                    temp_result = np.array(
                        [[episode_count - 1, max_Q, ave_Q, ave_reward, total_reward]],
                        dtype=np.float32)

                    if episode_count - 1 == 0:
                        #  print "test_result : ", test_result
                        test_result = temp_result
                        #  print "test_result : ", test_result
                    else:
                        test_result = np.r_[test_result, temp_result]

                    save_result(result_path, test_result)
                    agent.save_model(model_path)

                time += 1

        loop_rate.sleep()
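
get_state and get_reward are defined elsewhere in this module and are not shown; a minimal sketch that is merely consistent with how they are called above (the state is indexed as state[0], the reward depends on theta, omega and the applied torque) could look like the following. The exact state encoding and reward formula are assumptions, not the original implementation:

import math
import numpy as np

def get_state(theta, omega):
    # 1 x n_st row vector, so len(state[0]) yields the state dimension
    return np.array([[math.cos(theta), math.sin(theta), omega]], dtype=np.float32)

def get_reward(theta, omega, action):
    # penalize distance from upright, angular velocity and applied torque
    return -(theta ** 2 + 0.1 * omega ** 2 + 0.001 * float(action[0]) ** 2)
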
Example #4
                with open(os.path.join(save_dir, "path.txt"), "a") as myfile:
                    myfile.write('iteration {}, rl_cost {}: {}\n'.format(
                        global_step, tsp_cost, dqn_env.steps))

            completed_episodes += 1
            episode_reward.append(ep_reward)
            episode_length.append(t)
            rl_cost.append(tsp_computer.rl_cost(dqn_env.steps))

        # Neither the Q network nor the target Q network is updated until the replay
        # memory has been filled with <replay_start_size> random transitions
        if not c.test and number_of_observations_experienced >= replay_start_size:

            # Train the Q network once every <update_frequency> iterations
            if number_of_observations_experienced % update_frequency == 0:
                agent.train()

            # Update target Q network weights once every <target_network_update_frequency> iterations to be equal
            # to the Q network weights
            if number_of_observations_experienced % target_network_update_frequency == 0:
                agent.update_target_Q_network_weights()

            # Decrease exploration probability
            agent.update_exploration_value()

        # New timestep
        t += 1
        global_step += 1
        number_of_observations_experienced += 1
        o_t = o_tp1
        if c.test:
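
Example #4 is only a fragment and ends mid-statement; a compact, self-contained sketch of the update schedule described by its comments (no learning until replay_start_size transitions are stored, a training step every update_frequency observations, a target-network sync every target_network_update_frequency observations) follows. The agent here is a stub and the constants are illustrative, not the values used in the original:

replay_start_size = 1_000                  # warm-up transitions before learning starts
update_frequency = 4                       # train the online Q network every 4 observations
target_network_update_frequency = 500      # sync the target network every 500 observations

class StubAgent:
    def train(self):
        pass  # one gradient step on the online Q network

    def update_target_Q_network_weights(self):
        pass  # copy the online Q network weights into the target network

    def update_exploration_value(self):
        pass  # anneal the exploration probability

agent = StubAgent()
for number_of_observations_experienced in range(1, 5_001):
    # ... act in the environment and store the transition in replay memory here ...
    if number_of_observations_experienced >= replay_start_size:
        if number_of_observations_experienced % update_frequency == 0:
            agent.train()
        if number_of_observations_experienced % target_network_update_frequency == 0:
            agent.update_target_Q_network_weights()
        agent.update_exploration_value()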