Example #1
def train():
    env = ContinuousCartPoleEnv()
    state_dim = 4
    action_dim = 2

    # reproducibility: fix the random seeds
    # env.seed(RANDOMSEED)
    np.random.seed(RANDOMSEED)
    torch.manual_seed(RANDOMSEED)

    ppo = PPO(state_dim, action_dim, method=METHOD)
    global all_ep_r, update_plot, stop_plot
    all_ep_r = []
    for ep in range(EP_MAX):
        s = env.reset()
        ep_r = 0
        t0 = time.time()
        for t in range(EP_LEN):
            if RENDER:
                env.render()
            a = ppo.choose_action(s)
            u = np.clip(gene_u(s, a, model_1, model_2), -1, 1)
            s_, _, done, _ = env.step(u)
            # print(s, a, s_, r, done)
            # assert False
            r = 5
            r -= WEIGHT * abs(u[0])
            # r -= 1 / WEIGHT * (abs(s_[0]) + abs(s_[1]))
            if done and t != 199:  # penalize termination before the final (200th) step
                r -= 50
            ppo.store_transition(s, a, r)  # useful for pendulum since the nets are very small; normalization makes it easier to learn
            s = s_
            ep_r += r

            # update ppo
            if len(ppo.state_buffer) == BATCH_SIZE:
                ppo.finish_path(s_, done)
                ppo.update()
            # if done:
            #     break
        ppo.finish_path(s_, done)
        print('Episode: {}/{}  | Episode Reward: {:.4f}  | Running Time: {:.4f}'.format(
            ep + 1, EP_MAX, ep_r, time.time() - t0))
        if ep == 0:
            all_ep_r.append(ep_r)
        else:
            all_ep_r.append(all_ep_r[-1] * 0.9 + ep_r * 0.1)
        if PLOT_RESULT:
            update_plot.set()
        if (ep + 1) % 500 == 0 and ep >= 3000:
            ppo.save_model(path='ppo', ep=ep, weight=WEIGHT)
    if PLOT_RESULT:
        stop_plot.set()
    env.close()
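
The snippet above relies on module-level configuration that is not shown. Below is a minimal sketch of those globals, with placeholder values chosen only for illustration; the original values are not given in the source.

import time
import threading

import numpy as np
import torch

# Assumed hyperparameters -- placeholder values, not taken from the original source.
RANDOMSEED = 1          # seed for numpy / torch
EP_MAX = 5000           # number of training episodes
EP_LEN = 200            # max steps per episode (matches the `t != 199` check above)
BATCH_SIZE = 32         # transitions collected before each PPO update
WEIGHT = 0.1            # action-magnitude penalty coefficient in the reward
METHOD = 'clip'         # PPO variant, e.g. 'clip' or 'penalty'
RENDER = False          # render the environment during training
PLOT_RESULT = False     # drive the live plot via the update_plot / stop_plot events
TEST_EP = 10            # evaluation episodes (used by the test code in Example #3)

# train() signals the plotting thread through these events.
update_plot = threading.Event()
stop_plot = threading.Event()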
Example #2
def evaluate(render=True):
    env = ContinuousCartPoleEnv()

    obs_size = env.observation_space.shape[0]
    action_size = env.action_space.shape[0]
    model = Model(state_size=obs_size, action_size=action_size)
    target_model = Model(state_size=obs_size, action_size=action_size)

    alg = DDPG(model,
               target_model,
               gamma=0.99,
               tau=1e-3,
               actor_lr=1e-4,
               critic_lr=3e-4)
    agent = Agent(alg, BUFFER_SIZE, BATCH_SIZE, seed=10)
    agent.alg.model.actor_model.load_state_dict(
        torch.load("cart_pole_actor.pth"))
    agent.alg.model.critic_model.load_state_dict(
        torch.load("cart_pole_critic.pth"))

    eval_reward = []
    for i in range(10):
        obs = env.reset()
        total_reward = 0
        steps = 0
        while True:
            action = agent.act(obs)

            steps += 1
            next_obs, reward, done, info = env.step(action)

            obs = next_obs
            total_reward += reward

            if render:
                env.render()
            if done:
                break
        eval_reward.append(total_reward)
    return np.mean(eval_reward)
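
A possible driver for this function, assuming the checkpoint files cart_pole_actor.pth and cart_pole_critic.pth are in the working directory. BUFFER_SIZE and BATCH_SIZE are module-level constants in the original; the values below are placeholders.

# Assumed module-level constants -- placeholder values, not from the original source.
BUFFER_SIZE = int(1e5)   # replay buffer capacity passed to Agent
BATCH_SIZE = 64          # mini-batch size passed to Agent

if __name__ == '__main__':
    mean_reward = evaluate(render=False)
    print('Mean reward over 10 evaluation episodes: {:.2f}'.format(mean_reward))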
Example #3
#     thread = threading.Thread(target=train)
#     thread.daemon = True
#     thread.start()
#     if PLOT_RESULT:
#         drawer = Drawer()
#         drawer.plot()
#         drawer.save()
#     thread.join()
train()
assert False  # stop here; remove this line to run the test rollout below
# test
env = ContinuousCartPoleEnv()
state_dim = 2
action_dim = 2
ppo = PPO(state_dim, action_dim, method=METHOD)
ppo.load_model()
mean_epoch_reward = 0
for _ in range(TEST_EP):
    state = env.reset()
    for i in range(EP_LEN):
        if RENDER:
            env.render()
        action = ppo.choose_action(state, True)
        u = gene_u(state, action, model_1, model_2)
        next_state, reward, done, _ = env.step(u)  # env.step returns (obs, reward, done, info)
        mean_epoch_reward += reward
        state = next_state
        if done:
            break
print(mean_epoch_reward / TEST_EP)
env.close()
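
The commented-out lines at the top of this example run train() in a background thread while a Drawer object plots progress on the main thread. The original Drawer class is not shown; the following is only a sketch of one way it could consume the update_plot / stop_plot events that train() sets, using the assumed threading.Event globals from Example #1.

import matplotlib.pyplot as plt

class Drawer:
    """Hypothetical plotter: redraws the smoothed episode-reward curve
    whenever train() sets update_plot, and stops once stop_plot is set."""

    def plot(self):
        plt.ion()
        fig, ax = plt.subplots()
        while not stop_plot.is_set():
            if update_plot.wait(timeout=1.0):
                update_plot.clear()
                ax.clear()
                ax.plot(all_ep_r)              # smoothed rewards appended by train()
                ax.set_xlabel('episode')
                ax.set_ylabel('smoothed reward')
                plt.pause(0.01)
        plt.ioff()

    def save(self, path='ppo_reward.png'):
        plt.savefig(path)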