Example No. 1
def test_linear_schedule():
    """
    test LinearSchedule
    """
    linear_sched = LinearSchedule(schedule_timesteps=100,
                                  initial_p=0.2,
                                  final_p=0.8)
    assert np.isclose(linear_sched.value(50), 0.5)
    assert np.isclose(linear_sched.value(0), 0.2)
    assert np.isclose(linear_sched.value(100), 0.8)

    linear_sched = LinearSchedule(schedule_timesteps=100,
                                  initial_p=0.8,
                                  final_p=0.2)
    assert np.isclose(linear_sched.value(50), 0.5)
    assert np.isclose(linear_sched.value(0), 0.8)
    assert np.isclose(linear_sched.value(100), 0.2)

    linear_sched = LinearSchedule(schedule_timesteps=100,
                                  initial_p=-0.6,
                                  final_p=0.2)
    assert np.isclose(linear_sched.value(50), -0.2)
    assert np.isclose(linear_sched.value(0), -0.6)
    assert np.isclose(linear_sched.value(100), 0.2)

    linear_sched = LinearSchedule(schedule_timesteps=100,
                                  initial_p=0.2,
                                  final_p=-0.6)
    assert np.isclose(linear_sched.value(50), -0.2)
    assert np.isclose(linear_sched.value(0), 0.2)
    assert np.isclose(linear_sched.value(100), -0.6)
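
A note on the schedule these assertions pin down: LinearSchedule.value is plain linear interpolation from initial_p to final_p over schedule_timesteps, with the value clamped at final_p afterwards. A minimal sketch consistent with the test above (the actual stable_baselines.common.schedules.LinearSchedule may differ in details):

class LinearSchedule:
    """Linearly interpolate from initial_p to final_p over schedule_timesteps,
    then hold final_p. Sketch reconstructed from the assertions above; not the
    original stable-baselines source."""

    def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
        self.schedule_timesteps = schedule_timesteps
        self.initial_p = initial_p
        self.final_p = final_p

    def value(self, step):
        # Fraction of the schedule completed, capped at 1.0 once past the end.
        fraction = min(float(step) / self.schedule_timesteps, 1.0)
        return self.initial_p + fraction * (self.final_p - self.initial_p)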
Example No. 2
    def __init__(self):
        self.hyperparams = {"n_steps": 1024,
                            "nminibatches": 32,
                            "cliprange": 0.4,
                            "gamma": 0.996,
                            "lam": 0.95,
                            "learning_rate": LinearSchedule(1.0, initial_p=0.0002, final_p=0.001).value,
                            "noptepochs": 4,
                            "ent_coef": 0.002}
Example No. 3
def train(algorithm='dqn', timesteps=2e5):

    # env = gym.make('LunarLander-v2')  # This uses the library version of the Lunar Lander env.
    print('algorithm: ', algorithm)
    print('timesteps: ', timesteps)

    learning_rate = 0.001

    if algorithm.lower() == 'dqn':
        env = LunarLander()
        model = DQN('MlpPolicy', env, learning_rate=learning_rate,
                    prioritized_replay=True,
                    verbose=1)
    elif algorithm.lower() == 'ppo2':
        n_envs = 4
        env = SubprocVecEnv([lambda: LunarLander() for i in range(n_envs)])

        schedule = LinearSchedule(int(float(timesteps)), 0.00001, 0.1).value
        model = PPO2('MlpPolicy', env, learning_rate=schedule,
                     verbose=1)
    else:
        raise RuntimeError("Unknown algorithm: %s" % algorithm)

    # mean_reward, std_reward = evaluate_policy(
    #     model, model.get_env(), n_eval_episodes=10)

    # Train the agent
    model.learn(total_timesteps=int(float(timesteps)), log_interval=10)
    # Save the agent
    model.save("trained_models/latest")

    now = datetime.now()
    dt_string = now.strftime("%Y-%m-%d_%H-%M-%S")
    model.save("trained_models/lunar_climber_%s-%s" %
               (algorithm.lower(), dt_string))

    # Plot training progress
    # plt.plot(env.all_rewards)
    # plt.ylabel('Reward')
    # plt.xlabel('Timesteps')
    # plt.savefig('figures/stats-%s.png' % dt_string)

    print("Model trained!")
Example No. 4
                randomBall=my_randomBall,
                binaryReward=my_binaryReward)  # 0.01745*5
# Optional: PPO2 requires a vectorized environment to run
# the env is now wrapped automatically when passing it to the constructor
# env = DummyVecEnv([lambda: env])
timesteps = 2000000

lr_start = 0.0005  # only starts to have an effect around 0.00014
lr_end = 0.00004
half_life = 0.1
dyn_lr = ExpLearningRate(timesteps=timesteps,
                         lr_start=lr_start,
                         lr_min=lr_end,
                         half_life=half_life)

llr = LinearSchedule(timesteps, 0.005, 0.0001)  # default: 0.00025

my_learning_rate = dyn_lr.value  # 0.000063
# my_learning_rate = scheduler.value
# my_learning_rate = 0.00075  # scheduler.value default: 2.5e-4=0.00025
#print_LR = str(lr_start) + "-" + str(lr_end)
print_LR = str(my_learning_rate)

#static_learning_rate = 0.00014  # my_learning_rate.value

#CRAZYDEEP7:
#p_quarks = dict(net_arch=[8192, 8192, dict(
#    vf=[8192, 4096, 4096, 2048], pi=[256, 256, 128])])
#CRAZYDEEP7 Lite:
#p_quarks = dict(net_arch=[4096, 4096, dict(
#    vf=[4096, 2048, 2048, 1024], pi=[256, 256, 128])])
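
ExpLearningRate is not a stable-baselines class and its implementation is not included in this snippet; judging from the constructor arguments it is an exponentially decaying schedule with a floor at lr_min and a half-life given as a fraction of the total timesteps. A hypothetical sketch of such a schedule (names and semantics here are assumptions, not the original code):

import math

class ExpLearningRate:
    """Hypothetical exponential-decay schedule: the rate halves every
    half_life * timesteps steps and never drops below lr_min. Reconstructed
    from the constructor call above; the original class is not shown."""

    def __init__(self, timesteps, lr_start, lr_min, half_life):
        self.lr_start = lr_start
        self.lr_min = lr_min
        self.half_life_steps = half_life * timesteps

    def value(self, step):
        decayed = self.lr_start * math.pow(0.5, step / self.half_life_steps)
        return max(decayed, self.lr_min)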
Example No. 5
    def learn(self,
              total_timesteps,
              callback=None,
              log_interval=100,
              tb_log_name="DQN",
              reset_num_timesteps=True,
              replay_wrapper=None):

        new_tb_log = self._init_num_timesteps(reset_num_timesteps)
        # callback = self._init_callback(callback)

        # with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
        #         as writer:
        self._setup_learn()

        # Create the replay buffer
        if self.prioritized_replay:
            self.replay_buffer = PrioritizedReplayBuffer(
                self.buffer_size, alpha=self.prioritized_replay_alpha)
            if self.prioritized_replay_beta_iters is None:
                prioritized_replay_beta_iters = total_timesteps
            else:
                prioritized_replay_beta_iters = self.prioritized_replay_beta_iters
            self.beta_schedule = LinearSchedule(
                prioritized_replay_beta_iters,
                initial_p=self.prioritized_replay_beta0,
                final_p=1.0)
        else:
            self.replay_buffer = ReplayBuffer(self.buffer_size)
            self.beta_schedule = None

        if replay_wrapper is not None:
            assert not self.prioritized_replay, "Prioritized replay buffer is not supported by HER"
            self.replay_buffer = replay_wrapper(self.replay_buffer)

        # Create the schedule for exploration starting from 1.
        self.exploration = LinearSchedule(
            schedule_timesteps=int(self.exploration_fraction *
                                   total_timesteps),
            initial_p=self.exploration_initial_eps,
            final_p=self.exploration_final_eps)

        episode_rewards = [[0.0] * self.num_agents]  #MA-MOD
        episode_successes = []

        #callback.on_training_start(locals(), globals())
        #callback.on_rollout_start()

        reset = True
        obs = self.env.reset()

        for _ in range(total_timesteps):
            # Take action and update exploration to the newest value
            kwargs = {}
            if not self.param_noise:
                update_eps = self.exploration.value(self.num_timesteps)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = \
                    -np.log(1. - self.exploration.value(self.num_timesteps) +
                            self.exploration.value(self.num_timesteps) / float(self.env.action_space.n))
                kwargs['reset'] = reset
                kwargs[
                    'update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True

            with self.sess.as_default():
                env_action = []  # MA-MOD
                for i in range(self.num_agents):  # MA-MOD. This is fine for one policy.
                    action = self.act[i](
                        np.array(obs[i])[None],
                        update_eps=update_eps,
                        **kwargs
                    )[0]  # TODO: Is this the correct way to get the correct agent obs?
                    env_action.append(action)
            reset = False
            new_obs, rew, done, info = self.env.step(
                env_action
            )  # NOUPDATE - env.step should take a vector of actions
            '''
            Obs: x_me, x_opp --- agent 1. In env: x_1, x_2
            Obs: x_me, x_opp -- agent 2. In env: x_2, x_1
            Env: (n_agents, state_dim)
            '''

            self.num_timesteps += 1

            # Stop training if return value is False
            # if callback.on_step() is False:
            #    break

            # Store transition in the replay buffer.
            # Loop for replay buffer -- either separate or joined. obs[agent_index], action[agent_index], reward[agent_index]
            # Joey: Does this look right to you?
            # print(obs, action, rew, new_obs, done)
            #print("obs",obs[0])
            #print(action)
            #print("ac", action[0])
            #print("rew", rew[0])
            #print("done", done[0])
            for num_agent in range(self.num_agents):
                self.replay_buffer.add(obs[num_agent], env_action[num_agent],
                                       rew[num_agent], new_obs[num_agent],
                                       float(done[num_agent]))
            obs = new_obs

            # if writer is not None:
            #     ep_rew = np.array([rew]).reshape((1, -1))
            #     ep_done = np.array([done]).reshape((1, -1))
            #     tf_util.total_episode_reward_logger(self.episode_reward, ep_rew, ep_done, writer,
            #                                         self.num_timesteps)

            # TODO: current episode_rewards is a list, make it a list of lists where each list is the reward for each agent in all timesteps
            #     append the newest reward to the end of each list for each agent
            for num_agent in range(self.num_agents):  # MA-MOD
                episode_rewards[-1][num_agent] += rew[num_agent]
            # Handle episode termination once per environment step (not once per
            # agent) so the reset and episode bookkeeping are not repeated.
            if done.any():
                maybe_is_success = info.get('is_success')
                if maybe_is_success is not None:
                    episode_successes.append(float(maybe_is_success))
                if not isinstance(self.env, VecEnv):
                    obs = self.env.reset()
                episode_rewards.append([0.0] * self.num_agents)
                reset = True

            # Do not train if the warmup phase is not over
            # or if there are not enough samples in the replay buffer
            can_sample = self.replay_buffer.can_sample(self.batch_size)
            if can_sample and self.num_timesteps > self.learning_starts \
                    and self.num_timesteps % self.train_freq == 0:

                # callback.on_rollout_end()

                for i in range(self.num_agents):  # MA-MOD
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    # pytype:disable=bad-unpacking
                    if self.prioritized_replay:
                        assert self.beta_schedule is not None, \
                                "BUG: should be LinearSchedule when self.prioritized_replay True"
                        experience = self.replay_buffer.sample(
                            self.batch_size,
                            beta=self.beta_schedule.value(self.num_timesteps))
                        (obses_t, actions, rewards, obses_tp1, dones, weights,
                         batch_idxes) = experience
                    else:
                        obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(
                            self.batch_size)
                        weights, batch_idxes = np.ones_like(rewards), None
                    # pytype:enable=bad-unpacking

                    # if writer is not None:
                    #     # run loss backprop with summary, but once every 100 steps save the metadata
                    #     # (memory, compute time, ...)
                    #     if (1 + self.num_timesteps) % 100 == 0:
                    #         run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                    #         run_metadata = tf.RunMetadata()
                    #         summary, td_errors = self._train_step[i](obses_t, actions, rewards, obses_tp1, obses_tp1,
                    #                                               dones, weights, sess=self.sess, options=run_options,
                    #                                               run_metadata=run_metadata)
                    #         writer.add_run_metadata(run_metadata, 'step%d_agent%d' % (self.num_timesteps, i))
                    #     else:
                    #         summary, td_errors = self._train_step[i](obses_t, actions, rewards, obses_tp1, obses_tp1,
                    #                                               dones, weights, sess=self.sess)
                    #     writer.add_summary(summary, self.num_timesteps)
                    # else:
                    td_errors = self._train_step[i](obses_t,
                                                    actions,
                                                    rewards,
                                                    obses_tp1,
                                                    obses_tp1,
                                                    dones,
                                                    weights,
                                                    sess=self.sess)

                if self.prioritized_replay:  # NOUPDATE - not inside main agent for loop
                    new_priorities = np.abs(
                        td_errors) + self.prioritized_replay_eps  # NOUPDATE
                    assert isinstance(self.replay_buffer,
                                      PrioritizedReplayBuffer)
                    self.replay_buffer.update_priorities(
                        batch_idxes, new_priorities)

                # callback.on_rollout_start()

            if can_sample and self.num_timesteps > self.learning_starts and \
                    self.num_timesteps % self.target_network_update_freq == 0:
                # Update target network periodically.
                for i in range(self.num_agents):
                    self.update_target[i](sess=self.sess)  # MA-MOD

            if len(episode_rewards[-101:-1]) == 0:  # MA-MOD
                mean_100ep_reward = -np.inf
            else:
                mean_100ep_reward = round(
                    float(np.mean(episode_rewards[-101:-1])), 1)  #MA-MOD

            # below is what's logged in terminal.
            num_episodes = len(episode_rewards)  #MA-MOD
            if self.verbose >= 1 and done.any() and log_interval is not None \
                    and len(episode_rewards) % log_interval == 0:  # MA-MOD
                logger.record_tabular("steps", self.num_timesteps)
                logger.record_tabular("episodes", num_episodes)
                if len(episode_successes) > 0:
                    logger.logkv("success rate",
                                 np.mean(episode_successes[-100:]))
                logger.record_tabular("mean 100 episode reward",
                                      mean_100ep_reward)
                logger.record_tabular(
                    "% time spent exploring",
                    int(100 * self.exploration.value(self.num_timesteps)))
                logger.dump_tabular()

        return self
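
Both schedules created above are queried with self.num_timesteps: the exploration schedule drives the eps-greedy probability, and the beta schedule (only with prioritized replay) anneals the importance-sampling exponent from prioritized_replay_beta0 up to 1.0. As a worked example of the exploration schedule, assuming the common DQN defaults exploration_fraction=0.1, exploration_initial_eps=1.0, exploration_final_eps=0.02 and total_timesteps=100000 (values assumed for illustration, not taken from this snippet):

from stable_baselines.common.schedules import LinearSchedule

exploration = LinearSchedule(schedule_timesteps=int(0.1 * 100000),
                             initial_p=1.0,
                             final_p=0.02)
for t in (0, 2500, 5000, 10000, 50000):
    # eps decays linearly over the first 10% of training, then stays at final_p
    print(t, exploration.value(t))
# expected values: 1.0, 0.755, 0.51, 0.02, 0.02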
Example No. 6
               isEscaping=False,
               enemy_model=defender_model,
               enemy_step_limit=defender_step_limit,
               enemy_step_size=defender_step_size,
               enemy_maxspeed=defender_maxspeed,
               enemy_acceleration=defender_acceleration)

env = DummyVecEnv([lambda: env])
env2 = DummyVecEnv([lambda: env2])

attacker_model.set_env(env)
defender_model.set_env(env2)

timesteps2 = 500000

scheduler = LinearSchedule(timesteps2, 0.001, 0.0001)
my_learning_rate2 = scheduler.value

for i in range(10000):
    defender_model.learn(total_timesteps=timesteps2,
                         tb_log_name=defender_name + str(i),
                         log_interval=100)
    attacker_model.learn(total_timesteps=timesteps2 // 2,
                         tb_log_name=attacker_name + str(i),
                         log_interval=100)

    if (i % 10 == 0):
        attacker_model.save("../Models/" + attacker_name + str(i))
        defender_model.save("../Models/" + defender_name + str(i))

    if (False):
Example No. 7
    def learn(self,
              total_timesteps,
              callback=None,
              log_interval=100,
              tb_log_name="DQN",
              reset_num_timesteps=True,
              replay_wrapper=None):

        new_tb_log = self._init_num_timesteps(reset_num_timesteps)

        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()

            # Create the replay buffer
            if self.prioritized_replay:
                self.replay_buffer = PrioritizedReplayBuffer(
                    self.buffer_size, alpha=self.prioritized_replay_alpha)
                if self.prioritized_replay_beta_iters is None:
                    prioritized_replay_beta_iters = total_timesteps
                else:
                    prioritized_replay_beta_iters = self.prioritized_replay_beta_iters
                self.beta_schedule = LinearSchedule(
                    prioritized_replay_beta_iters,
                    initial_p=self.prioritized_replay_beta0,
                    final_p=1.0)
            else:
                self.replay_buffer = ReplayBuffer(self.buffer_size)
                self.beta_schedule = None

            if replay_wrapper is not None:
                assert not self.prioritized_replay, "Prioritized replay buffer is not supported by HER"
                self.replay_buffer = replay_wrapper(self.replay_buffer)

            # Create the schedule for exploration starting from 1.
            self.exploration = LinearSchedule(
                schedule_timesteps=int(self.exploration_fraction *
                                       total_timesteps),
                initial_p=self.exploration_initial_eps,
                final_p=self.exploration_final_eps)

            episode_rewards = [0.0]
            episode_successes = []
            obs = self.env.reset()
            obs_hdqn_old = None
            action_hdqn = None
            reset = True
            F = 0

            for _ in range(total_timesteps):
                if callback is not None:
                    # Only stop training if return value is False, not when it is None. This is for backwards
                    # compatibility with callbacks that have no return statement.
                    if callback(locals(), globals()) is False:
                        break
                # Take action and update exploration to the newest value
                kwargs = {}
                if not self.param_noise:
                    update_eps = self.exploration.value(self.num_timesteps)
                    update_param_noise_threshold = 0.
                else:
                    update_eps = 0.
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(t).

                    update_param_noise_threshold = \
                        -np.log(1. - self.exploration.value(self.num_timesteps) +
                                self.exploration.value(self.num_timesteps) / float(self.env.action_space.n))
                    kwargs['reset'] = reset
                    kwargs[
                        'update_param_noise_threshold'] = update_param_noise_threshold
                    kwargs['update_param_noise_scale'] = True

                # Check if agent is busy or idle
                OBS_IS_IDLE = True
                if (OBS_IS_IDLE):
                    if not reset:
                        # Store HDQN transition
                        self.replay_buffer.add(obs_hdqn_old, action_hdqn, F,
                                               obs, float(done))

                    # Select new goal for the agent using the current Q function
                    action = self.act(np.array(obs)[None],
                                      update_eps=update_eps,
                                      **kwargs)[0]
                    env_action = action

                    # Update bookkeepping for next HDQN buffer update
                    obs_hdqn_old = obs
                    action_hdqn = env_action
                    F = 0.
                else:
                    # Agent is busy, so select a dummy action (it will be ignored anyway)
                    env_action = 0

                reset = False
                new_obs, rew, done, info = self.env.step(env_action)
                F = F + rew

                if writer is not None:
                    ep_rew = np.array([rew]).reshape((1, -1))
                    ep_done = np.array([done]).reshape((1, -1))
                    total_episode_reward_logger(self.episode_reward, ep_rew,
                                                ep_done, writer,
                                                self.num_timesteps)

                episode_rewards[-1] += rew

                if done:
                    # Store HDQN transition
                    self.replay_buffer.add(obs_hdqn_old, action_hdqn, F, obs,
                                           float(done))

                    maybe_is_success = info.get('is_success')
                    if maybe_is_success is not None:
                        episode_successes.append(float(maybe_is_success))
                    if not isinstance(self.env, VecEnv):
                        obs = self.env.reset()
                    episode_rewards.append(0.0)
                    reset = True

                # Do not train if the warmup phase is not over
                # or if there are not enough samples in the replay buffer
                can_sample = self.replay_buffer.can_sample(self.batch_size)
                if can_sample and self.num_timesteps > self.learning_starts \
                        and self.num_timesteps % self.train_freq == 0:
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    # pytype:disable=bad-unpacking
                    if self.prioritized_replay:
                        assert self.beta_schedule is not None, \
                               "BUG: should be LinearSchedule when self.prioritized_replay True"
                        experience = self.replay_buffer.sample(
                            self.batch_size,
                            beta=self.beta_schedule.value(self.num_timesteps))
                        (obses_t, actions, rewards, obses_tp1, dones, weights,
                         batch_idxes) = experience
                    else:
                        obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(
                            self.batch_size)
                        weights, batch_idxes = np.ones_like(rewards), None
                    # pytype:enable=bad-unpacking

                    if writer is not None:
                        # run loss backprop with summary, but once every 100 steps save the metadata
                        # (memory, compute time, ...)
                        if (1 + self.num_timesteps) % 100 == 0:
                            run_options = tf.RunOptions(
                                trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata()
                            summary, td_errors = self._train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess,
                                options=run_options,
                                run_metadata=run_metadata)
                            writer.add_run_metadata(
                                run_metadata, 'step%d' % self.num_timesteps)
                        else:
                            summary, td_errors = self._train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess)
                        writer.add_summary(summary, self.num_timesteps)
                    else:
                        _, td_errors = self._train_step(obses_t,
                                                        actions,
                                                        rewards,
                                                        obses_tp1,
                                                        obses_tp1,
                                                        dones,
                                                        weights,
                                                        sess=self.sess)

                    if self.prioritized_replay:
                        new_priorities = np.abs(
                            td_errors) + self.prioritized_replay_eps
                        assert isinstance(self.replay_buffer,
                                          PrioritizedReplayBuffer)
                        self.replay_buffer.update_priorities(
                            batch_idxes, new_priorities)

                if can_sample and self.num_timesteps > self.learning_starts and \
                        self.num_timesteps % self.target_network_update_freq == 0:
                    # Update target network periodically.
                    self.update_target(sess=self.sess)

                if len(episode_rewards[-101:-1]) == 0:
                    mean_100ep_reward = -np.inf
                else:
                    mean_100ep_reward = round(
                        float(np.mean(episode_rewards[-101:-1])), 1)

                num_episodes = len(episode_rewards)
                if self.verbose >= 1 and done and log_interval is not None and len(
                        episode_rewards) % log_interval == 0:
                    logger.record_tabular("steps", self.num_timesteps)
                    logger.record_tabular("episodes", num_episodes)
                    if len(episode_successes) > 0:
                        logger.logkv("success rate",
                                     np.mean(episode_successes[-100:]))
                    logger.record_tabular("mean 100 episode reward",
                                          mean_100ep_reward)
                    logger.record_tabular(
                        "% time spent exploring",
                        int(100 * self.exploration.value(self.num_timesteps)))
                    logger.dump_tabular()

                self.num_timesteps += 1

        return self
Example No. 8
    def learn(self,
              total_timesteps,
              callback=None,
              seed=None,
              log_interval=100,
              tb_log_name="DQN",
              reset_num_timesteps=True,
              initial_p=1.0):

        self.actions_weights = []
        self.actions_container = []

        new_tb_log = self._init_num_timesteps(reset_num_timesteps)

        cnt = 0
        ds_rewards = [[0, 0]]
        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()

            # Create the replay buffer
            if self.prioritized_replay:
                self.replay_buffer = PrioritizedReplayBuffer(
                    self.buffer_size, alpha=self.prioritized_replay_alpha)
                if self.prioritized_replay_beta_iters is None:
                    prioritized_replay_beta_iters = total_timesteps
                else:
                    prioritized_replay_beta_iters = self.prioritized_replay_beta_iters
                self.beta_schedule = LinearSchedule(
                    prioritized_replay_beta_iters,
                    initial_p=self.prioritized_replay_beta0,
                    final_p=1.0)
            else:
                self.replay_buffer = ReplayBuffer(self.buffer_size)
                self.beta_schedule = None
            # Create the schedule for exploration starting from 1.
            self.exploration = LinearSchedule(
                schedule_timesteps=int(self.exploration_fraction *
                                       total_timesteps),
                initial_p=initial_p,
                final_p=self.exploration_final_eps)

            episode_rewards = [0.0]
            obs = self.env.reset()

            reset = True
            self.episode_reward = np.zeros((1, ))

            for _ in range(total_timesteps):
                if callback is not None:
                    # Only stop training if return value is False, not when it is None. This is for backwards
                    # compatibility with callbacks that have no return statement.
                    if callback(locals(), globals()) is False:
                        break
                # Take action and update exploration to the newest value
                kwargs = {}
                if not self.param_noise:
                    update_eps = self.exploration.value(self.num_timesteps)
                    update_param_noise_threshold = 0.
                else:
                    update_eps = 0.
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                    # for detailed explanation.
                    update_param_noise_threshold = \
                        -np.log(1. - self.exploration.value(self.num_timesteps) +
                                self.exploration.value(self.num_timesteps) / float(self.env.action_space.n))
                    kwargs['reset'] = reset
                    kwargs[
                        'update_param_noise_threshold'] = update_param_noise_threshold
                    kwargs['update_param_noise_scale'] = True
                ''' Hierarchical Step (Start) '''

                obs, new_obs, rew, action, done, reset = self.hierarchical_step(
                    obs, ds_rewards, cnt, kwargs, update_eps)
                ''' Hierarchical Step (End) '''

                if writer is not None:
                    ep_rew = np.array([rew]).reshape((1, -1))
                    ep_done = np.array([done]).reshape((1, -1))
                    self.episode_reward = total_episode_reward_logger(
                        self.episode_reward, ep_rew, ep_done, writer,
                        self.num_timesteps)

                episode_rewards[-1] += rew
                if done:
                    if not isinstance(self.env, VecEnv):
                        obs = self.env.reset()
                    episode_rewards.append(0.0)
                    reset = True

                if self.num_timesteps > self.learning_starts and self.num_timesteps % self.train_freq == 0:
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    if self.prioritized_replay:
                        experience = self.replay_buffer.sample(
                            self.batch_size,
                            beta=self.beta_schedule.value(self.num_timesteps))
                        (obses_t, actions, rewards, obses_tp1, dones, weights,
                         batch_idxes) = experience
                    else:
                        obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(
                            self.batch_size)
                        weights, batch_idxes = np.ones_like(rewards), None

                    if writer is not None:
                        # run loss backprop with summary, but once every 100 steps save the metadata
                        # (memory, compute time, ...)
                        if (1 + self.num_timesteps) % 100 == 0:
                            run_options = tf.RunOptions(
                                trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata()
                            summary, td_errors = self._train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess,
                                options=run_options,
                                run_metadata=run_metadata)
                            writer.add_run_metadata(
                                run_metadata, 'step%d' % self.num_timesteps)
                        else:
                            summary, td_errors = self._train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess)
                        writer.add_summary(summary, self.num_timesteps)
                    else:
                        _, td_errors = self._train_step(obses_t,
                                                        actions,
                                                        rewards,
                                                        obses_tp1,
                                                        obses_tp1,
                                                        dones,
                                                        weights,
                                                        sess=self.sess)

                    if self.prioritized_replay:
                        try:
                            new_priorities = np.array([
                                abs(x) for x in td_errors.tolist()
                            ]) + self.prioritized_replay_eps
                            self.replay_buffer.update_priorities(
                                batch_idxes, new_priorities)
                        except AssertionError:
                            print(td_errors)

                if self.num_timesteps > self.learning_starts and \
                        self.num_timesteps % self.target_network_update_freq == 0:
                    # Update target network periodically.
                    self.update_target(sess=self.sess)

                if len(episode_rewards[-101:-1]) == 0:
                    mean_100ep_reward = -np.inf
                else:
                    mean_100ep_reward = round(
                        float(np.mean(episode_rewards[-101:-1])), 1)

                num_episodes = len(episode_rewards)
                if self.verbose >= 1 and done and log_interval is not None and len(
                        episode_rewards) % log_interval == 0:
                    logger.record_tabular("steps", self.num_timesteps)
                    logger.record_tabular("episodes", num_episodes)
                    logger.record_tabular("mean 100 episode reward",
                                          mean_100ep_reward)
                    logger.record_tabular(
                        "% time spent exploring",
                        int(100 * self.exploration.value(self.num_timesteps)))
                    logger.dump_tabular()

                self.num_timesteps += 1
        return self, ds_rewards
Example No. 9
    def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="DQN",
              reset_num_timesteps=True, replay_wrapper=None, save_interval=None, save_path=None):

        print('----------------------------------------------')
        print('|                 L E A R N                  |')
        print('----------------------------------------------')
        print("num timesteps = " + str(int(total_timesteps / 1000)) + 'k')
        print("save_interval = " + str(int(save_interval / 1000)) + 'k')
        print()
        k = 10
        save_interval_st = save_interval

        new_tb_log = self._init_num_timesteps(reset_num_timesteps)

        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()

            # Create the replay buffer
            if self.prioritized_replay:
                self.replay_buffer = PrioritizedReplayBuffer(self.buffer_size, alpha=self.prioritized_replay_alpha)
                if self.prioritized_replay_beta_iters is None:
                    prioritized_replay_beta_iters = total_timesteps
                else:
                    prioritized_replay_beta_iters = self.prioritized_replay_beta_iters
                self.beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                                    initial_p=self.prioritized_replay_beta0,
                                                    final_p=1.0)
            else:
                self.replay_buffer = ReplayBuffer(self.buffer_size)
                self.beta_schedule = None

            if replay_wrapper is not None:
                assert not self.prioritized_replay, "Prioritized replay buffer is not supported by HER"
                self.replay_buffer = replay_wrapper(self.replay_buffer)  # upgrade

            # Create the schedule for exploration starting from 1.
            self.exploration = LinearSchedule(schedule_timesteps=int(self.exploration_fraction * total_timesteps),
                                              initial_p=1.0,
                                              final_p=self.exploration_final_eps)

            episode_rewards = [0.0]
            episode_win_rates = [0.0]
            episode_successes = []
            obs, obs_nf = self.env.reset()
            reset = True
            self.episode_reward = np.zeros((1,))
            self.win_rate = np.zeros((1,))
            # print(obs_nf)
            """
            Exploration uses pruning (action filtering)
            """
            prev2s = [None, None]

            def input_formate(obs):
                return obs.transpose((1, 2, 0))

            for _ in tqdm(range(total_timesteps)):
                if callback is not None:
                    # Only stop training if return value is False, not when it is None. This is for backwards
                    # compatibility with callbacks that have no return statement.
                    if callback(locals(), globals()) is False:
                        break
                # Take action and update exploration to the newest value
                kwargs = {}
                if not self.param_noise:
                    update_eps = self.exploration.value(self.num_timesteps)
                    update_param_noise_threshold = 0.
                else:
                    update_eps = 0.
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                    # for detailed explanation.
                    update_param_noise_threshold = \
                        -np.log(1. - self.exploration.value(self.num_timesteps) +
                                self.exploration.value(self.num_timesteps) / float(self.env.action_space.n))
                    kwargs['reset'] = reset
                    kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                    kwargs['update_param_noise_scale'] = True
                # tf.summary.scalar('update_eps', update_eps)
                with self.sess.as_default():
                    # Never explore here; originally this was update_eps=update_eps
                    action = self.act(np.array(input_formate(obs))[None], update_eps=-1, **kwargs)[0]
                    filter_action = random.randint(0, 5)
                    if type(obs_nf) == tuple:
                        obs_nf = obs_nf[0]
                    filter_action = feature_utils.get_modify_act(obs_nf, filter_action, prev2s, nokick=True)
                    filter_action = feature_utils.get_act_abs(obs_nf, filter_action, rang=8)
                    # Estimate the distribution of filter_actions over 100 samples
                    fil_acts = []
                    for _ in range(100):
                        rand_act = random.randint(0, 5)
                        fil_act = feature_utils.get_modify_act(obs_nf, rand_act, prev2s, nokick=True)
                        fil_act = feature_utils.get_act_abs(obs_nf, fil_act, rang=8)
                        fil_acts.append(fil_act)
                    # print('fil', fil_acts)
                    # print()
                    fil_acts = np.eye(65)[fil_acts]
                    # print('eye', fil_acts)
                    # print()
                    fil_acts = fil_acts.sum(axis=0)
                    # print('sum', fil_acts)
                    # print()

                    if random.random() < update_eps:
                        action = filter_action

                env_action = action
                reset = False
                new_obs, rew, done, info, new_obs_nf = self.env.step(env_action)  # .ntc
                self.replay_buffer.add(input_formate(obs), action, rew, input_formate(new_obs), float(done), fil_acts)

                '''
                    HER
                '''
                self.temp_buffer.append((obs, action, rew, new_obs, float(done), fil_acts))
                if len(self.temp_buffer) >= self.temp_size:
                    for t in range(self.temp_size):
                        s, a, r, s_n, d, fa = self.temp_buffer[t]
                        for k in range(self.k):
                            _s = copy.deepcopy(s)
                            _a = a
                            _r = copy.deepcopy(r)
                            _s_n = copy.deepcopy(s_n)
                            future = np.random.randint(t, self.temp_size)
                            s_f, _a_f, _, _, _, _ = self.temp_buffer[future]
                            g_map = s_f[-2]
                            _s[-1] = g_map
                            # print(_s_n[-2][goal])
                            if (_s_n[-2] == g_map).all() or (
                                    (_s[-2] == _s[-1]).all() and _a_f == a == 64):  # check whether _s reaches the goal via action a
                                # if (_s[-2]) or g == 64:  # i.e. whether the agent stays in place
                                # print('HER')
                                _r = _r + 0.01
                            self.replay_buffer.add(input_formate(_s), a, _r, input_formate(_s_n), d, fa)
                    self.temp_buffer.clear()

                obs = new_obs
                obs_nf = new_obs_nf

                if writer is not None:
                    ep_rew = np.array([rew]).reshape((1, -1))
                    ep_win = np.array([info]).reshape((1, -1))
                    ep_done = np.array([done]).reshape((1, -1))
                    self.episode_reward = total_episode_reward_logger(self.episode_reward, ep_rew, ep_done, writer,
                                                                      self.num_timesteps)
                    self.win_rate = total_rate_logger(self.win_rate, ep_win, ep_done, writer,
                                                      self.num_timesteps, name='win_rate')

                episode_rewards[-1] += rew
                episode_win_rates[-1] += info
                if done:
                    maybe_is_success = (rew > 0)  # info.get('is_success')  # .ntc
                    if maybe_is_success is not None:
                        episode_successes.append(float(maybe_is_success))
                    if not isinstance(self.env, VecEnv):
                        obs, obs_nf = self.env.reset()
                    episode_rewards.append(0.0)
                    episode_win_rates.append(0.0)
                    reset = True
                    prev2s = [None, None]

                # Do not train if the warmup phase is not over
                # or if there are not enough samples in the replay buffer
                can_sample = self.replay_buffer.can_sample(self.batch_size)
                if can_sample and self.num_timesteps > self.learning_starts \
                        and self.num_timesteps % self.train_freq == 0:
                    # print('Sampling ... ...', self.num_timesteps)
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    if self.prioritized_replay:
                        experience = self.replay_buffer.sample(self.batch_size,
                                                               beta=self.beta_schedule.value(self.num_timesteps))
                        (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                    else:
                        obses_t, actions, rewards, obses_tp1, dones, filter_actions = self.replay_buffer.sample(
                            self.batch_size)
                        weights, batch_idxes = np.ones_like(rewards), None
                    # print(rewards.shape)
                    # print(dones.shape)
                    # print(actions.shape)
                    if writer is not None:
                        # run loss backprop with summary, but once every 100 steps save the metadata
                        # (memory, compute time, ...)
                        # print("fils", filter_actions)
                        # print("acts", actions)
                        # print('   Training ... ...')
                        if (1 + self.num_timesteps) % 100 == 0:
                            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata()
                            summary, td_errors, kl_errors = self._train_step(obses_t, actions, rewards, obses_tp1,
                                                                             obses_tp1,
                                                                             dones, weights, filter_actions,
                                                                             sess=self.sess, options=run_options,
                                                                             run_metadata=run_metadata)
                            writer.add_run_metadata(run_metadata, 'step%d' % self.num_timesteps)
                        else:
                            summary, td_errors, kl_errors = self._train_step(obses_t, actions, rewards, obses_tp1,
                                                                                 obses_tp1,
                                                                                 dones, weights, filter_actions,
                                                                                 sess=self.sess)
                            # print('er', pr[0])
                            # print('kl', pr[1])
                            # print('x', pr[2])
                            # print('y', pr[3])
                        writer.add_summary(summary, self.num_timesteps)
                    else:
                        _, td_errors = self._train_step(obses_t, actions, rewards, obses_tp1, obses_tp1, dones, weights,
                                                        sess=self.sess)

                    if self.prioritized_replay:
                        new_priorities = np.abs(td_errors) + self.prioritized_replay_eps
                        self.replay_buffer.update_priorities(batch_idxes, new_priorities)

                if can_sample and self.num_timesteps > self.learning_starts and \
                        self.num_timesteps % self.target_network_update_freq == 0:
                    # Update target network periodically.
                    self.update_target(sess=self.sess)

                if len(episode_rewards[-101:-1]) == 0:
                    mean_100ep_reward = -np.inf
                else:
                    mean_100ep_reward = round(float(np.mean(episode_rewards[-101:-1])), 1)

                if len(episode_win_rates[-101:-1]) == 0:
                    mean_100ep_win_rate = -np.inf
                else:
                    mean_100ep_win_rate = round(float(np.mean(episode_win_rates[-101:-1])), 1)

                num_episodes = len(episode_rewards)
                if self.verbose >= 1 and done and log_interval is not None and len(episode_rewards) % log_interval == 0:
                    logger.record_tabular("steps", self.num_timesteps)
                    logger.record_tabular("episodes", num_episodes)
                    if len(episode_successes) > 0:
                        logger.logkv("success rate", np.mean(episode_successes[-100:]))
                    logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                    logger.record_tabular("mean 100 win rate", mean_100ep_win_rate)
                    logger.record_tabular("% time spent exploring",
                                          int(100 * self.exploration.value(self.num_timesteps)))
                    logger.dump_tabular()

                # save interval
                if self.num_timesteps >= save_interval_st:
                    save_interval_st += save_interval
                    s_path = save_path + '_' + str(int(self.num_timesteps / 1000)) + 'k.zip'
                    self.save(save_path=s_path)

                self.num_timesteps += 1

        return self
Example No. 10
    def learn(self,
              total_timesteps,
              callback=None,
              seed=None,
              log_interval=100,
              tb_log_name="A2C"):
        with SetVerbosity(self.verbose), TensorboardWriter(
                self.graph, self.tensorboard_log, tb_log_name) as writer:
            self._setup_learn(seed)

            self.learning_rate_schedule = Scheduler(
                initial_value=self.learning_rate,
                n_values=total_timesteps,
                schedule=self.lr_schedule)

            # Entropy coefficient should be large at the beginning
            self.ent_coef_schedule = LinearSchedule(
                schedule_timesteps=int(1e6), initial_p=0.1, final_p=0.01)

            runner = A2CRunner(self.env,
                               self,
                               n_steps=self.n_steps,
                               gamma=self.gamma)
            self.episode_reward = np.zeros((self.n_envs, ))

            t_start = time.time()
            for update in range(1, total_timesteps // self.n_batch + 1):
                # true_reward is the reward without discount
                obs, states, rewards, masks, actions, values, true_reward = runner.run(
                )
                _, value_loss, policy_entropy = self._train_step(
                    update * self.n_batch, obs, states, rewards, masks,
                    actions, values, update, writer)

                n_seconds = time.time() - t_start
                fps = int((update * self.n_batch) / n_seconds)

                if writer is not None:
                    self.episode_reward = total_episode_reward_logger(
                        self.episode_reward,
                        true_reward.reshape((self.n_envs, self.n_steps)),
                        masks.reshape((self.n_envs, self.n_steps)), writer,
                        update * (self.n_batch + 1))

                if callback is not None:
                    callback(locals(), globals())

                if self.verbose >= 1 and (update % log_interval == 0
                                          or update == 1):
                    explained_var = explained_variance(values, rewards)
                    logger.record_tabular("nupdates", update)
                    logger.record_tabular("total_timesteps",
                                          update * self.n_batch)
                    logger.record_tabular("fps", fps)
                    logger.record_tabular("policy_entropy",
                                          float(policy_entropy))
                    logger.record_tabular("value_loss", float(value_loss))
                    logger.record_tabular("explained_variance",
                                          float(explained_var))
                    logger.dump_tabular()

        return self
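
In this variant LinearSchedule anneals the entropy coefficient instead of a learning rate: the bonus falls from 0.1 to 0.01 over the first 1e6 timesteps and then stays at 0.01, regardless of total_timesteps. Evaluating it at a few points (same interpolation behavior as in Example No. 1):

ent_coef_schedule = LinearSchedule(schedule_timesteps=int(1e6),
                                   initial_p=0.1, final_p=0.01)
print(ent_coef_schedule.value(0))        # 0.1
print(ent_coef_schedule.value(500000))   # 0.055
print(ent_coef_schedule.value(2000000))  # 0.01 (clamped past 1e6 steps)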
Example No. 11
    def learn(self,
              total_timesteps,
              seed=None,
              tb_log_name='DQN',
              test_interval=1,
              reset_num_timesteps=True):
        if reset_num_timesteps:
            self.num_timesteps = 0

        with TensorboardWriter(self.graph, self.tensorboard_log,
                               tb_log_name) as writer:
            self._setup_learn(seed)

            self.replay_buffer = ReplayBuffer(size=self.buffer_size)
            self.exploration = LinearSchedule(
                schedule_timesteps=int(self.exploration_fraction *
                                       total_timesteps),
                initial_p=1.0,
                final_p=self.exploration_final_eps)
            episode_rewards = [0.0]
            obs = self.env.reset(train=True)

            best_train_score = None
            best_test_score = None
            self.reward_curve = []

            for _ in range(total_timesteps):
                update_eps = self.exploration.value(self.num_timesteps)
                with self.sess.as_default():
                    action = self.act(np.array(obs)[None],
                                      update_eps=update_eps)[0]
                new_obs, rew, done, _ = self.env.step(action)

                self.replay_buffer.add(obs, action, rew, new_obs, float(done))
                obs = new_obs

                episode_rewards[-1] += rew

                if self.num_timesteps > self.learning_starts:
                    obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(
                        self.batch_size)
                    weights = np.ones_like(rewards)
                    if writer is not None:
                        if (1 + self.num_timesteps) % 100 == 0:
                            summary, td_errors = self.train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess)
                            writer.add_summary(summary, self.num_timesteps)
                    else:
                        _, td_errors = self.train_step(obses_t,
                                                       actions,
                                                       rewards,
                                                       obses_tp1,
                                                       obses_tp1,
                                                       dones,
                                                       weights,
                                                       sess=self.sess)

                if self.num_timesteps > self.learning_starts and self.num_timesteps % self.target_network_update_freq == 0:
                    self.update_target(sess=self.sess)

                if done:

                    print('-------------------------------------')
                    print('steps                     | {}'.format(
                        self.num_timesteps))
                    print('episodes                  | {}'.format(
                        len(episode_rewards)))
                    epsilon = int(100 *
                                  self.exploration.value(self.num_timesteps))
                    print('% time spent exploring    | {}'.format(epsilon))
                    print('--')

                    mean_15ep_reward = -np.inf if len(
                        episode_rewards[-16:-1]) == 0 else round(
                            float(np.mean(episode_rewards[-16:-1])), 1)
                    self.reward_curve.append(mean_15ep_reward)
                    print('mean 15 episode reward    | {:.1f}'.format(
                        mean_15ep_reward))

                    journal = self.env.sim.journal
                    print('Total operations          | {}'.format(
                        len(self.env.sim.journal)))
                    longs = [x for x in journal if x['Type'] == 'LONG']
                    shorts = [x for x in journal if x['Type'] == 'SHORT']
                    print('Long/Short                | {}/{}'.format(
                        len(longs), len(shorts)))
                    print('Avg duration trades       | {:.2f}'.format(
                        np.mean([j['Trade Duration'] for j in journal])))
                    total_profit = sum([j['Profit'] for j in journal])
                    print('Total profit              | {:.2f}'.format(
                        total_profit))
                    print('Avg profit per trade      | {:.3f}'.format(
                        total_profit / self.env.sim.total_trades))

                    if epsilon <= self.exploration_final_eps * 100:
                        if best_train_score is None or total_profit > best_train_score:
                            self.save('saves/best_model_train.pkl')
                            best_train_score = total_profit

                    if self.num_timesteps % test_interval == 0:
                        print('--')
                        test_episode_rewards, test_longs, test_shorts, test_ave_profit_per_trade = self.test(
                        )
                        print('Total profit test         > {:.2f}'.format(
                            test_episode_rewards))
                        print('Long/Short test           > {}/{}'.format(
                            test_longs, test_shorts))
                        print('Avg profit per trade test > {:.3f}'.format(
                            test_ave_profit_per_trade))

                        if epsilon <= self.exploration_final_eps * 100:
                            if best_test_score is None or test_episode_rewards > best_test_score:
                                self.save('saves/best_model_test.pkl')
                                best_test_score = test_episode_rewards
                    print('-------------------------------------')

                    obs = self.env.reset()
                    episode_rewards.append(0.0)

                    if self.num_timesteps + (
                            self.num_timesteps /
                            len(episode_rewards)) >= total_timesteps:
                        self.save('saves/final_model.pkl')
                        break

                self.num_timesteps += 1
        return self
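
# Hedged illustration of the exploration schedule built in learn() above:
# epsilon decays linearly from 1.0 over exploration_fraction * total_timesteps
# steps and then stays at exploration_final_eps. The numbers below are
# hypothetical, chosen only to show the shape of the schedule.
from stable_baselines.common.schedules import LinearSchedule

total_timesteps = 10000
exploration_fraction = 0.1
exploration_final_eps = 0.02
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
                             initial_p=1.0,
                             final_p=exploration_final_eps)
for t in (0, 500, 1000, 5000):
    print(t, exploration.value(t))  # -> 1.0, 0.51, 0.02, 0.02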
Exemplo n.º 12
0
    def learn(self,
              total_timesteps,
              callback=None,
              log_interval=100,
              tb_log_name="DQN",
              reset_num_timesteps=True,
              replay_wrapper=None,
              distinct_replay_buffer=False):

        new_tb_log = self._init_num_timesteps(reset_num_timesteps)
        for i, m in enumerate(self.sub_models):
            m.learning_rate = get_schedule_fn(m.learning_rate)
            if len(self.replay_wrappers) != 0:
                m.replay_buffer = self.replay_wrappers[i](m.replay_buffer)
            m._setup_learn()

        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()

            # Create the replay buffer
            if self.prioritized_replay:
                self.replay_buffer = PrioritizedReplayBuffer(
                    self.buffer_size, alpha=self.prioritized_replay_alpha)
                if self.prioritized_replay_beta_iters is None:
                    prioritized_replay_beta_iters = total_timesteps
                else:
                    prioritized_replay_beta_iters = self.prioritized_replay_beta_iters
                self.beta_schedule = LinearSchedule(
                    prioritized_replay_beta_iters,
                    initial_p=self.prioritized_replay_beta0,
                    final_p=1.0)
            else:
                self.replay_buffer = ReplayBuffer(self.buffer_size)
                self.beta_schedule = None

            if replay_wrapper is not None:
                assert not self.prioritized_replay, "Prioritized replay buffer is not supported by HER"
                self.replay_buffer = replay_wrapper(self.replay_buffer)

            # Create the schedule for exploration starting from 1.
            self.exploration = LinearSchedule(
                schedule_timesteps=int(self.exploration_fraction *
                                       total_timesteps),
                initial_p=self.exploration_initial_eps,
                final_p=self.exploration_final_eps)

            episode_rewards = [0.0]
            episode_successes = []
            obs = self.env.reset()
            reset = True
            macro_count = 0
            macro_len = self.macro_len
            macro_choices = []
            n_updates = 0

            for step in range(total_timesteps):
                if callback is not None:
                    # Only stop training if return value is False, not when it is None. This is for backwards
                    # compatibility with callbacks that have no return statement.
                    if callback(locals(), globals()) is False:
                        break

                # Take action and update exploration to the newest value
                kwargs = {}
                if not self.param_noise:
                    update_eps = self.exploration.value(self.num_timesteps)
                    update_param_noise_threshold = 0.
                else:
                    update_eps = 0.
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                    # for detailed explanation.
                    update_param_noise_threshold = \
                        -np.log(1. - self.exploration.value(self.num_timesteps) +
                                self.exploration.value(self.num_timesteps) / float(self.env.action_space.n))
                    kwargs['reset'] = reset
                    kwargs[
                        'update_param_noise_threshold'] = update_param_noise_threshold
                    kwargs['update_param_noise_scale'] = True
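                # A new macro action (i.e. which sub-model acts) is picked by the
                # DQN head every macro_len environment steps or right after a reset;
                # in between, the previously selected sub-model keeps control.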
                with self.sess.as_default():
                    if reset or macro_count % macro_len == 0:
                        macro_action = self.act(np.array(obs)[None],
                                                update_eps=update_eps,
                                                **kwargs)[0]
                        # macro_action = 1
                        macro_obs = obs
                        reward_in_one_macro = 0
                    macro_count += 1
                    macro_choices.append(macro_action)

                # use sub_model to decide action
                # env_action = self.sub_models[macro_action]
                current_sub = self.sub_models[macro_action]
                if self.num_timesteps < self.learning_starts or np.random.rand(
                ) < current_sub.random_exploration:
                    # actions sampled from action space are from range specific to the environment
                    # but algorithm operates on tanh-squashed actions therefore simple scaling is used
                    unscaled_action = self.env.action_space.sample()
                    action = scale_action(self.env.action_space,
                                          unscaled_action)
                else:
                    action = current_sub.policy_tf.step(
                        obs[None], deterministic=False).flatten()
                    # Add noise to the action (improve exploration,
                    # not needed in general)
                    if current_sub.action_noise is not None:
                        action = np.clip(action + current_sub.action_noise(),
                                         -1, 1)
                    # inferred actions need to be transformed to environment action_space before stepping
                    unscaled_action = unscale_action(self.env.action_space,
                                                     action)
                assert action.shape == self.env.action_space.shape

                reset = False
                new_obs, rew, done, info = self.env.step(unscaled_action)
                episode_rewards[-1] += rew
                # rew -= self.args.policy_cost_coef * self.args.sub_policy_costs[macro_action]
                reward_in_one_macro += rew - self.args.policy_cost_coef * self.args.sub_policy_costs[
                    macro_action]
                # Store transition in the replay buffer.
                if macro_count % macro_len == 0 or done:
                    self.replay_buffer.add(macro_obs, macro_action,
                                           reward_in_one_macro, new_obs,
                                           float(done))
                for i, m in enumerate(self.sub_models):
                    if distinct_replay_buffer:
                        if i == macro_action:
                            m.replay_buffer.add(obs, action, rew, new_obs,
                                                float(done))
                    else:
                        m.replay_buffer.add(obs, action, rew, new_obs,
                                            float(done))
                obs = new_obs

                if writer is not None:
                    ep_rew = np.array([rew]).reshape((1, -1))
                    ep_done = np.array([done]).reshape((1, -1))
                    total_episode_reward_logger(self.episode_reward, ep_rew,
                                                ep_done, writer,
                                                self.num_timesteps)

                # print("step: %d, done: %d" % (self.num_timesteps, done))
                if done:
                    maybe_is_success = info.get('is_success')
                    if maybe_is_success is not None:
                        episode_successes.append(float(maybe_is_success))
                    if not isinstance(self.env, VecEnv):
                        obs = self.env.reset()
                    episode_rewards.append(0.0)
                    reset = True
                    macro_action = None
                    macro_count = 0
                    prev_macro_choices = macro_choices
                    macro_choices = []

                # Do not train if the warmup phase is not over
                # or if there are not enough samples in the replay buffer
                can_sample = self.replay_buffer.can_sample(self.batch_size)
                if can_sample and self.num_timesteps > self.learning_starts \
                        and self.num_timesteps % self.train_freq == 0:
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    # pytype:disable=bad-unpacking
                    if self.prioritized_replay:
                        assert self.beta_schedule is not None, \
                               "BUG: should be LinearSchedule when self.prioritized_replay True"
                        experience = self.replay_buffer.sample(
                            self.batch_size,
                            beta=self.beta_schedule.value(self.num_timesteps))
                        (obses_t, actions, rewards, obses_tp1, dones, weights,
                         batch_idxes) = experience
                    else:
                        obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(
                            self.batch_size)
                        weights, batch_idxes = np.ones_like(rewards), None
                    # pytype:enable=bad-unpacking

                    if writer is not None:
                        # run loss backprop with summary, but once every 100 steps save the metadata
                        # (memory, compute time, ...)
                        if (1 + self.num_timesteps) % 100 == 0:
                            run_options = tf.RunOptions(
                                trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata()
                            summary, td_errors = self._train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess,
                                options=run_options,
                                run_metadata=run_metadata)
                            writer.add_run_metadata(
                                run_metadata, 'step%d' % self.num_timesteps)
                        else:
                            summary, td_errors = self._train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess)
                        writer.add_summary(summary, self.num_timesteps)
                    else:
                        _, td_errors = self._train_step(obses_t,
                                                        actions,
                                                        rewards,
                                                        obses_tp1,
                                                        obses_tp1,
                                                        dones,
                                                        weights,
                                                        sess=self.sess)

                    if self.prioritized_replay:
                        new_priorities = np.abs(
                            td_errors) + self.prioritized_replay_eps
                        assert isinstance(self.replay_buffer,
                                          PrioritizedReplayBuffer)
                        self.replay_buffer.update_priorities(
                            batch_idxes, new_priorities)

                if can_sample and self.num_timesteps > self.learning_starts and \
                        self.num_timesteps % self.target_network_update_freq == 0:
                    # Update target network periodically.
                    self.update_target(sess=self.sess)

                if step % self.sub_models[0].train_freq == 0:
                    mb_infos_vals = []
                    for m in self.sub_models:
                        # Update policy, critics and target networks
                        for grad_step in range(m.gradient_steps):
                            # Break if the warmup phase is not over
                            # or if there are not enough samples in the replay buffer
                            if not m.replay_buffer.can_sample(m.batch_size) \
                               or self.num_timesteps < m.learning_starts:
                                break
                            n_updates += 1
                            # Compute current learning_rate
                            frac = 1.0 - step / total_timesteps
                            current_lr = m.learning_rate(frac)
                            # Update policy and critics (q functions)
                            mb_infos_vals.append(
                                m._train_step(step, writer, current_lr))
                            # Update target network
                            if (step +
                                    grad_step) % m.target_update_interval == 0:
                                # Update target network
                                m.sess.run(m.target_update_op)

                if len(episode_rewards[-101:-1]) == 0:
                    mean_100ep_reward = -np.inf
                else:
                    mean_100ep_reward = round(
                        float(np.mean(episode_rewards[-101:-1])), 1)

                num_episodes = len(episode_rewards)
                # print(done, log_interval, len(episode_rewards), self.num_timesteps)
                if self.verbose >= 1 and done and log_interval is not None and len(
                        episode_rewards) % log_interval == 0:
                    logger.record_tabular("steps", self.num_timesteps)
                    prev_macro_choices = np.array(prev_macro_choices)
                    macro_choices_ratio = [
                        '%.2f' %
                        ((prev_macro_choices[prev_macro_choices == i]).size /
                         prev_macro_choices.size)
                        for i in range(self.n_actions)
                    ]
                    logger.record_tabular("macro choices", macro_choices_ratio)
                    logger.record_tabular("episodes", num_episodes)
                    if len(episode_successes) > 0:
                        logger.logkv("success rate",
                                     np.mean(episode_successes[-100:]))
                    logger.record_tabular("mean 100 episode reward",
                                          mean_100ep_reward)
                    logger.record_tabular(
                        "% time spent exploring",
                        int(100 * self.exploration.value(self.num_timesteps)))
                    logger.logkv("n_updates_of_sub", n_updates)
                    logger.dump_tabular()
                    print("macro choices", prev_macro_choices)

                self.num_timesteps += 1

        return self
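
# Hedged illustration of the beta schedule used for prioritized replay above:
# the importance-sampling exponent is annealed from prioritized_replay_beta0
# (0.4 is the usual stable_baselines default, assumed here) up to 1.0 over the
# whole run, so late-training updates are fully bias-corrected.
from stable_baselines.common.schedules import LinearSchedule

total_timesteps = 100000  # hypothetical
beta_schedule = LinearSchedule(total_timesteps, initial_p=0.4, final_p=1.0)
print(beta_schedule.value(0))                     # 0.4
print(beta_schedule.value(total_timesteps // 2))  # 0.7
print(beta_schedule.value(total_timesteps))       # 1.0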
Exemplo n.º 13
0
from stable_baselines.common.schedules import ConstantSchedule, LinearSchedule

my_step_limit = 250
my_step_size = 0.01745*5
my_maxspeed = 1
my_randomBall = True
my_binaryReward = True

print("CARS_PPO2_DISCRETE.py LESS GO")

env = CustomEnv(step_limit=my_step_limit,
                step_size=my_step_size,
                maxspeed=my_maxspeed,
                randomBall=my_randomBall,
                binaryReward=my_binaryReward)  # 0.01745*5
# Optional: PPO2 requires a vectorized environment to run
# the env is now wrapped automatically when passing it to the constructor
# env = DummyVecEnv([lambda: env])
timesteps = 150000
my_learning_rate = LinearSchedule(timesteps, 0.005, 0.0001)  # positional args: schedule_timesteps, final_p, initial_p; PPO2 default LR is 0.00025

name = "CARS_BNR_fixedShape_newObs_ppo2_LR_"  + "LinearSchedule_"  + "timesteps_" + str(timesteps) + "ep_length_" + str(my_step_limit) + "turnrate_" + str(my_step_size) + "maxspeed_" + str(my_maxspeed) + "randomBall_" + str(my_randomBall) + "binaryReward_" + str(my_binaryReward)
# Configure tensorflow using GPU
# Use tensorboard to show reward over time etc
model = PPO2(MlpPolicy,
             env,
             learning_rate=my_learning_rate.value,
             verbose=1,
             tensorboard_log="/home/fritz/Documents/BA/TensorBoardLogs/CARS3")  # default: learning_rate=2.5e-4
model.learn(total_timesteps=timesteps, tb_log_name=name)



model.save("../Models/" + name)

try:
    f = open("../Envparameters/envparameters_" + name, "x")
    f.write(str([my_step_limit, my_step_size, my_maxspeed, my_randomBall, my_binaryReward]))
    f.close()
except FileExistsError:
    # handler assumed; the original snippet is truncated at this point
    print("Envparameters file already exists for", name)
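
# Hedged companion sketch: reading the saved environment parameters back in a
# later evaluation script. Assumes the file written above; ast.literal_eval
# parses the Python list literal produced by str([...]).
import ast

with open("../Envparameters/envparameters_" + name) as f:
    step_limit, step_size, maxspeed, randomBall, binaryReward = ast.literal_eval(f.read())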
Exemplo n.º 14
0
    def learn(self,
              total_timesteps,
              callback=None,
              seed=None,
              log_interval=100,
              tb_log_name="DQN",
              reset_num_timesteps=True,
              replay_wrapper=None,
              learning_curve=False,
              test_t=None):

        new_tb_log = self._init_num_timesteps(reset_num_timesteps)

        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn(seed)

            # Create the replay buffer
            if self.prioritized_replay:
                self.replay_buffer = PrioritizedReplayBuffer(
                    self.buffer_size, alpha=self.prioritized_replay_alpha)
                if self.prioritized_replay_beta_iters is None:
                    prioritized_replay_beta_iters = total_timesteps
                else:
                    prioritized_replay_beta_iters = self.prioritized_replay_beta_iters
                self.beta_schedule = LinearSchedule(
                    prioritized_replay_beta_iters,
                    initial_p=self.prioritized_replay_beta0,
                    final_p=1.0)
            else:
                self.replay_buffer = ReplayBuffer(self.buffer_size)
                self.beta_schedule = None

            if replay_wrapper is not None:
                assert not self.prioritized_replay, "Prioritized replay buffer is not supported by HER"
                self.replay_buffer = replay_wrapper(self.replay_buffer)

            # Create the schedule for exploration starting from 1.
            self.exploration = LinearSchedule(
                schedule_timesteps=int(self.exploration_fraction *
                                       total_timesteps),
                initial_p=1.0,
                final_p=self.exploration_final_eps)

            episode_rewards = [0.0]
            self.cumul_reward = [0.0]
            episode_successes = []
            obs = self.env.reset()
            reset = True
            self.episode_reward = np.zeros((1, ))

            # variables for test eval ##
            test_step = test_t * 3 if test_t is not None else None
            test_results = {'sum': []}
            test_ts = []

            for _ in range(total_timesteps):

                ## Test eval period ##
                if learning_curve and _ % test_step == 0 and _ > 0:
                    print("--> Simulating test period")
                    self.env.reset()
                    test_r = 0.0
                    for i in range(test_t):
                        feasible_actions = AllocationEnv.get_feasible_actions(
                            obs["board_config"])
                        action_mask = AllocationEnv.get_action_mask(
                            feasible_actions, self.env.action_space.n)
                        action, _states = self.predict(obs, mask=action_mask)
                        action = AllocationEnv.check_action(
                            obs['board_config'], action)
                        obs, rewards, dones, info = self.env.step(action)
                        test_r += rewards

                    test_results["sum"].append(test_r)
                    test_ts.append(_)
                    self.env.reset()

                    # plot test eval progress
                    plt.plot(test_ts, test_results["sum"])
                    # plt.errorbar(iteration_cuts, results["mean"], yerr=results["std"], fmt='.k')
                    plt.xlabel("Iteration count")
                    plt.ylabel("Total (sum) test reward")
                    plt.savefig("figs/rl-learning-curve-{}.pdf".format(
                        cfg.vals['prj_name']))
                    plt.clf()
                    plt.close()

                    # write test eval progress
                    write_results = {}
                    for k, v in test_results.items():
                        write_results[k] = serialize_floats(v)

                    with open(
                            "output/rl-learning-curve-{}.json".format(
                                cfg.vals['prj_name']), 'w') as f:
                        json.dump(write_results, f)

                if callback is not None:
                    # Only stop training if return value is False, not when it is None. This is for backwards
                    # compatibility with callbacks that have no return statement.
                    if callback(locals(), globals()) is False:
                        break
                # Take action and update exploration to the newest value
                kwargs = {}
                if not self.param_noise:
                    update_eps = self.exploration.value(self.num_timesteps)
                    update_param_noise_threshold = 0.
                else:
                    update_eps = 0.
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                    # for detailed explanation.
                    update_param_noise_threshold = \
                        -np.log(1. - self.exploration.value(self.num_timesteps) +
                                self.exploration.value(self.num_timesteps) / float(self.env.action_space.n))
                    kwargs['reset'] = reset
                    kwargs[
                        'update_param_noise_threshold'] = update_param_noise_threshold
                    kwargs['update_param_noise_scale'] = True

                feasible_actions = AllocationEnv.get_feasible_actions(
                    obs["board_config"])
                action_mask = AllocationEnv.get_action_mask(
                    feasible_actions, self.action_space.n)
                with self.sess.as_default():
                    action = self.act(State.get_vec_observation(obs)[None],
                                      update_eps=update_eps,
                                      **kwargs,
                                      mask=action_mask)[0]
                reset = False
                # CHECK IF ACTIONS IS FEASIBLE
                action = AllocationEnv.check_action(obs['board_config'],
                                                    action)
                env_action = action
                new_obs, rew, done, info = self.env.step(env_action)
                print("action: {} - reward: {} - eps: {:.4}".format(
                    action, rew, update_eps))
                print(new_obs['day_vec'])
                print(new_obs['board_config'])
                # Store transition in the replay buffer.
                self.replay_buffer.add(State.get_vec_observation(obs), action,
                                       rew, State.get_vec_observation(new_obs),
                                       float(done))
                obs = new_obs

                if writer is not None:
                    ep_rew = np.array([rew]).reshape((1, -1))
                    ep_done = np.array([done]).reshape((1, -1))
                    self.episode_reward = total_episode_reward_logger(
                        self.episode_reward, ep_rew, ep_done, writer,
                        self.num_timesteps)

                episode_rewards[-1] += rew
                self.cumul_reward.append(self.cumul_reward[-1] + rew)
                if done:
                    maybe_is_success = info.get('is_success')
                    if maybe_is_success is not None:
                        episode_successes.append(float(maybe_is_success))
                    if not isinstance(self.env, VecEnv):
                        obs = self.env.reset()
                    episode_rewards.append(0.0)
                    reset = True

                # Do not train if the warmup phase is not over
                # or if there are not enough samples in the replay buffer
                can_sample = self.replay_buffer.can_sample(self.batch_size)
                if can_sample and self.num_timesteps > self.learning_starts \
                    and self.num_timesteps % self.train_freq == 0:
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    if self.prioritized_replay:
                        experience = self.replay_buffer.sample(
                            self.batch_size,
                            beta=self.beta_schedule.value(self.num_timesteps))
                        (obses_t, actions, rewards, obses_tp1, dones, weights,
                         batch_idxes) = experience
                    else:
                        obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(
                            self.batch_size)
                        weights, batch_idxes = np.ones_like(rewards), None

                    if writer is not None:
                        # run loss backprop with summary, but once every 100 steps save the metadata
                        # (memory, compute time, ...)
                        if (1 + self.num_timesteps) % 100 == 0:
                            run_options = tf.RunOptions(
                                trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata()
                            summary, td_errors = self._train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess,
                                options=run_options,
                                run_metadata=run_metadata)
                            writer.add_run_metadata(
                                run_metadata, 'step%d' % self.num_timesteps)
                        else:
                            summary, td_errors = self._train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess)
                        writer.add_summary(summary, self.num_timesteps)
                    else:
                        _, td_errors = self._train_step(obses_t,
                                                        actions,
                                                        rewards,
                                                        obses_tp1,
                                                        obses_tp1,
                                                        dones,
                                                        weights,
                                                        sess=self.sess)

                    if self.prioritized_replay:
                        new_priorities = np.abs(
                            td_errors) + self.prioritized_replay_eps
                        self.replay_buffer.update_priorities(
                            batch_idxes, new_priorities)

                if can_sample and self.num_timesteps > self.learning_starts and \
                        self.num_timesteps % self.target_network_update_freq == 0:
                    # Update target network periodically.
                    self.update_target(sess=self.sess)

                if len(episode_rewards[-101:-1]) == 0:
                    mean_100ep_reward = -np.inf
                else:
                    mean_100ep_reward = round(
                        float(np.mean(episode_rewards[-101:-1])), 1)

                num_episodes = len(episode_rewards)
                if self.verbose >= 1 and done and log_interval is not None and len(
                        episode_rewards) % log_interval == 0:
                    logger.record_tabular("steps", self.num_timesteps)
                    logger.record_tabular("episodes", num_episodes)
                    if len(episode_successes) > 0:
                        logger.logkv("success rate",
                                     np.mean(episode_successes[-100:]))
                    logger.record_tabular("mean 100 episode reward",
                                          mean_100ep_reward)
                    logger.record_tabular(
                        "% time spent exploring",
                        int(100 * self.exploration.value(self.num_timesteps)))
                    logger.dump_tabular()
                print('timestep: {}'.format(self.num_timesteps), end='\r\n')
                self.num_timesteps += 1

        return self
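
# Hedged sketch of the serialize_floats helper used in the test-eval block above
# (its definition is not part of this example); presumably it just casts numpy
# scalars to plain Python floats so json.dump can serialize them.
def serialize_floats(values):
    return [float(v) for v in values]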
Exemplo n.º 15
0
def make_behavior_policy(env_id, base_env, learner, teach_behavior_policy,
                         teachers_conf, behavior_policy_params):
    """
    Create a behavioral policy for training.

    :param env_id: The gym id of the environment to train on
    :param base_env: An actual instance of the env, without any wrappers around it
    :param learner: The learner object (one of the classes from learners.py)
    :param teach_behavior_policy: The name of the behavior_policy to create
    :param teachers_conf: The configuration for the teachers
    :param behavior_policy_params: The params for the behavior policy
    """
    teachers = []
    behavior_policy = None
    if teach_behavior_policy is None:
        return None

    for teacher_conf in teachers_conf:
        action_noise = None
        if 'noise_type' in teacher_conf:
            nb_actions = base_env.action_space.shape[-1]
            noise, _ = parse_noise_types(teacher_conf.pop('noise_type'),
                                         nb_actions)
        else:
            noise = None
        if teacher_conf['type'] == 'pretrained':
            pretrained_agent = PPO2.load(teacher_conf['model_path'] % env_id)
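            # Note: the teacher lambdas below close over pretrained_agent and noise
            # by reference, so if teachers_conf lists several 'pretrained' teachers
            # they will all end up calling the most recently loaded agent.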
            if noise is not None:
                teacher_fns = [
                    lambda obs: pretrained_agent.predict(obs,
                                                         deterministic=False)
                    [0] / np.abs(learner.action_space.low) + noise()
                ]
            else:
                teacher_fns = [
                    lambda obs: pretrained_agent.predict(obs,
                                                         deterministic=False)
                    [0] / np.abs(learner.action_space.low)
                ]
        else:
            teacher_fns = base_env.make_teachers(teacher_conf.pop('type'),
                                                 noise, **teacher_conf)
        teachers += teacher_fns

    if teach_behavior_policy == 'random':
        behavior_policy = RandomBehaviorPolicy(base_env, learner, teachers,
                                               **behavior_policy_params)
    elif teach_behavior_policy == 'only_teacher':
        behavior_policy = OnlyTeacherBehaviorPolicy(base_env, learner,
                                                    teachers)
    elif teach_behavior_policy == 'optimal':
        if env_id == 'OneGoalPickPlaceEnv-v0':
            behavior_policy = OptimalPickPlaceBehaviorPolicy(learner, teachers)
        elif env_id == 'SanityPath-v0':
            behavior_policy = OptimalPathBehaviorPolicy(learner, teachers)
        elif env_id == 'SanityPathMujoco-v0':
            behavior_policy = OptimalReachBehaviorPolicy(learner, teachers)

    elif teach_behavior_policy == 'dqn' or teach_behavior_policy == 'bdqn':
        dqn_env = gym.make(env_id)
        if 'use_learner' not in behavior_policy_params or behavior_policy_params[
                'use_learner']:
            dqn_env.action_space = spaces.Discrete(len(teachers) + 1)
        else:
            dqn_env.action_space = spaces.Discrete(len(teachers))
        if teach_behavior_policy == 'dqn':
            dqn = NiceDQN(env=dqn_env,
                          policy=MlpPolicy,
                          **behavior_policy_params.pop('dqn_params'))

            dqn.exploration = LinearSchedule(schedule_timesteps=int(
                behavior_policy_params.pop('num_timesteps') * 0.2),
                                             initial_p=1.0,
                                             final_p=0.02)
            behavior_policy = DQNChoiceBehaviorPolicy(base_env, learner,
                                                      teachers, dqn,
                                                      **behavior_policy_params)
        else:
            bdqn = BDQN(env=dqn_env,
                        **behavior_policy_params.pop('bdqn_params'))
            behavior_policy = BDQNUncertaintyBehaviorPolicy(
                base_env, learner, teachers, bdqn, **behavior_policy_params)
    elif teach_behavior_policy == 'critic':
        behavior_policy = CriticBehaviorPolicy(base_env, learner, teachers,
                                               learner,
                                               **behavior_policy_params)
    elif teach_behavior_policy == 'acteach':
        behavior_policy = ACTeachBehaviorPolicy(base_env, learner, teachers,
                                                learner,
                                                **behavior_policy_params)
    elif teach_behavior_policy is not None:
        raise ValueError('%s not a valid learning behavior policy' %
                         teach_behavior_policy)

    return behavior_policy
Exemplo n.º 16
0
def train(method="SAC"):
    def get_multi_process_env(num_of_envs,
                              subprocess=True,
                              amplitude_scaling=False,
                              frameskip=5,
                              with_goals=False,
                              action_type=ActionType.POSITION,
                              difficulty=1,
                              initializer="random",
                              testing=False):

        if initializer == "random":
            initializer = RandomInitializer(difficulty=difficulty)
        elif initializer == "completely_random":
            initializer = CompletelyRandomInitializer()

        def _make_env(rank):
            def _init():
                obs_type = ObservationType.WITH_GOALS if with_goals else ObservationType.WITHOUT_GOALS
                out_env = CubeEnv(frameskip=frameskip,
                                  visualization=False,
                                  initializer=initializer,
                                  action_type=action_type,
                                  observation_type=obs_type,
                                  testing=testing)
                out_env.seed(seed=54321)
                out_env.action_space.seed(seed=54321)
                if not with_goals:
                    out_env = FlatObservationWrapper(
                        out_env, amplitude_scaling=amplitude_scaling)
                    out_env = TimeFeatureWrapper(out_env,
                                                 max_steps=math.ceil(
                                                     3750 / frameskip))
                else:
                    out_env = GoalObservationWrapper(
                        out_env, amplitude_scaling=amplitude_scaling)
                return out_env

            return _init

        if subprocess:
            return SubprocVecEnv(
                [_make_env(rank=i) for i in range(num_of_envs)])
        else:
            return DummyVecEnv([_make_env(rank=i) for i in range(num_of_envs)])

    date_time_str = datetime.now().strftime("%m_%d_%Y_%H_%M_%S_")
    print(method, date_time_str)
    set_global_seeds(0)

    if method == "HER":
        env = get_multi_process_env(1,
                                    subprocess=False,
                                    amplitude_scaling=True,
                                    frameskip=5,
                                    with_goals=True)
        env.set_attr("reward_range", 1000)
        policy_kwargs = dict(layers=[128, 128], act_fun=tf.tanh)

        n_actions = env.action_space.shape[-1]
        action_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions),
                                                    sigma=float(0.2) *
                                                    np.ones(n_actions))

        model = HER("MlpPolicy",
                    env,
                    SAC,
                    policy_kwargs=policy_kwargs,
                    n_sampled_goal=4,
                    goal_selection_strategy='future',
                    verbose=1,
                    tensorboard_log="tblogs",
                    batch_size=512,
                    buffer_size=100000,
                    gamma=0.98,
                    learning_starts=10000,
                    random_exploration=0.15)
        model.learn(int(2e6),
                    log_interval=10,
                    callback=CheckpointCallback(
                        save_freq=int(1e5),
                        save_path='models/checkpoint_saves',
                        name_prefix=method + '_' + date_time_str),
                    tb_log_name=method + '_' + date_time_str)
    if method == "SAC":
        env = VecNormalize(VecFrameStack(
            get_multi_process_env(1,
                                  subprocess=False,
                                  amplitude_scaling=False,
                                  frameskip=5,
                                  action_type=ActionType.POSITION,
                                  difficulty=1,
                                  initializer="completely_random"), 4),
                           norm_reward=False,
                           clip_reward=1500,
                           gamma=0.99)
        policy_kwargs = dict(layers=[256, 256])

        n_actions = env.action_space.shape[-1]
        action_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions),
                                                    sigma=float(0.2) *
                                                    np.ones(n_actions))
        model = SAC("LnMlpPolicy",
                    env,
                    policy_kwargs=policy_kwargs,
                    buffer_size=1000000,
                    batch_size=256,
                    gamma=0.99,
                    learning_rate=LinearSchedule(int(2e6),
                                                 5e-5,
                                                 initial_p=3e-4).value,
                    train_freq=64,
                    gradient_steps=4,
                    tau=0.005,
                    learning_starts=10000,
                    tensorboard_log="tblogs",
                    verbose=1,
                    use_emph_exp=True,
                    action_noise=action_noise)
        model.learn(int(2e6),
                    log_interval=10,
                    callback=CheckpointCallback(
                        save_freq=int(5e5),
                        save_path='models/checkpoint_saves',
                        name_prefix=method + '_' + date_time_str),
                    tb_log_name=method + '_' + date_time_str)
        env.save("normalized_env_" + date_time_str)
    if method == "CONTINUE_SAC":
        difficulty = 4
        env = VecNormalize.load(
            "models/normalized_env_frame_stacked_model",
            VecFrameStack(
                get_multi_process_env(1,
                                      subprocess=False,
                                      amplitude_scaling=True,
                                      frameskip=5,
                                      action_type=ActionType.POSITION,
                                      difficulty=difficulty,
                                      initializer="random",
                                      testing=True), 4))

        model = SAC.load(
            "models/checkpoint_saves/SAC_09_18_2020_19_07_42__1000000_steps.zip",
            env=env,
            tensorboard_log="tblogs",
        )
        model.learn(int(1e6),
                    log_interval=10,
                    callback=CheckpointCallback(
                        save_freq=int(5e5),
                        save_path='models/checkpoint_saves',
                        name_prefix=method + '_' + date_time_str),
                    tb_log_name=method + '_' + date_time_str)
        env.save("normalized_env_difficulty_" + str(difficulty))
        model.save(
            os.path.join('models', "model_difficulty_" + str(difficulty)))
    if method == "save_vec_env":
        env = VecNormalize(
            get_multi_process_env(1,
                                  subprocess=False,
                                  amplitude_scaling=True,
                                  frameskip=5,
                                  action_type=ActionType.POSITION,
                                  difficulty=1,
                                  initializer="completely_random"))

        model = SAC.load(
            "models/checkpoint_saves/SAC_09_18_2020_14_27_30__2000000_steps.zip",
            env=env)
        model.learn(int(1e5), log_interval=1)
        env.save("normalized_env_without_framestack")
        return
    else:
        return

    print("save model: ", os.path.join('models', method + '_' + date_time_str))
Exemplo n.º 17
0
    def learn(
        self,
        total_timesteps,
        model_coworker,
        role,
        callback=None,
        log_interval=100,
        tb_log_name="DQN",
        reset_num_timesteps=True,
        replay_wrapper=None,
        clipping_during_training=True,
    ):

        new_tb_log = self._init_num_timesteps(reset_num_timesteps)
        callback = self._init_callback(callback)

        with SetVerbosity(self.verbose), TensorboardWriter(
                self.graph, self.tensorboard_log, tb_log_name,
                new_tb_log) as writer:
            self._setup_learn()

            # Create the replay buffer
            if self.prioritized_replay:
                self.replay_buffer = PrioritizedReplayBuffer(
                    self.buffer_size, alpha=self.prioritized_replay_alpha)
                if self.prioritized_replay_beta_iters is None:
                    prioritized_replay_beta_iters = total_timesteps
                else:
                    prioritized_replay_beta_iters = self.prioritized_replay_beta_iters
                self.beta_schedule = LinearSchedule(
                    prioritized_replay_beta_iters,
                    initial_p=self.prioritized_replay_beta0,
                    final_p=1.0,
                )
            else:
                if self.replay_buffer is None:
                    self.replay_buffer = ReplayBuffer(self.buffer_size)
                self.beta_schedule = None

            if replay_wrapper is not None:
                assert (not self.prioritized_replay
                        ), "Prioritized replay buffer is not supported by HER"
                self.replay_buffer = replay_wrapper(self.replay_buffer)

            # Create the schedule for exploration starting from 1.
            self.exploration = LinearSchedule(
                schedule_timesteps=int(self.exploration_fraction *
                                       total_timesteps),
                initial_p=self.exploration_initial_eps,
                final_p=self.exploration_final_eps,
            )

            episode_rewards = [0.0]
            episode_successes = []

            callback.on_training_start(locals(), globals())
            callback.on_rollout_start()

            reset = True
            obs = self.env.reset()
            # Retrieve unnormalized observation for saving into the buffer
            if self._vec_normalize_env is not None:
                obs_ = self._vec_normalize_env.get_original_obs().squeeze()

            for _ in range(total_timesteps):
                # Take action and update exploration to the newest value
                kwargs = {}
                if not self.param_noise:
                    update_eps = self.exploration.value(self.num_timesteps)
                    update_param_noise_threshold = 0.0
                else:
                    update_eps = 0.0
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                    # for detailed explanation.
                    update_param_noise_threshold = -np.log(
                        1.0 - self.exploration.value(self.num_timesteps) +
                        self.exploration.value(self.num_timesteps) /
                        float(self.env.action_space.n))
                    kwargs["reset"] = reset
                    kwargs[
                        "update_param_noise_threshold"] = update_param_noise_threshold
                    kwargs["update_param_noise_scale"] = True
                with self.sess.as_default():
                    action = self.act(np.array(obs)[None],
                                      update_eps=update_eps,
                                      **kwargs)[0]

                turn, speed = None, None
                if role == "turn":
                    turn = action
                    speed, nothing = model_coworker.predict(np.array(obs))
                else:
                    turn, nothing = model_coworker.predict(np.array(obs))
                    speed = action

                if clipping_during_training:
                    # check if next state (after action) would be outside of fish tank (CLIPPING)
                    env_state = self.env.get_state()
                    turn_speed = self.env.action([turn, speed])
                    global_turn = env_state[0][2] + turn_speed[0]
                    coords = np.array([
                        env_state[0][0] + turn_speed[1] * np.cos(global_turn),
                        env_state[0][1] + turn_speed[1] * np.sin(global_turn),
                    ])
                    changed = False
                    if coords[0] < -0.49:
                        coords[0] = -0.47
                        changed = True
                    elif coords[0] > 0.49:
                        coords[0] = 0.47
                        changed = True

                    if coords[1] < -0.49:
                        coords[1] = -0.47
                        changed = True
                    elif coords[1] > 0.49:
                        coords[1] = 0.47
                        changed = True

                    if changed:
                        diff = coords - env_state[0, :2]
                        speed = np.linalg.norm(diff)
                        angles = np.arctan2(diff[1], diff[0])
                        turn = angles - env_state[0, 2]
                        turn = turn - 2 * np.pi if turn > np.pi else turn
                        turn = turn + 2 * np.pi if turn < -np.pi else turn

                        # convert to DQN output
                        dist_turn = np.abs(self.env.turn_rate_bins - turn)
                        dist_speed = np.abs(self.env.speed_bins - speed)

                        # convert to bins
                        turn = np.argmin(dist_turn, axis=0)
                        speed = np.argmin(dist_speed, axis=0)

                        if role == "turn":
                            action = turn
                        else:
                            action = speed

                reset = False
                new_obs, rew, done, info = self.env.step([turn, speed])

                self.num_timesteps += 1

                # Stop training if return value is False
                if callback.on_step() is False:
                    break

                # Store only the unnormalized version
                if self._vec_normalize_env is not None:
                    new_obs_ = self._vec_normalize_env.get_original_obs(
                    ).squeeze()
                    reward_ = self._vec_normalize_env.get_original_reward(
                    ).squeeze()
                else:
                    # Avoid changing the original ones
                    obs_, new_obs_, reward_ = obs, new_obs, rew

                # Store transition in the replay buffer, but change reward to 0 (use it for plot later though)
                self.replay_buffer.add(obs_, action, 0, new_obs_, float(done))

                # Also give transition to model coworker
                if model_coworker.replay_buffer is None:
                    model_coworker.replay_buffer = ReplayBuffer(
                        self.buffer_size)
                if role == "turn":
                    model_coworker.replay_buffer.add(obs_, speed, 0, new_obs_,
                                                     float(done))
                else:
                    model_coworker.replay_buffer.add(obs_, turn, 0, new_obs_,
                                                     float(done))

                obs = new_obs
                # Save the unnormalized observation
                if self._vec_normalize_env is not None:
                    obs_ = new_obs_

                if writer is not None:
                    ep_rew = np.array([reward_]).reshape((1, -1))
                    ep_done = np.array([done]).reshape((1, -1))
                    tf_util.total_episode_reward_logger(
                        self.episode_reward, ep_rew, ep_done, writer,
                        self.num_timesteps)

                episode_rewards[-1] += reward_
                if done:
                    maybe_is_success = info.get("is_success")
                    if maybe_is_success is not None:
                        episode_successes.append(float(maybe_is_success))
                    if not isinstance(self.env, VecEnv):
                        obs = self.env.reset()
                    episode_rewards.append(0.0)
                    reset = True

                # Do not train if the warmup phase is not over
                # or if there are not enough samples in the replay buffer
                can_sample = self.replay_buffer.can_sample(self.batch_size)
                if (can_sample and self.num_timesteps > self.learning_starts
                        and self.num_timesteps % self.train_freq == 0):

                    callback.on_rollout_end()
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    # pytype:disable=bad-unpacking
                    if self.prioritized_replay:
                        assert (
                            self.beta_schedule is not None
                        ), "BUG: should be LinearSchedule when self.prioritized_replay True"
                        experience = self.replay_buffer.sample(
                            self.batch_size,
                            beta=self.beta_schedule.value(self.num_timesteps),
                            env=self._vec_normalize_env,
                        )
                        (
                            obses_t,
                            actions,
                            rewards,
                            obses_tp1,
                            dones,
                            weights,
                            batch_idxes,
                        ) = experience
                    else:
                        (
                            obses_t,
                            actions,
                            rewards,
                            obses_tp1,
                            dones,
                        ) = self.replay_buffer.sample(
                            self.batch_size, env=self._vec_normalize_env)
                        # also sample from expert buffer
                        (
                            obses_t_exp,
                            actions_exp,
                            rewards_exp,
                            obses_tp1_exp,
                            dones_exp,
                        ) = self.expert_buffer.sample(
                            self.batch_size, env=self._vec_normalize_env)
                        weights, batch_idxes = np.ones_like(rewards), None
                        weights_exp, batch_idxes_exp = np.ones_like(
                            rewards_exp), None
                    # pytype:enable=bad-unpacking
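                    # Note: the expert-buffer batch (obses_t_exp, actions_exp, ...) fed
                    # to _train_step below is only sampled in the non-prioritized branch
                    # above, so running with prioritized_replay=True would leave those
                    # names undefined.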

                    if writer is not None:
                        # run loss backprop with summary, but once every 100 steps save the metadata
                        # (memory, compute time, ...)
                        if (1 + self.num_timesteps) % 100 == 0:
                            run_options = tf.RunOptions(
                                trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata()
                            summary, td_errors = self._train_step(
                                np.append(obses_t, obses_t_exp, axis=0),
                                np.append(actions,
                                          actions_exp.flatten(),
                                          axis=0),
                                np.append(rewards,
                                          rewards_exp.flatten(),
                                          axis=0),
                                np.append(obses_tp1, obses_tp1_exp, axis=0),
                                np.append(obses_tp1, obses_tp1_exp, axis=0),
                                np.append(dones.flatten(),
                                          dones_exp.flatten(),
                                          axis=0),
                                np.append(weights, weights_exp),
                                sess=self.sess,
                                options=run_options,
                                run_metadata=run_metadata,
                            )
                            writer.add_run_metadata(
                                run_metadata, "step%d" % self.num_timesteps)
                        else:
                            summary, td_errors = self._train_step(
                                np.append(obses_t, obses_t_exp, axis=0),
                                np.append(actions,
                                          actions_exp.flatten(),
                                          axis=0),
                                np.append(rewards,
                                          rewards_exp.flatten(),
                                          axis=0),
                                np.append(obses_tp1, obses_tp1_exp, axis=0),
                                np.append(obses_tp1, obses_tp1_exp, axis=0),
                                np.append(dones.flatten(),
                                          dones_exp.flatten(),
                                          axis=0),
                                np.append(weights, weights_exp),
                                sess=self.sess,
                            )
                        writer.add_summary(summary, self.num_timesteps)
                    else:
                        _, td_errors = self._train_step(
                            np.append(obses_t, obses_t_exp, axis=0),
                            np.append(actions, actions_exp.flatten(), axis=0),
                            np.append(rewards, rewards_exp.flatten(), axis=0),
                            np.append(obses_tp1, obses_tp1_exp, axis=0),
                            np.append(obses_tp1, obses_tp1_exp, axis=0),
                            np.append(dones.flatten(),
                                      dones_exp.flatten(),
                                      axis=0),
                            np.append(weights, weights_exp),
                            sess=self.sess,
                        )

                    if self.prioritized_replay:
                        new_priorities = np.abs(
                            td_errors) + self.prioritized_replay_eps
                        assert isinstance(self.replay_buffer,
                                          PrioritizedReplayBuffer)
                        self.replay_buffer.update_priorities(
                            batch_idxes, new_priorities)

                    callback.on_rollout_start()

                if (can_sample and self.num_timesteps > self.learning_starts
                        and self.num_timesteps %
                        self.target_network_update_freq == 0):
                    # Update target network periodically.
                    self.update_target(sess=self.sess)

                if len(episode_rewards[-101:-1]) == 0:
                    mean_100ep_reward = -np.inf
                else:
                    mean_100ep_reward = round(
                        float(np.mean(episode_rewards[-101:-1])), 1)

                num_episodes = len(episode_rewards)
                if (self.verbose >= 1 and done and log_interval is not None
                        and len(episode_rewards) % log_interval == 0):
                    logger.record_tabular("steps", self.num_timesteps)
                    logger.record_tabular("episodes", num_episodes)
                    if len(episode_successes) > 0:
                        logger.logkv("success rate",
                                     np.mean(episode_successes[-100:]))
                    logger.record_tabular("mean 100 episode reward",
                                          mean_100ep_reward)
                    logger.record_tabular(
                        "% time spent exploring",
                        int(100 * self.exploration.value(self.num_timesteps)),
                    )
                    logger.dump_tabular()

        callback.on_training_end()
        return self
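For reference, a minimal sketch of the batch construction used in the _train_step calls above: replay-buffer transitions and expert transitions are concatenated along the batch axis with np.append(..., axis=0), and the per-sample weights are concatenated the same way. Shapes and array contents below are hypothetical, chosen only to illustrate the mechanism.

import numpy as np

# Hypothetical batch: 32 replay transitions and 32 expert transitions,
# 4-dimensional observations, scalar rewards.
obses_t = np.zeros((32, 4))
obses_t_exp = np.ones((32, 4))
rewards = np.zeros(32)
rewards_exp = np.ones(32)
weights = np.ones_like(rewards)
weights_exp = np.ones_like(rewards_exp)

batch_obs = np.append(obses_t, obses_t_exp, axis=0)            # shape (64, 4)
batch_rew = np.append(rewards, rewards_exp.flatten(), axis=0)  # shape (64,)
batch_w = np.append(weights, weights_exp)                      # shape (64,)
assert batch_obs.shape == (64, 4) and batch_rew.shape == (64,) and batch_w.shape == (64,)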
Exemplo n.º 18
0
    purity = False
    dirname = 'Bayesian_cases'

if config == 1:
    feedback = 'Markov'  #'Markov' or 'Bayes'
    qs = 0
    purity = True
    dirname = 'Markovian_cases'

e_c = 0.002  #entropy coefficient
steady = True  #if True, always resets to steady-state conditions
plot = False  #if True, always resets to fixed out-of-equilibrium conditions
N = 1  #number of parallel workers
LRo = 2e-4  #define the learning rate
TIMESTEPS = int(6e6)  #training steps
sched_LR = LinearSchedule(1, LRo, 0)  #schedule for lr reduction
LR = sched_LR.value
clip = LinearSchedule(1, 0.2,
                      0).value  #optional schedule for the PPO clipping parameter

title = 'feed{}_steady{}_lro{}_ts{}M_N{}_ec{}_{}_{}_{}_partial{}_fbound{}_tanh0.01_pur0.5_hurwseedr0_1e5'.format(
    feedback, steady, LRo, TIMESTEPS / 1e6, N, e_c, k, mirr, g, partial,
    fbound)
#make checkpoint callback
checkpoint_callback = CheckpointCallback(
    save_freq=int(1000000 / N),
    save_path='/home/fallani/prova/New/Optomech_checkpoint/{}/{}_q{}'.format(
        dirname, title, qs))
callback = checkpoint_callback
#set F matrix
zero = np.zeros((2, 2))
Exemplo n.º 19
0
def main(args):
    """
    Train a DQN agent on cartpole env
    :param args: (Parsed Arguments) the input arguments
    """
    with tf_utils.make_session(8) as sess:
        # Create the environment
        env = gym.make("CartPole-v0")
        # Create all the functions necessary to train the model
        act, train, update_target, _ = deepq.build_train(
            q_func=CustomPolicy,
            ob_space=env.observation_space,
            ac_space=env.action_space,
            optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
            sess=sess)
        # Create the replay buffer
        replay_buffer = ReplayBuffer(50000)
        # Create the schedule for exploration starting from 1 (every action is random) down to
        # 0.02 (98% of actions are selected according to values predicted by the model).
        exploration = LinearSchedule(schedule_timesteps=10000,
                                     initial_p=1.0,
                                     final_p=0.02)

        # Initialize the parameters and copy them to the target network.
        tf_utils.initialize()
        update_target()

        episode_rewards = [0.0]
        obs = env.reset()
        for step in itertools.count():
            # Take action and update exploration to the newest value
            action = act(obs[None], update_eps=exploration.value(step))[0]
            new_obs, rew, done, _ = env.step(action)
            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0)

            if len(episode_rewards[-101:-1]) == 0:
                mean_100ep_reward = -np.inf
            else:
                mean_100ep_reward = round(
                    float(np.mean(episode_rewards[-101:-1])), 1)

            is_solved = step > 100 and mean_100ep_reward >= 200

            if args.no_render and step > args.max_timesteps:
                break

            if is_solved:
                if args.no_render:
                    break
                # Show off the result
                env.render()
            else:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if step > 1000:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        32)
                    train(obses_t, actions, rewards, obses_tp1, dones,
                          np.ones_like(rewards))
                # Update target network periodically.
                if step % 1000 == 0:
                    update_target()

            if done and len(episode_rewards) % 10 == 0:
                logger.record_tabular("steps", step)
                logger.record_tabular("episodes", len(episode_rewards))
                logger.record_tabular("mean episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(step)))
                logger.dump_tabular()
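The exploration schedule above only produces a number; the rule that act(..., update_eps=...) applies with it is, roughly, standard epsilon-greedy action selection. A minimal sketch of that rule (not the library's exact implementation):

import numpy as np

rng = np.random.default_rng()

def epsilon_greedy(q_values, eps):
    """With probability eps take a uniformly random action, otherwise the greedy one."""
    if rng.random() < eps:
        return int(rng.integers(len(q_values)))
    return int(np.argmax(q_values))

# eps = exploration.value(step) starts near 1.0 (almost every action random)
# and settles at 0.02 around step 10000, per the schedule defined above.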
Exemplo n.º 20
0
    def learn(self,
              total_timesteps,
              callback=None,
              seed=None,
              log_interval=100,
              tb_log_name="DQN",
              reset_num_timesteps=True,
              replay_wrapper=None):
        new_tb_log = self._init_num_timesteps(reset_num_timesteps)

        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn(seed)

            # Create the replay buffer
            if self.prioritized_replay:
                self.replay_buffer = PrioritizedReplayBuffer(
                    self.buffer_size, alpha=self.prioritized_replay_alpha)
                if self.prioritized_replay_beta_iters is None:
                    prioritized_replay_beta_iters = total_timesteps
                else:
                    prioritized_replay_beta_iters = self.prioritized_replay_beta_iters
                self.beta_schedule = LinearSchedule(
                    prioritized_replay_beta_iters,
                    initial_p=self.prioritized_replay_beta0,
                    final_p=1.0)
            else:
                self.replay_buffer = ReplayBuffer(self.buffer_size)
                self.beta_schedule = None

            if replay_wrapper is not None:
                assert not self.prioritized_replay, "Prioritized replay buffer is not supported by HER"
                self.replay_buffer = replay_wrapper(self.replay_buffer)

            # Create the schedule for exploration starting from 1.
            self.exploration = LinearSchedule(
                schedule_timesteps=int(self.exploration_fraction *
                                       total_timesteps),
                initial_p=1.0,
                final_p=self.exploration_final_eps)

            episode_rewards = [0.0]
            episode_successes = []
            Globals.env = self.env
            obs = self.env.reset()
            reset = True
            self.episode_reward = np.zeros((1, ))
            timesteps_last_log = 0
            avr_ep_len_per_log = None
            sleep = 0.045

            for _ in range(total_timesteps):

                if Globals.loading:
                    Globals.loading = False

                while Globals.pause_game:
                    pass

                if Globals.exit_learning:
                    break

                if callback is not None:
                    # Only stop training if return value is False, not when it is None. This is for backwards
                    # compatibility with callbacks that have no return statement.
                    if callback(locals(), globals()) is False:
                        break
                # Take action and update exploration to the newest value
                kwargs = {}
                if not self.param_noise:
                    update_eps = self.exploration.value(self.num_timesteps)
                    update_param_noise_threshold = 0.
                else:
                    update_eps = 0.
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                    # for detailed explanation.
                    update_param_noise_threshold = \
                        -np.log(1. - self.exploration.value(self.num_timesteps) +
                                self.exploration.value(self.num_timesteps) / float(self.env.action_space.n))
                    kwargs['reset'] = reset
                    kwargs[
                        'update_param_noise_threshold'] = update_param_noise_threshold
                    kwargs['update_param_noise_scale'] = True
                with self.sess.as_default():
                    action = self.act(np.array(obs)[None],
                                      update_eps=update_eps,
                                      **kwargs)[0]
                env_action = action
                reset = False
                new_obs, rew, done, info = self.env.step(env_action)
                # Store transition in the replay buffer.
                self.replay_buffer.add(obs, action, rew, new_obs, float(done))
                obs = new_obs

                if writer is not None:
                    ep_rew = np.array([rew]).reshape((1, -1))
                    ep_done = np.array([done]).reshape((1, -1))
                    self.episode_reward = total_episode_reward_logger(
                        self.episode_reward, ep_rew, ep_done, writer,
                        self.num_timesteps)

                episode_rewards[-1] += rew
                if done:
                    maybe_is_success = info.get('is_success')
                    if maybe_is_success is not None:
                        episode_successes.append(float(maybe_is_success))
                    if not isinstance(self.env, VecEnv):
                        obs = self.env.reset()
                    episode_rewards.append(0.0)
                    reset = True

                # Do not train if the warmup phase is not over
                # or if there are not enough samples in the replay buffer
                can_sample = self.replay_buffer.can_sample(self.batch_size)

                if can_sample:
                    sleep = 0.035

                time.sleep(sleep)

                if can_sample and self.num_timesteps > self.learning_starts \
                        and self.num_timesteps % self.train_freq == 0:
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    if self.prioritized_replay:
                        experience = self.replay_buffer.sample(
                            self.batch_size,
                            beta=self.beta_schedule.value(self.num_timesteps))
                        (obses_t, actions, rewards, obses_tp1, dones, weights,
                         batch_idxes) = experience
                    else:
                        obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(
                            self.batch_size)
                        weights, batch_idxes = np.ones_like(rewards), None

                    if writer is not None:
                        # run loss backprop with summary, but once every 100 steps save the metadata
                        # (memory, compute time, ...)
                        if (1 + self.num_timesteps) % 100 == 0:
                            run_options = tf.RunOptions(
                                trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata()
                            summary, td_errors = self._train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess,
                                options=run_options,
                                run_metadata=run_metadata)
                            writer.add_run_metadata(
                                run_metadata, 'step%d' % self.num_timesteps)
                        else:
                            summary, td_errors = self._train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess)
                        writer.add_summary(summary, self.num_timesteps)
                    else:
                        _, td_errors = self._train_step(obses_t,
                                                        actions,
                                                        rewards,
                                                        obses_tp1,
                                                        obses_tp1,
                                                        dones,
                                                        weights,
                                                        sess=self.sess)

                    if self.prioritized_replay:
                        new_priorities = np.abs(
                            td_errors) + self.prioritized_replay_eps
                        self.replay_buffer.update_priorities(
                            batch_idxes, new_priorities)

                if can_sample and self.num_timesteps > self.learning_starts and \
                        self.num_timesteps % self.target_network_update_freq == 0:
                    # Update target network periodically.
                    self.update_target(sess=self.sess)

                if len(episode_rewards[-101:-1]) == 0:
                    mean_100ep_reward = -np.inf
                else:
                    mean_100ep_reward = round(
                        float(np.mean(episode_rewards[-101:-1])), 1)

                if len(episode_rewards) % log_interval == 0:
                    avr_ep_len_per_log = (self.num_timesteps -
                                          timesteps_last_log) / log_interval
                    timesteps_last_log = self.num_timesteps

                num_episodes = len(episode_rewards)
                if self.verbose >= 1 and done and log_interval is not None and len(
                        episode_rewards) % log_interval == 0:
                    logger.record_tabular("steps", self.num_timesteps)
                    logger.record_tabular("episodes", num_episodes)
                    if len(episode_successes) > 0:
                        logger.logkv("success rate",
                                     np.mean(episode_successes[-100:]))
                    logger.record_tabular("mean 100 episode reward",
                                          mean_100ep_reward)
                    logger.record_tabular(
                        "% time spent exploring",
                        int(100 * self.exploration.value(self.num_timesteps)))
                    logger.record_tabular("avr length of last logged ep",
                                          avr_ep_len_per_log)
                    logger.dump_tabular()

                self.num_timesteps += 1
                Globals.steps -= 1

        return self
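The parameter-noise branch above recomputes the same threshold expression on every step. As a standalone helper it is just the following sketch of the formula already present in the loop (per Plappert et al., 2017, Appendix C.1, it makes perturbed-policy exploration comparable to eps-greedy with the given eps):

import numpy as np

def param_noise_threshold(eps, n_actions):
    """Threshold so that parameter-space noise matches eps-greedy exploration."""
    return -np.log(1. - eps + eps / float(n_actions))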
Exemplo n.º 21
0
#define custom network
b = 64
class CustomPolicy(FeedForwardPolicy):
    def __init__(self, *args, **kwargs):
        super(CustomPolicy, self).__init__(*args,
                                           **kwargs,
                                           net_arch=[dict(pi=[b, b],
                                                          vf=[b, b])],
                                           feature_extraction="mlp")

e_c = 0.01  #entropy coefficient
feedback = 'Bayes'  #'Markov' or 'Bayes'
steady = True  #if True, always resets to steady-state conditions
N = 1  #number of parallel workers
LRo = 2.5e-4  #learning rate
uact = False  #whether to use u as the action (Bayesian feedback only)
TIMESTEPS = int(50e6)  #training steps
sched_LR = LinearSchedule(1, LRo, 0)  #lr schedule
LR = sched_LR.value
qs = 1e-3  #feedback cost (only Bayesian)
dirname = 'Tesi_bayestraj'  #directory name
title = 'feed{}_steady{}_lro{}_ts{}M_N{}_ec{}_u{}0.35_1e5_hurw_excss'.format(
    feedback, steady, LRo, TIMESTEPS / 1e6, N, e_c, uact)
#make checkpoint callback
checkpoint_callback = CheckpointCallback(
    save_freq=int(100000 / N),
    save_path='/home/fallani/prova/New/Cavity_checkpoint/{}/{}_q{}'.format(
        dirname, title, qs))
callback = checkpoint_callback
#set parameters and start training
params = {'k': 1, 'eta': 1, 'X_kunit': 0.35}  #if a parameter is set to None it will be sampled from a uniform distribution at every reset
args = {'feedback': feedback, 'q': qs, 'uact': uact, 'steadyreset': steady,
        'pow': 0.5, 'params': params, 'plot': False}  #the defaults are: rewfunc=Tools.purity_like_rew, q=1e-4, dt=1e-3, plot=False, pow=0.5
#instantiate environment
env = make_vec_env(CavityEnv, n_envs=N, env_kwargs=args)
#instantiate model
model = PPO2(CustomPolicy,
             env,
             n_steps=128,
             learning_rate=LR,
             lam=0.95,
             ent_coef=e_c,
             verbose=1,
             nminibatches=4,
             noptepochs=4,
             tensorboard_log='/home/fallani/prova/New/TRAIN_Cavity/{}/{}_q{}'.format(
                 dirname, title, qs),
             seed=1)
#train the model
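The snippet ends at this comment, before the actual training call; presumably it continues with something like the line below (an assumed completion based on the TIMESTEPS and callback defined above, not taken from the source):

model.learn(total_timesteps=TIMESTEPS, callback=callback)  # assumed completion, not in the original snippet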
Exemplo n.º 22
0
    def learn(self,
              total_timesteps,
              callback=None,
              seed=None,
              log_interval=100,
              reset_num_timesteps=True):
        """
        Return a trained model.
        :param total_timesteps: (int) The total number of samples to train on
        :param seed: (int) The initial seed for training, if None: keep current seed
        :param callback: (function (dict, dict) -> boolean) function called at every step with the state of the algorithm.
            It takes the local and global variables. If it returns False, training is aborted.
        :param log_interval: (int) The number of timesteps before logging.
        :param reset_num_timesteps: (bool) whether or not to reset the current timestep number (used in logging)
        :return: (BaseRLModel) the trained model
        """

        self._setup_learn(seed)

        self.learning_rate = LinearSchedule(schedule_timesteps=int(
            self.lr_fraction * total_timesteps),
                                            initial_p=self.lr_initial,
                                            final_p=self.lr_final)
        self.temperature = LinearSchedule(schedule_timesteps=int(
            self.temp_fraction * total_timesteps),
                                          initial_p=self.temp_initial,
                                          final_p=self.temp_final)

        # Initialize variables
        episode_rewards = [0.0]
        episode_successes = []
        obs = self.env.reset()
        episode_length = 0

        for _ in range(total_timesteps):

            num_episodes = len(episode_rewards)

            if callback is not None:
                # Only stop training if return value is False, not when it is None.
                if callback(locals(), globals()) is False:
                    break

            # Act
            if hasattr(self.step_model, 'temperature'):
                self.step_model.temperature = self.temperature.value(
                    self.num_timesteps)
            action = self.act(obs, update_eps=self.eps)
            new_obs, reward, done, info = self.env.step(action)
            episode_rewards[-1] += reward

            # Update Q
            self._train_step(obs,
                             action,
                             reward,
                             new_obs,
                             done,
                             lr=self.learning_rate.value(self.num_timesteps))

            obs = new_obs

            # Restart if necessary
            if done:
                maybe_is_success = info.get('is_success')
                if maybe_is_success is not None:
                    episode_successes.append(float(maybe_is_success))

                obs = self.env.reset()
                # print(np.mean(episode_rewards), len(episode_rewards))
                episode_rewards.append(0.0)
                episode_length = 0

            # Performance in last 100 episodes
            if len(episode_rewards[-101:-1]) == 0:
                mean_100ep_reward = -np.inf
            else:
                mean_100ep_reward = round(
                    float(np.mean(episode_rewards[-101:-1])), 6)

            # Logging
            if self.verbose >= 1 and done and log_interval is not None and num_episodes % log_interval == 0:
                logger.record_tabular("steps", self.num_timesteps)
                logger.record_tabular("episodes", num_episodes)
                if len(episode_successes) > 0:
                    logger.logkv("success rate",
                                 np.mean(episode_successes[-100:]))
                logger.record_tabular("mean 100 episode reward",
                                      mean_100ep_reward)
                logger.record_tabular(
                    "Softmax temperature",
                    float(self.temperature.value(self.num_timesteps)))
                logger.record_tabular(
                    "Learning rate",
                    float(self.learning_rate.value(self.num_timesteps)))
                logger.dump_tabular()

            self.num_timesteps += 1
            episode_length += 1

        return self
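The temperature schedule above is only meaningful if step_model maps Q-values to a softmax (Boltzmann) action distribution. A minimal sketch of that mapping, under the assumption that the policy is a standard temperature-scaled softmax (the actual step_model is not shown in this snippet):

import numpy as np

def boltzmann_probs(q_values, temperature):
    """Temperature-scaled softmax over Q-values; higher temperature -> closer to uniform."""
    z = (np.asarray(q_values) - np.max(q_values)) / max(temperature, 1e-8)
    e = np.exp(z)
    return e / e.sum()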
Exemplo n.º 23
0
    def learn(self,
              total_timesteps,
              callback=None,
              log_interval=100,
              tb_log_name="DQN",
              reset_num_timesteps=True,
              replay_wrapper=None):

        new_tb_log = self._init_num_timesteps(reset_num_timesteps)

        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()

            # Create the replay buffer
            if self.prioritized_replay:
                self.replay_buffer = PrioritizedReplayBuffer(
                    self.buffer_size, alpha=self.prioritized_replay_alpha)
                if self.prioritized_replay_beta_iters is None:
                    prioritized_replay_beta_iters = total_timesteps
                else:
                    prioritized_replay_beta_iters = self.prioritized_replay_beta_iters
                self.beta_schedule = LinearSchedule(
                    prioritized_replay_beta_iters,
                    initial_p=self.prioritized_replay_beta0,
                    final_p=1.0)
            else:
                self.replay_buffer = ReplayBuffer(self.buffer_size)
                self.beta_schedule = None

            if replay_wrapper is not None:
                assert not self.prioritized_replay, "Prioritized replay buffer is not supported by HER"
                self.replay_buffer = replay_wrapper(self.replay_buffer)

            # Create the schedule for exploration starting from 1.
            self.exploration = LinearSchedule(
                schedule_timesteps=int(self.exploration_fraction *
                                       total_timesteps),
                initial_p=self.exploration_initial_eps,
                final_p=self.exploration_final_eps)

            episode_rewards = [0.0]
            episode_successes = []
            obs = self.env.reset()
            reset = True

            ############################################################
            # MODIFICATION:
            # Track list of actions taken each episode. This is
            # intentionally not a set so that we can use np.isin.
            action_list = list()
            ############################################################

            for _ in range(total_timesteps):
                if callback is not None:
                    # Only stop training if return value is False, not when it is None. This is for backwards
                    # compatibility with callbacks that have no return statement.
                    if callback(locals(), globals()) is False:
                        break
                # Take action and update exploration to the newest value
                kwargs = {}
                if not self.param_noise:
                    update_eps = self.exploration.value(self.num_timesteps)
                    update_param_noise_threshold = 0.
                else:
                    update_eps = 0.
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                    # for detailed explanation.
                    update_param_noise_threshold = \
                        -np.log(1. - self.exploration.value(self.num_timesteps) +
                                self.exploration.value(self.num_timesteps) / float(self.env.action_space.n))
                    kwargs['reset'] = reset
                    kwargs[
                        'update_param_noise_threshold'] = update_param_noise_threshold
                    kwargs['update_param_noise_scale'] = True
                with self.sess.as_default():
                    ####################################################
                    # MODIFICATION:
                    # Rename variable from original, since it's now
                    # going to come back as an array due to the
                    # modified build_act function being used to
                    # construct everything.
                    action_arr = self.act(np.array(obs)[None],
                                          update_eps=update_eps,
                                          **kwargs)[0]
                    ####################################################
                    # ORIGINAL:
                    # action = self.act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]

                ########################################################
                # MODIFICATION:
                # Get the best action that has not yet been taken this
                # episode.
                action = \
                    action_arr[np.argmin(np.isin(action_arr, action_list))]
                # Add this action to the list.
                action_list.append(action)
                ########################################################

                env_action = action
                reset = False
                new_obs, rew, done, info = self.env.step(env_action)
                # Store transition in the replay buffer.
                self.replay_buffer.add(obs, action, rew, new_obs, float(done))
                obs = new_obs

                if writer is not None:
                    ep_rew = np.array([rew]).reshape((1, -1))
                    ep_done = np.array([done]).reshape((1, -1))
                    total_episode_reward_logger(self.episode_reward, ep_rew,
                                                ep_done, writer,
                                                self.num_timesteps)

                episode_rewards[-1] += rew
                if done:
                    ####################################################
                    # MODIFICATION:
                    # Clear the list.
                    action_list.clear()
                    ####################################################
                    maybe_is_success = info.get('is_success')
                    if maybe_is_success is not None:
                        episode_successes.append(float(maybe_is_success))
                    if not isinstance(self.env, VecEnv):
                        obs = self.env.reset()
                    episode_rewards.append(0.0)
                    reset = True

                # Do not train if the warmup phase is not over
                # or if there are not enough samples in the replay buffer
                can_sample = self.replay_buffer.can_sample(self.batch_size)
                if can_sample and self.num_timesteps > self.learning_starts \
                        and self.num_timesteps % self.train_freq == 0:
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    # pytype:disable=bad-unpacking
                    if self.prioritized_replay:
                        assert self.beta_schedule is not None, \
                               "BUG: should be LinearSchedule when self.prioritized_replay True"
                        experience = self.replay_buffer.sample(
                            self.batch_size,
                            beta=self.beta_schedule.value(self.num_timesteps))
                        (obses_t, actions, rewards, obses_tp1, dones, weights,
                         batch_idxes) = experience
                    else:
                        obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(
                            self.batch_size)
                        weights, batch_idxes = np.ones_like(rewards), None
                    # pytype:enable=bad-unpacking

                    if writer is not None:
                        # run loss backprop with summary, but once every 100 steps save the metadata
                        # (memory, compute time, ...)
                        if (1 + self.num_timesteps) % 100 == 0:
                            run_options = tf.RunOptions(
                                trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata()
                            summary, td_errors = self._train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess,
                                options=run_options,
                                run_metadata=run_metadata)
                            writer.add_run_metadata(
                                run_metadata, 'step%d' % self.num_timesteps)
                        else:
                            summary, td_errors = self._train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess)
                        writer.add_summary(summary, self.num_timesteps)
                    else:
                        _, td_errors = self._train_step(obses_t,
                                                        actions,
                                                        rewards,
                                                        obses_tp1,
                                                        obses_tp1,
                                                        dones,
                                                        weights,
                                                        sess=self.sess)

                    if self.prioritized_replay:
                        new_priorities = np.abs(
                            td_errors) + self.prioritized_replay_eps
                        assert isinstance(self.replay_buffer,
                                          PrioritizedReplayBuffer)
                        self.replay_buffer.update_priorities(
                            batch_idxes, new_priorities)

                if can_sample and self.num_timesteps > self.learning_starts and \
                        self.num_timesteps % self.target_network_update_freq == 0:
                    # Update target network periodically.
                    self.update_target(sess=self.sess)

                if len(episode_rewards[-101:-1]) == 0:
                    mean_100ep_reward = -np.inf
                else:
                    mean_100ep_reward = round(
                        float(np.mean(episode_rewards[-101:-1])), 1)

                num_episodes = len(episode_rewards)
                if self.verbose >= 1 and done and log_interval is not None and len(
                        episode_rewards) % log_interval == 0:
                    logger.record_tabular("steps", self.num_timesteps)
                    logger.record_tabular("episodes", num_episodes)
                    if len(episode_successes) > 0:
                        logger.logkv("success rate",
                                     np.mean(episode_successes[-100:]))
                    logger.record_tabular("mean 100 episode reward",
                                          mean_100ep_reward)
                    logger.record_tabular(
                        "% time spent exploring",
                        int(100 * self.exploration.value(self.num_timesteps)))
                    logger.dump_tabular()

                self.num_timesteps += 1

        return self
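The modification above relies on a small numpy idiom: np.isin(action_arr, action_list) flags which ranked actions were already taken this episode, and np.argmin over that boolean array returns the first False entry, i.e. the highest-ranked unused action (or index 0 if every action has been used). A self-contained illustration with hypothetical values:

import numpy as np

action_arr = np.array([3, 1, 4, 0, 2])   # actions ranked best to worst (hypothetical)
action_list = [3, 1]                     # actions already taken this episode
mask = np.isin(action_arr, action_list)  # [True, True, False, False, False]
action = action_arr[np.argmin(mask)]     # first not-yet-taken action
assert action == 4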
Exemplo n.º 24
0
    def learn(self,
              total_timesteps,
              callback=None,
              seed=None,
              log_interval=100,
              tb_log_name="DQN"):
        with SetVerbosity(self.verbose), TensorboardWriter(
                self.graph, self.tensorboard_log, tb_log_name) as writer:
            self._setup_learn(seed)

            # Create the replay buffer
            if self.prioritized_replay:
                self.replay_buffer = SimplePrioritizedReplayBuffer(
                    self.buffer_size, alpha=self.prioritized_replay_alpha)
                if self.prioritized_replay_beta_iters is None:
                    prioritized_replay_beta_iters = total_timesteps * self.beta_fraction
                else:
                    prioritized_replay_beta_iters = self.prioritized_replay_beta_iters
                self.beta_schedule = LinearSchedule(
                    prioritized_replay_beta_iters,
                    initial_p=self.prioritized_replay_beta0,
                    final_p=1.0)
            else:
                # self.replay_buffer = ReplayBuffer(self.buffer_size, gamma=self.gamma, hindsight=self.hindsight, multistep=self.multistep)
                self.replay_buffer = EpisodeReplayBuffer(
                    self.buffer_size, hindsight=self.hindsight)
                self.solved_replay_buffer = EpisodeReplayBuffer(
                    self.buffer_size, hindsight=self.hindsight)
                # self.replay_buffer = SimpleReplayBuffer(self.buffer_size)
                self.beta_schedule = None
            # Create the schedule for exploration starting from 1.
            self.exploration = LinearSchedule(
                schedule_timesteps=int(self.exploration_fraction *
                                       total_timesteps),
                initial_p=1.0,
                final_p=self.exploration_final_eps)

            episode_rewards = [0.0]
            episode_trans = []
            episode_replays = []
            episode_success = [0] * log_interval
            episode_finals = [0] * log_interval
            episode_losses = []
            is_in_loop = False
            loss_accumulator = [0.] * 50

            episode_places = set()
            episode_div = [0] * log_interval

            full_obs = self.env.reset()
            part_obs = np.concatenate(
                (full_obs['observation'], full_obs['desired_goal']), axis=-1)
            begin_obs = [full_obs] * log_interval

            reset = True
            self.episode_reward = np.zeros((1, ))

            for step in range(total_timesteps):
                # self.steps_made += 1
                # if step >= 7 * 100 * 150:
                #     raise Exception("trigger")
                # curriculum
                # curriculum_scrambles = 1 + int(self.steps_made ** (0.50)) // 500
                # curriculum_step_limit = min((curriculum_scrambles + 2) * 2, 100)
                # self.replay_buffer.set_sampling_cut(curriculum_step_limit)
                # self.env.scrambleSize = curriculum_scrambles
                # self.env.step_limit = curriculum_step_limit

                # Take action and update exploration to the newest value
                kwargs = {}
                if not self.param_noise:
                    update_eps = self.exploration.value(step)
                    update_param_noise_threshold = 0.
                else:
                    update_eps = 0.
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                    # for detailed explanation.
                    update_param_noise_threshold = \
                        -np.log(1. - self.exploration.value(step) +
                                self.exploration.value(step) / float(self.env.action_space.n))
                    kwargs['reset'] = reset
                    kwargs[
                        'update_param_noise_threshold'] = update_param_noise_threshold
                    kwargs['update_param_noise_scale'] = True
                with self.sess.as_default():
                    # Loop breaking
                    if self.loop_breaking and is_in_loop:
                        # update_eps_value = (update_eps + 1.) / 2.
                        update_eps_value = 1.
                    else:
                        update_eps_value = update_eps
                    if self.boltzmann:
                        values = self.predict_q_values(np.array(part_obs))[0]
                        exp = 1. / update_eps_value
                        action = np.random.choice(
                            np.arange(0, values.shape[0]),
                            p=(exp**values) / sum(exp**values))
                    else:
                        action = self.act(np.array(part_obs)[None],
                                          update_eps=update_eps_value,
                                          **kwargs)[0]
                # action = self.env.action_space.sample()
                env_action = action
                reset = False
                new_obs, rew, done, _ = self.env.step(env_action)

                current_place = None
                is_in_loop = False
                try:
                    current_place = tuple(self.env.room_state.flatten())
                except AttributeError:
                    current_place = tuple(new_obs['observation'].flatten())
                if current_place in episode_places:
                    is_in_loop = True
                episode_places.add(current_place)

                # Store transition in the replay buffer.
                # self.replay_buffer.add(part_obs, action, rew, np.concatenate((new_obs['observation'], new_obs['desired_goal'])), float(done))
                episode_replays.append(
                    (full_obs, action, rew, new_obs, float(done)))
                episode_trans.append((full_obs, action, rew, new_obs))
                full_obs = new_obs
                part_obs = np.concatenate(
                    (full_obs['observation'], full_obs['desired_goal']),
                    axis=-1)

                if writer is not None:
                    ep_rew = np.array([rew]).reshape((1, -1))
                    ep_done = np.array([done]).reshape((1, -1))
                    self.episode_reward = total_episode_reward_logger(
                        self.episode_reward, ep_rew, ep_done, writer, step)

                episode_rewards[-1] += rew
                if done:
                    if np.array_equal(full_obs['achieved_goal'],
                                      full_obs['desired_goal']):
                        episode_success.append(1.)
                        self.solved_episodes.append(episode_replays)
                    else:
                        episode_success.append(0.)
                    episode_success = episode_success[1:]
                    episode_div.append(len(episode_places))
                    episode_div = episode_div[1:]
                    self.episodes_completed += 1
                    if self.model_save_freq > 0 and self.episodes_completed % self.model_save_freq == 0:
                        self.save_model_checkpoint()
                    if self.episodes_completed % (200 * 100) == 0:
                        self.dump_solved_episodes()

                    if not isinstance(self.env, VecEnv):
                        full_obs = self.env.reset()
                        # print(full_obs)
                        part_obs = np.concatenate((full_obs['observation'],
                                                   full_obs['desired_goal']),
                                                  axis=-1)

                    def postprocess_replays(raw_replays, buffer,
                                            prioritized_replay):
                        if not prioritized_replay:
                            buffer.add(raw_replays)
                            return

                        for _ in range(10):
                            for id, (full_obs, action, rew, new_obs,
                                     done) in enumerate(raw_replays):
                                offset = np.random.randint(
                                    id, len(raw_replays))
                                target = raw_replays[offset][3][
                                    'achieved_goal']
                                obs = np.concatenate(
                                    [full_obs['observation'], target], axis=-1)
                                step = np.concatenate(
                                    [new_obs['observation'], target], axis=-1)
                                if np.array_equal(new_obs['achieved_goal'],
                                                  target):
                                    rew = 0.
                                    done = 1.
                                else:
                                    rew = -1.
                                    done = 0.

                                buffer.add(obs, action, rew, step, done)

                    postprocess_replays(episode_replays, self.replay_buffer,
                                        self.prioritized_replay)

                    begin_obs.append(full_obs)
                    begin_obs = begin_obs[1:]

                    if callback is not None:
                        callback(locals(), globals())

                    episode_rewards.append(0.0)
                    episode_trans = []
                    episode_replays = []
                    episode_places = set()
                    episode_losses = []
                    reset = True
                    is_in_loop = False

                if step > self.learning_starts and step % self.train_freq == 0:
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    if self.prioritized_replay:
                        experience = self.replay_buffer.sample(
                            self.batch_size,
                            beta=self.beta_schedule.value(step))
                        (obses_t, actions, rewards, obses_tp1, dones, weights,
                         batch_idxes) = experience
                        weights /= np.mean(weights)
                    else:
                        if np.random.randint(0, 100) < 100:  # always
                            obses_t, actions, rewards, obses_tp1, dones, info = self.replay_buffer.sample(
                                self.batch_size)
                        else:
                            obses_t, actions, rewards, obses_tp1, dones, info = self.solved_replay_buffer.sample(
                                self.batch_size)
                        weights, batch_idxes = np.ones_like(rewards), None

                    if writer is not None:
                        # run loss backprop with summary, but once every 100 steps save the metadata
                        # (memory, compute time, ...)
                        if (1 + step) % 100 == 0:
                            run_options = tf.RunOptions(
                                trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata()
                            summary, td_errors = self._train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess,
                                options=run_options,
                                run_metadata=run_metadata)
                            writer.add_run_metadata(run_metadata,
                                                    'step%d' % step)
                        else:
                            summary, td_errors = self._train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess)
                        writer.add_summary(summary, step)
                    else:
                        _, td_errors = self._train_step(obses_t,
                                                        actions,
                                                        rewards,
                                                        obses_tp1,
                                                        obses_tp1,
                                                        dones,
                                                        weights,
                                                        sess=self.sess)

                    if not self.prioritized_replay:
                        for (dist, error) in zip(info, td_errors):
                            if len(loss_accumulator) < dist + 1:
                                loss_accumulator += [0.] * (
                                    dist + 1 - len(loss_accumulator))
                            loss_accumulator[
                                dist] = loss_accumulator[dist] * 0.99 + huber(
                                    1., error)

                        # if step % 1000 == 0:
                        #     print('accumulator', [int(x) for x in loss_accumulator])
                        #     weights_sum = sum(loss_accumulator)
                        #     print('normalized ', ['%.2f' % (x / weights_sum) for x in loss_accumulator])
                        #     print('distance   ', info)

                    loss = np.mean(
                        np.dot(weights,
                               [huber(1., error) for error in td_errors]))
                    episode_losses.append(loss)

                    if self.prioritized_replay:
                        new_priorities = np.abs(
                            td_errors) + self.prioritized_replay_eps
                        self.replay_buffer.update_priorities(
                            batch_idxes, new_priorities)

                if step > self.learning_starts and step % self.target_network_update_freq == 0:
                    # Update target network periodically.
                    self.update_target(sess=self.sess)

                if len(episode_rewards[-(log_interval + 1):-1]) == 0:
                    mean_100ep_reward = -np.inf
                else:
                    mean_100ep_reward = round(
                        float(np.mean(
                            episode_rewards[-(log_interval + 1):-1])), 1)

                num_episodes = len(episode_rewards)
                if self.verbose >= 1 and done and log_interval is not None and len(
                        episode_rewards) % log_interval == 0:
                    logger.record_tabular("steps", step)
                    logger.record_tabular("episodes", num_episodes)
                    logger.record_tabular(
                        "mean {0} episode reward".format(log_interval),
                        mean_100ep_reward)
                    logger.record_tabular(
                        "{0} episode success".format(log_interval),
                        np.mean(episode_success))
                    logger.record_tabular(
                        "% time spent exploring",
                        int(100 * self.exploration.value(step)))
                    logger.dump_tabular()

        return self
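Both the loss accumulator and the logged loss above call a huber(1., error) helper that is not shown in this snippet. A plausible stand-in is the standard Huber loss, quadratic for |error| <= delta and linear beyond it (an assumption about the missing helper, not its actual source):

import numpy as np

def huber(delta, error):
    """Standard Huber loss: quadratic within |error| <= delta, linear outside."""
    abs_err = np.abs(error)
    quadratic = np.minimum(abs_err, delta)
    linear = abs_err - quadratic
    return 0.5 * quadratic ** 2 + delta * linear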
    def learn(self, total_timesteps, callback=None, seed=None, log_interval=100, tb_log_name="DQN",
              reset_num_timesteps=True, replay_wrapper=None):

        new_tb_log = self._init_num_timesteps(reset_num_timesteps)
        print("args are", self.kappa, self.phi_grad_update_freq, self.seed, np.random.randint(100))

        with SetVerbosity(self.verbose): 

            # Create the replay buffer
            self.replay_buffer = ReplayBuffer(self.buffer_size)
            self.beta_schedule = None

            # Create the schedule for exploration starting from 1.
            self.exploration = LinearSchedule(schedule_timesteps=int(self.exploration_fraction * total_timesteps),
                                              initial_p=1.0,
                                              final_p=self.exploration_final_eps)
            #self.exploration = PiecewiseSchedule([(0,        1.0), (int(1e6), 0.1), (int(1e7), 0.01)], outside_value=0.01)

            episode_rewards = [0.0]
            episode_successes = []
            #td_errors_mean = []
            #td_phi_errors_mean = []
            obs = self.env.reset()
            reset = True
            self.episode_reward = np.zeros((1,))

            for _ in range(total_timesteps):
                #if callback is not None:
                    # Only stop training if return value is False, not when it is None. This is for backwards
                    # compatibility with callbacks that have no return statement.
                #    if callback(locals(), globals()) is False:
                #        break
                # Take action and update exploration to the newest value
                kwargs = {}
                if not self.param_noise:
                    update_eps = self.exploration.value(self.num_timesteps)
                    update_param_noise_threshold = 0.
                else:
                    update_eps = 0.
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                    # for detailed explanation.
                    update_param_noise_threshold = \
                        -np.log(1. - self.exploration.value(self.num_timesteps) +
                                self.exploration.value(self.num_timesteps) / float(self.env.action_space.n))
                    kwargs['reset'] = reset
                    kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                    kwargs['update_param_noise_scale'] = True
                with self.sess.as_default():
                    action = self.act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
                env_action = action
                reset = False
                new_obs, rew, done, info = self.env.step(env_action)
                
                # Store transition in the replay buffer.
                self.replay_buffer.add(obs, action, rew, new_obs, float(done))
                obs = new_obs

                episode_rewards[-1] += rew
                if done:
                    maybe_is_success = info.get('is_success')
                    if maybe_is_success is not None:
                        episode_successes.append(float(maybe_is_success))
                    if not isinstance(self.env, VecEnv):
                        obs = self.env.reset()
                    episode_rewards.append(0.0)
                    reset = True

                # Do not train if the warmup phase is not over
                # or if there are not enough samples in the replay buffer
                can_sample = self.replay_buffer.can_sample(self.batch_size)
                if can_sample and self.num_timesteps > self.learning_starts \
                    and self.num_timesteps % self.train_freq == 0:
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(self.batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None

                    # Use v_phi as zero until the replay buffer is filled
                    if self.num_timesteps <= self.buffer_size:
                        weights = np.zeros_like(rewards)

                    with self.sess.as_default():
                        #actions_policy = self.act(obses_t)
                        actions_policy_phi = self.act(obses_tp1)
                    
                    _, td_errors = self._train_step(obses_t, actions, actions_policy_phi, actions_policy_phi, rewards, obses_tp1, obses_tp1, obses_t, obses_tp1, obses_tp1, dones, weights,
                                                        sess=self.sess)
                    #td_errors_mean.append(np.mean(td_errors))  

                if can_sample and self.kappa != 1.0 and self.num_timesteps >= self.buffer_size and \
                    self.num_timesteps % (self.phi_grad_update_freq * self.train_freq) == 0:
                    #print("updating vf phi now", self.num_timesteps)
                    #td_phi_err = []
                    for i in range(self.phi_grad_update_freq): #int(self.phi_grad_update_freq / self.train_freq)):
                        obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(self.batch_size)
                        weights, batch_idxes = np.ones_like(rewards), None

                        with self.sess.as_default():
                            #actions_policy = self.act(obses_t)
                            actions_policy_phi = self.act(obses_tp1)

                        _, td_phi_errors = self._train_phi_step(obses_t, actions, actions_policy_phi, actions_policy_phi, rewards, obses_tp1, obses_tp1, obses_t, obses_tp1, obses_tp1, dones, weights,
                                                            sess=self.sess)
                        #_, q_values_st = self.q_value_st(obses_t, actions, actions_policy_phi, actions_policy_phi, rewards, obses_tp1, obses_tp1, obses_t, obses_tp1, obses_tp1, dones, weights,
                        #                                sess=self.sess)
                        #td_phi_err.append(np.mean(td_phi_errors))
                    #print("td errors after phi update", np.mean(td_phi_err))
                    #print("q vals", np.mean(q_values_st))

                if can_sample and self.num_timesteps > self.learning_starts and \
                        self.num_timesteps % self.target_network_update_freq == 0:
                    # Update target network periodically.
                    self.update_target(sess=self.sess)

                if len(episode_rewards[-101:-1]) == 0:
                    mean_100ep_reward = -np.inf
                else:
                    mean_100ep_reward = round(float(np.mean(episode_rewards[-101:-1])), 1)

                num_episodes = len(episode_rewards)
                if self.verbose >= 1 and done and log_interval is not None and len(episode_rewards) % log_interval == 0:
                    with self.timed("eval time"):
                        if self.test_env is not None and len(episode_rewards) % (10 * log_interval) == 0:
                            eval_return, actual_return = self.evaluate_agent(self.test_env)
                        else:
                            eval_return, actual_return = None, None

                    logger.record_tabular("steps", self.num_timesteps)
                    logger.record_tabular("episodes", num_episodes)
                    if len(episode_successes) > 0:
                        logger.logkv("success rate", np.mean(episode_successes[-100:]))
                    logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                    logger.record_tabular("eval return", eval_return)
                    logger.record_tabular("actual return", actual_return)
                    #logger.record_tabular("td errors", np.mean(td_errors_mean))
                    #logger.record_tabular("td errors phi", np.mean(td_phi_errors_mean))
                    logger.record_tabular("% time spent exploring",
                                          int(100 * self.exploration.value(self.num_timesteps)))
                    logger.dump_tabular()

                    #td_errors_mean = []
                    #td_phi_errors_mean = []

                if self.checkpoint_path is not None and self.num_timesteps % self.checkpoint_freq == 0:
                    self.save(self.checkpoint_path)

                self.num_timesteps += 1

        return self
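
For reference, a hedged standalone sketch of how the exploration schedule constructed above behaves over training; the horizon and epsilon values here are illustrative assumptions, not taken from the snippet.

from stable_baselines.common.schedules import LinearSchedule

total_timesteps = 200_000
exploration_fraction = 0.1     # assumed value for illustration
exploration_final_eps = 0.02   # assumed value for illustration

exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
                             initial_p=1.0,
                             final_p=exploration_final_eps)

for step in (0, 5_000, 10_000, 20_000, 100_000):
    print(step, round(exploration.value(step), 3))
# Expected: 1.0, 0.755, 0.51, 0.02, 0.02 -- epsilon is annealed over the first 10% of
# training and held at final_p afterwards; the logger reports int(100 * epsilon).
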
Exemplo n.º 26
0
    eval_env = gym.make('gym_docking:docking-v2')
    eval_callback = EvalCallback(
        eval_env,
        best_model_save_path='./logs/best_shaping_moving_b_10M_model',
        log_path='./logs/best_shaping_moving_b_10M_results',
        eval_freq=600)

    checkpoint_callback = CheckpointCallback(
        save_freq=int(5e4),
        save_path='./logs/',
        name_prefix='rl_model_621_shaping_moving_b_10M')

    # Create the callback list
    callback = CallbackList([checkpoint_callback, eval_callback])

    lr_sch = LinearSchedule(int(10e6), 1.0e-5, 2.5e-4)
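    # Note: assuming the baselines-style positional signature LinearSchedule(schedule_timesteps,
    # final_p, initial_p), the schedule above decays from 2.5e-4 down to 1.0e-5 over 10M steps.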

    model = PPO2(
        policy=MlpPolicy,
        env=env,
        verbose=1,
        tensorboard_log="./ppo2_docking_tensorboard/",
        policy_kwargs=dict(net_arch=[128, dict(pi=[128], vf=[128])],
                           act_fun=tf.nn.relu),
        lam=0.95,
        gamma=0.99,  # typically tuned within 0.9 ~ 0.99
        # n_steps=math.floor(cfg['env']['max_time'] / cfg['env']['ctl_dt']),
        n_steps=600,
        ent_coef=0.00,
        learning_rate=3e-4,
        # learning_rate=lr_sch.value,
Exemplo n.º 27
0
    def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="DQN",
              reset_num_timesteps=True, replay_wrapper=None):

        new_tb_log = self._init_num_timesteps(reset_num_timesteps)
        callback = self._init_callback(callback)

        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()

            # Create the replay buffer
            if self.prioritized_replay:
                self.replay_buffer = PrioritizedReplayBuffer(self.buffer_size, alpha=self.prioritized_replay_alpha)
                if self.prioritized_replay_beta_iters is None:
                    prioritized_replay_beta_iters = total_timesteps
                else:
                    prioritized_replay_beta_iters = self.prioritized_replay_beta_iters
                self.beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                                    initial_p=self.prioritized_replay_beta0,
                                                    final_p=1.0)
            else:
                self.replay_buffer = ReplayBuffer(self.buffer_size)
                self.beta_schedule = None

            if replay_wrapper is not None:
                assert not self.prioritized_replay, "Prioritized replay buffer is not supported by HER"
                self.replay_buffer = replay_wrapper(self.replay_buffer)

            # Create the schedule for exploration starting from 1.
            self.exploration = LinearSchedule(schedule_timesteps=int(self.exploration_fraction * total_timesteps),
                                              initial_p=self.exploration_initial_eps,
                                              final_p=self.exploration_final_eps)

            episode_rewards = [0.0]
            episode_successes = []

            callback.on_training_start(locals(), globals())
            callback.on_rollout_start()

            reset = True
            obs = self.env.reset()
            # Retrieve unnormalized observation for saving into the buffer
            if self._vec_normalize_env is not None:
                obs_ = self._vec_normalize_env.get_original_obs().squeeze()

            for _ in range(total_timesteps):
                # Take action and update exploration to the newest value
                kwargs = {}
                if not self.param_noise:
                    update_eps = self.exploration.value(self.num_timesteps)
                    update_param_noise_threshold = 0.
                else:
                    update_eps = 0.
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                    # for detailed explanation.
                    update_param_noise_threshold = \
                        -np.log(1. - self.exploration.value(self.num_timesteps) +
                                self.exploration.value(self.num_timesteps) / float(self.env.action_space.n))
                    kwargs['reset'] = reset
                    kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                    kwargs['update_param_noise_scale'] = True
                with self.sess.as_default():
                    action = self.act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
                env_action = action
                reset = False
                new_obs, rew, done, info = self.env.step(env_action)

                self.num_timesteps += 1

                # Stop training if return value is False
                if callback.on_step() is False:
                    break

                # Store only the unnormalized version
                if self._vec_normalize_env is not None:
                    new_obs_ = self._vec_normalize_env.get_original_obs().squeeze()
                    reward_ = self._vec_normalize_env.get_original_reward().squeeze()
                else:
                    # Avoid changing the original ones
                    obs_, new_obs_, reward_ = obs, new_obs, rew
                # Store transition in the replay buffer.
                self.replay_buffer.add(obs_, action, reward_, new_obs_, float(done))
                if self.expert_exp is not None:
                    self.add_expert_exp()
                obs = new_obs
                # Save the unnormalized observation
                if self._vec_normalize_env is not None:
                    obs_ = new_obs_

                if writer is not None:
                    ep_rew = np.array([reward_]).reshape((1, -1))
                    ep_done = np.array([done]).reshape((1, -1))
                    tf_util.total_episode_reward_logger(self.episode_reward, ep_rew, ep_done, writer,
                                                        self.num_timesteps)

                episode_rewards[-1] += reward_
                if done:
                    maybe_is_success = info.get('is_success')
                    if maybe_is_success is not None:
                        episode_successes.append(float(maybe_is_success))
                    if not isinstance(self.env, VecEnv):
                        obs = self.env.reset()
                    episode_rewards.append(0.0)
                    reset = True

                # Do not train if the warmup phase is not over
                # or if there are not enough samples in the replay buffer
                can_sample = self.replay_buffer.can_sample(self.batch_size)
                if can_sample and self.num_timesteps > self.learning_starts \
                        and self.num_timesteps % self.train_freq == 0:

                    callback.on_rollout_end()
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    # pytype:disable=bad-unpacking
                    if self.prioritized_replay:
                        assert self.beta_schedule is not None, \
                               "BUG: should be LinearSchedule when self.prioritized_replay True"
                        experience = self.replay_buffer.sample(self.batch_size,
                                                               beta=self.beta_schedule.value(self.num_timesteps),
                                                               env=self._vec_normalize_env)
                        (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                    else:
                        obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(self.batch_size,
                                                                                                env=self._vec_normalize_env)
                        weights, batch_idxes = np.ones_like(rewards), None
                    # pytype:enable=bad-unpacking

                    if writer is not None:
                        # run loss backprop with summary, but once every 100 steps save the metadata
                        # (memory, compute time, ...)
                        if (1 + self.num_timesteps) % 100 == 0:
                            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata()
                            summary, td_errors = self._train_step(obses_t, actions, rewards, obses_tp1, obses_tp1,
                                                                  dones, weights, sess=self.sess, options=run_options,
                                                                  run_metadata=run_metadata)
                            writer.add_run_metadata(run_metadata, 'step%d' % self.num_timesteps)
                        else:
                            summary, td_errors = self._train_step(obses_t, actions, rewards, obses_tp1, obses_tp1,
                                                                  dones, weights, sess=self.sess)
                        writer.add_summary(summary, self.num_timesteps)
                    else:
                        _, td_errors = self._train_step(obses_t, actions, rewards, obses_tp1, obses_tp1, dones, weights,
                                                        sess=self.sess)

                    if self.prioritized_replay:
                        new_priorities = np.abs(td_errors) + self.prioritized_replay_eps
                        assert isinstance(self.replay_buffer, PrioritizedReplayBuffer)
                        self.replay_buffer.update_priorities(batch_idxes, new_priorities)

                    callback.on_rollout_start()

                if can_sample and self.num_timesteps > self.learning_starts and \
                        self.num_timesteps % self.target_network_update_freq == 0:
                    # Update target network periodically.
                    self.update_target(sess=self.sess)

                if len(episode_rewards[-101:-1]) == 0:
                    mean_100ep_reward = -np.inf
                else:
                    mean_100ep_reward = round(float(np.mean(episode_rewards[-101:-1])), 1)

                num_episodes = len(episode_rewards)
                if self.verbose >= 1 and done and log_interval is not None and len(episode_rewards) % log_interval == 0:
                    logger.record_tabular("steps", self.num_timesteps)
                    logger.record_tabular("episodes", num_episodes)
                    if len(episode_successes) > 0:
                        logger.logkv("success rate", np.mean(episode_successes[-100:]))
                    logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                    logger.record_tabular("% time spent exploring",
                                          int(100 * self.exploration.value(self.num_timesteps)))
                    logger.dump_tabular()

        callback.on_training_end()
        return self
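
Alongside the epsilon schedule, this variant anneals the prioritized-replay beta towards 1.0 and refreshes priorities from the absolute TD errors. A small standalone sketch with illustrative numbers (not taken from the snippet):

import numpy as np
from stable_baselines.common.schedules import LinearSchedule

total_timesteps = 100_000
beta_schedule = LinearSchedule(total_timesteps, initial_p=0.4, final_p=1.0)
print(beta_schedule.value(0), beta_schedule.value(50_000), beta_schedule.value(100_000))
# -> roughly 0.4, 0.7, 1.0: full importance-sampling correction only near the end of training.

prioritized_replay_eps = 1e-6
td_errors = np.array([0.5, -2.0, 0.0])
new_priorities = np.abs(td_errors) + prioritized_replay_eps
# The small epsilon keeps zero-error transitions sampleable instead of giving them zero priority.
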
Exemplo n.º 28
0
    def learn(self, total_timesteps, callback=None, log_interval=4, tb_log_name="BDQ",
              reset_num_timesteps=True, replay_wrapper=None):

        new_tb_log = self._init_num_timesteps(reset_num_timesteps)
        callback = self._init_callback(callback)

        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()

            # Create the replay buffer
            if self.prioritized_replay:
                self.replay_buffer = PrioritizedReplayBuffer(self.buffer_size, alpha=self.prioritized_replay_alpha)
                if self.prioritized_replay_beta_iters is None:
                    prioritized_replay_beta_iters = total_timesteps
                else:
                    prioritized_replay_beta_iters = self.prioritized_replay_beta_iters
                self.beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                                    initial_p=self.prioritized_replay_beta0,
                                                    final_p=1.0)
            else:
                self.replay_buffer = ReplayBuffer(self.buffer_size)
                self.beta_schedule = None

            if replay_wrapper is not None:
                assert not self.prioritized_replay, "Prioritized replay buffer is not supported by HER"
                self.replay_buffer = replay_wrapper(self.replay_buffer)


            if self.epsilon_greedy:
                approximate_num_iters = 2e6 / 4
                # TODO Decide which schedule type to use
                # self.exploration = PiecewiseSchedule([(0, 1.0),
                #                                 (approximate_num_iters / 50, 0.1), 
                #                                 (approximate_num_iters / 5, 0.01) 
                #                                 ], outside_value=0.01)
                self.exploration = LinearSchedule(schedule_timesteps=int(self.exploration_fraction * total_timesteps),
                                                  initial_p=self.exploration_initial_eps,
                                                  final_p=self.exploration_final_eps)
            else:
                self.exploration = ConstantSchedule(value=0.0) # greedy policy
                std_schedule = LinearSchedule(schedule_timesteps=self.timesteps_std,
                                              initial_p=self.initial_std,
                                              final_p=self.final_std)

            episode_rewards = [0.0]
            episode_successes = []

            callback.on_training_start(locals(), globals())
            callback.on_rollout_start()

            obs = self.env.reset()
            reset = True
            self.episode_reward = np.zeros((1,))
            # Retrieve unnormalized observation for saving into the buffer
            if self._vec_normalize_env is not None:
                obs_ = self._vec_normalize_env.get_original_obs().squeeze()

            for _ in range(total_timesteps):
                # Take action and update exploration to the newest value
                kwargs = {}
                if not self.param_noise:
                    update_eps = self.exploration.value(self.num_timesteps)
                    update_param_noise_threshold = 0.
                else:
                    update_eps = 0.
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                    # for detailed explanation.
                    update_param_noise_threshold = \
                        -np.log(1. - self.exploration.value(self.num_timesteps) +
                                self.exploration.value(self.num_timesteps) / float(self.env.action_space.n))
                    kwargs['reset'] = reset
                    kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                    kwargs['update_param_noise_scale'] = True
                with self.sess.as_default():
                    # action = self.act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
                    # print("time step {} and update eps {}".format(self.num_timesteps, update_eps))
                    action_idxes = np.array(self.act(np.array(obs)[None], update_eps=update_eps, **kwargs)) #update_eps=exploration.value(t)))
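                    # Map each branch's discrete index back onto the continuous range [low, high].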
                    action = action_idxes / self.num_action_grains * self.actions_range + self.low
                
                if not self.epsilon_greedy: # Gaussian noise
                    actions_greedy = action
                    action_idx_stoch = []
                    action = []
                    for index in range(len(actions_greedy)): 
                        a_greedy = actions_greedy[index]
                        out_of_range_action = True 
                        while out_of_range_action:
                            # Sample from a Gaussian with mean at the greedy action and a std following a schedule of choice  
                            a_stoch = np.random.normal(loc=a_greedy, scale=std_schedule.value(self.num_timesteps))

                            # Convert the sampled continuous action into a grid index
                            a_idx_stoch = np.rint((a_stoch + self.high[index]) / self.actions_range[index] * self.num_action_grains)

                            # Check if action is in range
                            if a_idx_stoch >= 0 and a_idx_stoch < self.num_actions_pad:
                                action_idx_stoch.append(a_idx_stoch)
                                action.append(a_stoch)
                                out_of_range_action = False

                    action_idxes = action_idx_stoch
                env_action = action
                reset = False
                new_obs, rew, done, info = self.env.step(env_action)

                self.num_timesteps += 1

                # Stop training if return value is False
                if callback.on_step() is False:
                    break
                # Store only the unnormalized version
                if self._vec_normalize_env is not None:
                    new_obs_ = self._vec_normalize_env.get_original_obs().squeeze()
                    reward_ = self._vec_normalize_env.get_original_reward().squeeze()
                else:
                    # Avoid changing the original ones
                    obs_, new_obs_, reward_ = obs, new_obs, rew
                # Store transition in the replay buffer.
                self.replay_buffer.add(obs_, action_idxes, reward_, new_obs_, float(done))
                obs = new_obs
                # Save the unnormalized observation
                if self._vec_normalize_env is not None:
                    obs_ = new_obs_

                if writer is not None:
                    ep_rew = np.array([reward_]).reshape((1, -1))
                    ep_done = np.array([done]).reshape((1, -1))
                    tf_util.total_episode_reward_logger(self.episode_reward, ep_rew, ep_done, writer,
                                                        self.num_timesteps)
                    # self.episode_reward = total_episode_reward_logger(self.episode_reward, ep_rew, ep_done, writer,
                    #                                                   self.num_timesteps)

                # episode_rewards[-1] += rew
                episode_rewards[-1] += reward_
                if done:
                    # print("ep number", len(episode_rewards))
                    maybe_is_success = info.get('is_success')
                    if maybe_is_success is not None:
                        episode_successes.append(float(maybe_is_success))
                    if not isinstance(self.env, VecEnv):
                        obs = self.env.reset()
                    episode_rewards.append(0.0)
                    reset = True

                # Do not train if the warmup phase is not over
                # or if there are not enough samples in the replay buffer
                can_sample = self.replay_buffer.can_sample(self.batch_size)
                if can_sample and self.num_timesteps > self.learning_starts \
                        and self.num_timesteps % self.train_freq == 0:
                    
                    callback.on_rollout_end()
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    # pytype:disable=bad-unpacking
                    if self.prioritized_replay:
                        assert self.beta_schedule is not None, \
                               "BUG: should be LinearSchedule when self.prioritized_replay True"
                        experience = self.replay_buffer.sample(self.batch_size,
                                                               beta=self.beta_schedule.value(self.num_timesteps))
                        (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                    else:
                        obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(self.batch_size)
                        weights, batch_idxes = np.ones_like(rewards), None
                    # pytype:enable=bad-unpacking
                    if writer is not None:
                        # run loss backprop with summary, but once every 100 steps save the metadata
                        # (memory, compute time, ...)
                        if (1 + self.num_timesteps) % 100 == 0:
                            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata()
                            summary, td_errors, mean_loss = self._train_step(obses_t, actions, rewards, obses_tp1, obses_tp1,
                                                                  dones, weights, sess=self.sess, options=run_options,
                                                                  run_metadata=run_metadata)
                            writer.add_run_metadata(run_metadata, 'step%d' % self.num_timesteps)
                        else:
                            summary, td_errors, mean_loss = self._train_step(obses_t, actions, rewards, obses_tp1, obses_tp1,
                                                                  dones, weights, sess=self.sess)
                        writer.add_summary(summary, self.num_timesteps)
                    else:
                        _, td_errors, mean_loss = self._train_step(obses_t, actions, rewards, obses_tp1, obses_tp1, dones, weights,
                                                        sess=self.sess)

                    if self.prioritized_replay:
                        new_priorities = np.abs(td_errors) + self.prioritized_replay_eps
                        assert isinstance(self.replay_buffer, PrioritizedReplayBuffer)
                        self.replay_buffer.update_priorities(batch_idxes, new_priorities)

                    callback.on_rollout_start()

                if can_sample and self.num_timesteps > self.learning_starts and \
                        self.num_timesteps % self.target_network_update_freq == 0:
                    # Update target network periodically.
                    self.update_target(sess=self.sess)

                if len(episode_rewards[-101:-1]) == 0:
                    mean_100ep_reward = -np.inf
                else:
                    mean_100ep_reward = round(float(np.mean(episode_rewards[-101:-1])), 1)

                num_episodes = len(episode_rewards)
                # Log training info
                kvs = {}
                if self.verbose >= 1 and done and log_interval is not None \
                    and len(episode_rewards) % log_interval == 0 \
                    and self.num_timesteps > self.train_freq \
                    and self.num_timesteps > self.learning_starts:
                    
                    if self.log_dir is not None:
                        kvs["episodes"] = num_episodes
                        kvs["mean_100rew"] = mean_100ep_reward
                        kvs["current_lr"] = self.learning_rate
                        kvs["success_rate"] = np.mean(episode_successes[-100:])
                        kvs["total_timesteps"] = self.num_timesteps
                        kvs["mean_loss"] = mean_loss
                        kvs["mean_td_errors"] = np.mean(td_errors)
                        kvs["time_spent_exploring"] = int(100 * self.exploration.value(self.num_timesteps))
                        self.log_csv.writekvs(kvs) 

                    logger.record_tabular("steps", self.num_timesteps)
                    logger.record_tabular("episodes", num_episodes)
                    if len(episode_successes) > 0:
                        logger.logkv("success rate", np.mean(episode_successes[-100:]))
                    logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                    logger.record_tabular("% time spent exploring",
                                          int(100 * self.exploration.value(self.num_timesteps)))
                    logger.dump_tabular()

        callback.on_training_end()
        return self
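
The Gaussian-noise branch above snaps each sampled continuous action onto a uniform grid and back. A standalone round-trip with illustrative bounds and resolution (the snippet's (a + high) offset coincides with (a - low) when the action range is symmetric):

import numpy as np

# Illustrative bounds and resolution, not taken from the snippet.
low, high = -1.0, 1.0
actions_range = high - low          # 2.0
num_action_grains = 32              # grid resolution for one action branch

a_continuous = 0.5
a_idx = int(np.rint((a_continuous - low) / actions_range * num_action_grains))   # -> 24
a_back = a_idx / num_action_grains * actions_range + low                         # -> 0.5
assert a_idx == 24 and abs(a_back - a_continuous) < 1e-12
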
Exemplo n.º 29
0
    def learn(self,
              total_timesteps,
              callback=None,
              seed=None,
              log_interval=100,
              tb_log_name="DQN"):
        with SetVerbosity(self.verbose), TensorboardWriter(
                self.graph, self.tensorboard_log, tb_log_name) as writer:
            self._setup_learn(seed)

            # Create the replay buffer
            if self.prioritized_replay:
                self.replay_buffer = PrioritizedReplayBuffer(
                    self.buffer_size, alpha=self.prioritized_replay_alpha)
                if self.prioritized_replay_beta_iters is None:
                    prioritized_replay_beta_iters = total_timesteps
                else:
                    prioritized_replay_beta_iters = self.prioritized_replay_beta_iters
                self.beta_schedule = LinearSchedule(
                    prioritized_replay_beta_iters,
                    initial_p=self.prioritized_replay_beta0,
                    final_p=1.0)
            else:
                self.replay_buffer = ReplayBuffer(self.buffer_size)
                self.beta_schedule = None
            # Create the schedule for exploration starting from 1.
            self.exploration = LinearSchedule(
                schedule_timesteps=int(self.exploration_fraction *
                                       total_timesteps),
                initial_p=1.0,
                final_p=self.exploration_final_eps)

            episode_rewards = [0.0]
            obs = self.env.reset()
            reset = True
            self.episode_reward = np.zeros((1, ))

            for step in range(total_timesteps):
                if callback is not None:
                    callback(locals(), globals())
                # Take action and update exploration to the newest value
                kwargs = {}
                if not self.param_noise:
                    update_eps = self.exploration.value(step)
                    update_param_noise_threshold = 0.
                else:
                    update_eps = 0.
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                    # for detailed explanation.
                    update_param_noise_threshold = \
                        -np.log(1. - self.exploration.value(step) +
                                self.exploration.value(step) / float(self.env.action_space.n))
                    kwargs['reset'] = reset
                    kwargs[
                        'update_param_noise_threshold'] = update_param_noise_threshold
                    kwargs['update_param_noise_scale'] = True
                with self.sess.as_default():
                    action = self.act(np.array(obs)[None],
                                      update_eps=update_eps,
                                      **kwargs)[0]
                env_action = action
                reset = False
                new_obs, rew, done, _ = self.env.step(env_action)
                # Store transition in the replay buffer.
                self.replay_buffer.add(obs, action, rew, new_obs, float(done))
                obs = new_obs

                if writer is not None:
                    ep_rew = np.array([rew]).reshape((1, -1))
                    ep_done = np.array([done]).reshape((1, -1))
                    self.episode_reward = total_episode_reward_logger(
                        self.episode_reward, ep_rew, ep_done, writer, step)

                episode_rewards[-1] += rew
                if done:
                    if not isinstance(self.env, VecEnv):
                        obs = self.env.reset()
                    episode_rewards.append(0.0)
                    reset = True

                if step > self.learning_starts and step % self.train_freq == 0:
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    if self.prioritized_replay:
                        experience = self.replay_buffer.sample(
                            self.batch_size,
                            beta=self.beta_schedule.value(step))
                        (obses_t, actions, rewards, obses_tp1, dones, weights,
                         batch_idxes) = experience
                    else:
                        obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(
                            self.batch_size)
                        weights, batch_idxes = np.ones_like(rewards), None

                    if writer is not None:
                        # run loss backprop with summary, but once every 100 steps save the metadata
                        # (memory, compute time, ...)
                        if (1 + step) % 100 == 0:
                            run_options = tf.RunOptions(
                                trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata()
                            summary, td_errors = self._train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess,
                                options=run_options,
                                run_metadata=run_metadata)
                            writer.add_run_metadata(run_metadata,
                                                    'step%d' % step)
                        else:
                            summary, td_errors = self._train_step(
                                obses_t,
                                actions,
                                rewards,
                                obses_tp1,
                                obses_tp1,
                                dones,
                                weights,
                                sess=self.sess)
                        writer.add_summary(summary, step)
                    else:
                        _, td_errors = self._train_step(obses_t,
                                                        actions,
                                                        rewards,
                                                        obses_tp1,
                                                        obses_tp1,
                                                        dones,
                                                        weights,
                                                        sess=self.sess)

                    if self.prioritized_replay:
                        new_priorities = np.abs(
                            td_errors) + self.prioritized_replay_eps
                        self.replay_buffer.update_priorities(
                            batch_idxes, new_priorities)

                if step > self.learning_starts and step % self.target_network_update_freq == 0:
                    # Update target network periodically.
                    self.update_target(sess=self.sess)

                if len(episode_rewards[-101:-1]) == 0:
                    mean_100ep_reward = -np.inf
                else:
                    mean_100ep_reward = round(
                        float(np.mean(episode_rewards[-101:-1])), 1)

                num_episodes = len(episode_rewards)
                if self.verbose >= 1 and done and log_interval is not None and len(
                        episode_rewards) % log_interval == 0:
                    logger.record_tabular("steps", step)
                    logger.record_tabular("episodes", num_episodes)
                    logger.record_tabular("mean 100 episode reward",
                                          mean_100ep_reward)
                    logger.record_tabular(
                        "% time spent exploring",
                        int(100 * self.exploration.value(step)))
                    logger.dump_tabular()

        return self
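
Every variant above derives the parameter-noise threshold from the current epsilon so that the perturbed policy's KL roughly matches eps-greedy exploration. A worked instance with illustrative numbers:

import numpy as np

eps = 0.1         # illustrative epsilon from an exploration schedule
n_actions = 4     # illustrative discrete action count

update_param_noise_threshold = -np.log(1. - eps + eps / float(n_actions))
print(update_param_noise_threshold)   # ~0.078; it shrinks towards 0 as epsilon is annealed down.
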
Exemplo n.º 30
0
    log_dir=log_dir,
    save_freq=interval * save_interval)

timer = time.time() - start_timer

# Add callbacks to the callback list
callbackList = []
useBestCallback = True

if useBestCallback:
    callbackList.append(callbackBest)

model = SAC(MlpPolicy_SAC,
            env,
            verbose=1,
            learning_rate=LinearSchedule(interval * icount, 0.001, 0.01).value,
            gamma=0.99,
            tau=3e-4,
            batch_size=2048,
            train_freq=25,
            target_update_interval=25,
            policy_kwargs=policy_kwargs,
            learning_starts=interval - 1,
            n_cpu_tf_sess=multiprocessing.cpu_count(),
            tensorboard_log=parent_dir + "tensorboard/")
print()

model.learn(total_timesteps=interval * icount,
            log_interval=interval,
            tb_log_name="",
            callback=callbackList)
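
One hedged sanity check worth running before handing a schedule like LinearSchedule(interval * icount, 0.001, 0.01).value to a model: probe it with the kind of input it will actually receive. Under the baselines-style positional signature this schedule spans 0.01 down to 0.001 over the full horizon when queried with raw timesteps, but if the consumer passes a progress value in [0, 1] instead (as some stable-baselines learning-rate callables do), a long-horizon schedule barely moves. The horizon below is an illustrative stand-in for interval * icount.

from stable_baselines.common.schedules import LinearSchedule

lr_schedule = LinearSchedule(10_000, final_p=0.001, initial_p=0.01)  # illustrative horizon
print(lr_schedule.value(0), lr_schedule.value(10_000))   # 0.01 -> 0.001 when fed raw timesteps
print(lr_schedule.value(1.0), lr_schedule.value(0.0))    # both ~0.01 when fed a 0..1 progress value
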