def sample(self, idx=None, envx=None):
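        # Gather the stored fields at randomly chosen indices for each environment,
        # then flatten the (n_steps, num_envs) blocks into a single flat Dataset.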
        assert self.can_sample()
        if idx is None:
            idx = np.random.randint(self._num_in_buffer, size=self.num_envs)
        num_envs = self.num_envs

        envx = np.arange(num_envs) if envx is None else envx

        def take(x):
            return self.take(x, idx, envx)

        # (nstep, num_envs)
        states = self.take_block(self.state_block, idx, envx, 0)
        next_states = self.take_block(self.state_block, idx, envx, 1)
        actions = take(self.actions)
        mus = take(self.mus)
        rewards = take(self.rewards)
        dones = take(self.dones)
        timeouts = take(self.timeouts)
        infos = take(self.infos)

        samples = Dataset(dtype=self.dtype,
                          max_size=self.num_envs * self.n_steps)
        steps = [
            states, actions, next_states, mus, rewards, dones, timeouts, infos
        ]
        steps = list(map(flatten_first_2_dims, steps))
        samples.extend(np.rec.fromarrays(steps, dtype=self.dtype))
        return samples
Example #2
    def run(self,
            policy: Actor,
            n_samples: int,
            classifier=None,
            stochastic=True):
        ep_infos = []
        n_steps = n_samples // self.n_envs
        assert n_steps * self.n_envs == n_samples
        dataset = Dataset(self._dtype, n_samples)

        if self._actions is None:
            self._actions = self._get_action(policy, self._states, stochastic)
        for T in range(n_steps):
            unscaled_actions = self._actions.copy()
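            # Map actions from the policy's [-1, 1] range to the environment's action bounds.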
            if self.rescale_action:
                lo, hi = self.env.action_space.low, self.env.action_space.high
                actions = (lo + (unscaled_actions + 1.) * 0.5 * (hi - lo))
            else:
                actions = unscaled_actions

            next_states, rewards, dones, infos = self.env.step(actions)
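            # If a reward classifier is provided, use its predicted rewards in place of the environment's.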
            if classifier is not None:
                rewards = classifier.get_rewards(self._states,
                                                 unscaled_actions, next_states)
            next_actions = self._get_action(policy, next_states, stochastic)
            dones = dones.astype(bool)
            self._returns += rewards
            self._n_steps += 1
            timeouts = self._n_steps == self.max_steps

            steps = [
                self._states.copy(), unscaled_actions,
                next_states.copy(),
                next_actions.copy(), rewards, dones, timeouts,
                self._n_steps.copy()
            ]
            dataset.extend(np.rec.fromarrays(steps, dtype=self._dtype))

            indices = np.where(dones | timeouts)[0]
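            # Reset any environment whose episode terminated or timed out, resample its
            # action from the reset state, and record the finished episode's statistics.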
            if len(indices) > 0:
                next_states = next_states.copy()
                next_states[indices] = self.env.partial_reset(indices)
                next_actions = next_actions.copy()
                next_actions[indices] = self._get_action(
                    policy, next_states, stochastic)[indices]
                for index in indices:
                    infos[index]['episode'] = {
                        'return': self._returns[index],
                        'length': self._n_steps[index]
                    }
                self._n_steps[indices] = 0
                self._returns[indices] = 0.

            self._states = next_states.copy()
            self._actions = next_actions.copy()
            ep_infos.extend(
                [info['episode'] for info in infos if 'episode' in info])

        return dataset, ep_infos
Example #3
    def run(self, policy: BasePolicy, n_samples: int):
        ep_infos = []
        self.rewards_params_list = []
        self.reward_ctrl_list = []
        self.reward_state_list = []
        n_steps = n_samples // self.n_envs
        assert n_steps * self.n_envs == n_samples
        dataset = Dataset(self._dtype, n_samples)
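        # begin_mark[t, e] = 1 flags the step at which an episode of env e that
        # finishes during this rollout began.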
        self.begin_mark = np.zeros((n_steps, self.n_envs), dtype=np.float32)
        start = np.zeros(self.n_envs, dtype=int)

        for T in range(n_steps):
            unscaled_actions = policy.get_actions(self._states)
            if self.rescale_action:
                lo, hi = self.env.action_space.low, self.env.action_space.high
                actions = (lo + (unscaled_actions + 1.) * 0.5 * (hi - lo))
            else:
                actions = unscaled_actions

            next_states, rewards, dones, infos = self.env.step(actions)
            self.reward_ctrl_list.append([i['reward_ctrl'] for i in infos])
            self.reward_state_list.append([i['reward_state'] for i in infos])
            dones = dones.astype(bool)
            self._returns += rewards
            self._n_steps += 1
            timeouts = self._n_steps == self.max_steps

            steps = [
                self._states.copy(), unscaled_actions,
                next_states.copy(), rewards, dones, timeouts
            ]
            dataset.extend(np.rec.fromarrays(steps, dtype=self._dtype))

            indices = np.where(dones | timeouts)[0]
            if len(indices) > 0:
                next_states = next_states.copy()
                next_states[indices] = self.env.partial_reset(indices)
                for index in indices:
                    infos[index]['episode'] = {'return': self._returns[index]}
                    self.begin_mark[start[index]][index] = 1
                self._n_steps[indices] = 0
                self._returns[indices] = 0.
                start[indices] = T + 1

            self._states = next_states.copy()
            ep_infos.extend(
                [info['episode'] for info in infos if 'episode' in info])

        assert len(ep_infos) > 0, 'no episode finished during this rollout'
        return dataset, ep_infos
Example #4
    def run(self, policy: BasePolicy, n_samples: int):
        ep_infos = []
        n_steps = n_samples // self.n_envs
        assert n_steps * self.n_envs == n_samples
        dataset = Dataset(self._dtype, n_samples)

        for T in range(n_steps):
            unscaled_actions = policy.get_actions(self._states)
            if self.rescale_action:
                lo, hi = self.env.action_space.low, self.env.action_space.high
                actions = lo + (unscaled_actions + 1.) * 0.5 * (hi - lo)
            else:
                actions = unscaled_actions

            next_states, rewards, dones, infos = self.env.step(actions)
            dones = dones.astype(bool)
            self._returns += rewards
            self._n_steps += 1
            timeouts = self._n_steps == self.max_steps
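            # Partial-episode bootstrapping: do not treat time-limit truncations as true terminals.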
            terminals = np.copy(dones)
            for e, info in enumerate(infos):
                if self.partial_episode_bootstrapping and info.get(
                        'TimeLimit.truncated', False):
                    terminals[e] = False

            steps = [
                self._states.copy(), unscaled_actions,
                next_states.copy(), rewards, terminals, timeouts
            ]
            dataset.extend(np.rec.fromarrays(steps, dtype=self._dtype))

            indices = np.where(dones | timeouts)[0]
            if len(indices) > 0:
                next_states = next_states.copy()
                next_states[indices] = self.env.partial_reset(indices)
                for index in indices:
                    infos[index]['episode'] = {
                        'return': self._returns[index],
                        'length': self._n_steps[index]
                    }
                self._n_steps[indices] = 0
                self._returns[indices] = 0.

            self._states = next_states.copy()
            ep_infos.extend(
                [info['episode'] for info in infos if 'episode' in info])

        return dataset, ep_infos
Example #5
def main():
    FLAGS.set_seed()
    FLAGS.freeze()

    env = make_env(FLAGS.env.id,
                   FLAGS.env.env_type,
                   num_env=FLAGS.env.num_env,
                   seed=FLAGS.seed,
                   log_dir=FLAGS.log_dir,
                   rescale_action=FLAGS.env.rescale_action)
    env_eval = make_env(FLAGS.env.id,
                        FLAGS.env.env_type,
                        num_env=4,
                        seed=FLAGS.seed + 1000,
                        log_dir=FLAGS.log_dir)
    dim_state = env.observation_space.shape[0]
    dim_action = env.action_space.shape[0]

    actor = Actor(dim_state,
                  dim_action,
                  hidden_sizes=FLAGS.TD3.actor_hidden_sizes)
    critic = Critic(dim_state,
                    dim_action,
                    hidden_sizes=FLAGS.TD3.critic_hidden_sizes)
    td3 = TD3(dim_state,
              dim_action,
              actor=actor,
              critic=critic,
              **FLAGS.TD3.algo.as_dict())

    tf.get_default_session().run(tf.global_variables_initializer())
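    # Initialize the target networks from the current actor/critic weights
    # (tau=0.0 performs a full, hard copy here).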
    td3.update_actor_target(tau=0.0)
    td3.update_critic_target(tau=0.0)

    dtype = gen_dtype(env, 'state action next_state reward done timeout')
    buffer = Dataset(dtype=dtype, max_size=FLAGS.TD3.buffer_size)
    saver = nn.ModuleDict({'actor': actor, 'critic': critic})
    print(saver)

    n_steps = np.zeros(env.n_envs)
    n_returns = np.zeros(env.n_envs)

    train_returns = collections.deque(maxlen=40)
    train_lengths = collections.deque(maxlen=40)
    states = env.reset()
    time_st = time.time()
    for t in range(FLAGS.TD3.total_timesteps):
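        # Warm-up with uniformly random actions, then act with the policy plus clipped Gaussian exploration noise.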
        if t < FLAGS.TD3.init_random_steps:
            actions = np.array(
                [env.action_space.sample() for _ in range(env.n_envs)])
        else:
            raw_actions = actor.get_actions(states)
            noises = np.random.normal(loc=0.,
                                      scale=FLAGS.TD3.explore_noise,
                                      size=raw_actions.shape)
            actions = np.clip(raw_actions + noises, -1, 1)
        next_states, rewards, dones, infos = env.step(actions)
        n_returns += rewards
        n_steps += 1
        timeouts = n_steps == env.max_episode_steps
        terminals = np.copy(dones)
        for e, info in enumerate(infos):
            if info.get('TimeLimit.truncated', False):
                terminals[e] = False

        transitions = [
            states, actions,
            next_states.copy(), rewards, terminals,
            timeouts.copy()
        ]
        buffer.extend(np.rec.fromarrays(transitions, dtype=dtype))

        indices = np.where(dones | timeouts)[0]
        if len(indices) > 0:
            next_states[indices] = env.partial_reset(indices)

            train_returns.extend(n_returns[indices])
            train_lengths.extend(n_steps[indices])
            n_returns[indices] = 0
            n_steps[indices] = 0
        states = next_states.copy()

        if t == 2000:
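            # One-off sanity check (single env): each stored next_state must equal
            # the following state unless that episode ended in between.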
            assert env.n_envs == 1
            samples = buffer.sample(size=None, indices=np.arange(2000))
            masks = 1 - (samples.done | samples.timeout)[..., np.newaxis]
            masks = masks[:-1]
            assert np.allclose(samples.state[1:] * masks,
                               samples.next_state[:-1] * masks)

        if t >= FLAGS.TD3.init_random_steps:
            samples = buffer.sample(FLAGS.TD3.batch_size)
            train_info = td3.train(samples)
            if t % FLAGS.TD3.log_freq == 0:
                fps = int(t / (time.time() - time_st))
                train_info['fps'] = fps
                log_kvs(prefix='TD3',
                        kvs=dict(iter=t,
                                 episode=dict(
                                     returns=np.mean(train_returns)
                                     if len(train_returns) > 0 else 0.,
                                     lengths=int(
                                         np.mean(train_lengths)
                                         if len(train_lengths) > 0 else 0)),
                                 **train_info))

        if t % FLAGS.TD3.eval_freq == 0:
            eval_returns, eval_lengths = evaluate(actor,
                                                  env_eval,
                                                  deterministic=False)
            log_kvs(prefix='Evaluate',
                    kvs=dict(iter=t,
                             episode=dict(returns=np.mean(eval_returns),
                                          lengths=int(np.mean(eval_lengths)))))

        if t % FLAGS.TD3.save_freq == 0:
            np.save('{}/stage-{}'.format(FLAGS.log_dir, t), saver.state_dict())
            np.save('{}/final'.format(FLAGS.log_dir), saver.state_dict())

    np.save('{}/final'.format(FLAGS.log_dir), saver.state_dict())
Example #6
def main():
    FLAGS.set_seed()
    FLAGS.freeze()

    env = make_env(FLAGS.env.id,
                   FLAGS.env.env_type,
                   num_env=FLAGS.env.num_env,
                   seed=FLAGS.seed,
                   log_dir=FLAGS.log_dir,
                   rescale_action=FLAGS.env.rescale_action)
    env_eval = make_env(FLAGS.env.id,
                        FLAGS.env.env_type,
                        num_env=4,
                        seed=FLAGS.seed + 1000,
                        log_dir=FLAGS.log_dir)
    dim_state = env.observation_space.shape[0]
    dim_action = env.action_space.shape[0]

    actor = Actor(dim_state,
                  dim_action,
                  hidden_sizes=FLAGS.SAC.actor_hidden_sizes)
    critic = Critic(dim_state,
                    dim_action,
                    hidden_sizes=FLAGS.SAC.critic_hidden_sizes)
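    # Fall back to the standard SAC heuristic of -dim_action for the target entropy.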
    target_entropy = FLAGS.SAC.target_entropy
    if target_entropy is None:
        target_entropy = -dim_action
    sac = SAC(dim_state,
              dim_action,
              actor=actor,
              critic=critic,
              target_entropy=target_entropy,
              **FLAGS.SAC.algo.as_dict())

    tf.get_default_session().run(tf.global_variables_initializer())
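    # Initialize the target critic as a hard copy of the online critic (tau=0.0).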
    sac.update_critic_target(tau=0.0)

    dtype = gen_dtype(env, 'state action next_state reward done')
    buffer = Dataset(dtype=dtype, max_size=FLAGS.SAC.buffer_size)
    saver = nn.ModuleDict({'actor': actor, 'critic': critic})
    print(saver)

    n_steps = np.zeros(env.n_envs)
    n_returns = np.zeros(env.n_envs)

    train_returns = collections.deque(maxlen=40)
    train_lengths = collections.deque(maxlen=40)
    states = env.reset()
    time_st = time.time()
    for t in range(FLAGS.SAC.total_timesteps):
        if t < FLAGS.SAC.init_random_steps:
            actions = np.array(
                [env.action_space.sample() for _ in range(env.n_envs)])
        else:
            actions = actor.get_actions(states)
        next_states, rewards, dones, infos = env.step(actions)
        n_returns += rewards
        n_steps += 1
        timeouts = n_steps == env.max_episode_steps
        terminals = np.copy(dones)
        for e, info in enumerate(infos):
            if FLAGS.SAC.peb and info.get('TimeLimit.truncated', False):
                terminals[e] = False

        transitions = [states, actions, next_states.copy(), rewards, terminals]
        buffer.extend(np.rec.fromarrays(transitions, dtype=dtype))

        indices = np.where(dones | timeouts)[0]
        if len(indices) > 0:
            next_states[indices] = env.partial_reset(indices)

            train_returns.extend(n_returns[indices])
            train_lengths.extend(n_steps[indices])
            n_returns[indices] = 0
            n_steps[indices] = 0
        states = next_states.copy()

        if t >= FLAGS.SAC.init_random_steps:
            samples = buffer.sample(FLAGS.SAC.batch_size)
            train_info = sac.train(samples)
            if t % FLAGS.SAC.log_freq == 0:
                fps = int(t / (time.time() - time_st))
                train_info['fps'] = fps
                log_kvs(prefix='SAC',
                        kvs=dict(iter=t,
                                 episode=dict(
                                     returns=np.mean(train_returns)
                                     if len(train_returns) > 0 else 0.,
                                     lengths=int(
                                         np.mean(train_lengths)
                                         if len(train_lengths) > 0 else 0)),
                                 **train_info))

        if t % FLAGS.SAC.eval_freq == 0:
            eval_returns, eval_lengths = evaluate(actor, env_eval)
            log_kvs(prefix='Evaluate',
                    kvs=dict(iter=t,
                             episode=dict(returns=np.mean(eval_returns),
                                          lengths=int(np.mean(eval_lengths)))))

        if t % FLAGS.SAC.save_freq == 0:
            np.save('{}/stage-{}'.format(FLAGS.log_dir, t), saver.state_dict())
            np.save('{}/final'.format(FLAGS.log_dir), saver.state_dict())

    np.save('{}/final'.format(FLAGS.log_dir), saver.state_dict())