Example #1
    def __init__(self, config):
        self.config = config

        self.envs = []
        for _ in range(config['env_num']):
            env = gym.make(config['env_name'])
            env.seed(ENV_SEED)
            env = MonitorEnv(env)
            env = ClipRewardEnv(env)
            env = StateStack(env, k=4)
            self.envs.append(env)
            # env = gym.make(config['env_name'])
            # obs_shape = env.observation_space.shape

        self.vector_env = VectorEnv(self.envs)

        self.obs_batch = self.vector_env.reset()
        obs_dim = self.envs[0].observation_space.shape
        act_dim = self.envs[0].action_space.shape[0]
        max_action = float(self.envs[0].action_space.high[0])
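        # Continuous-control setup: act_dim and max_action are read from the Box
        # action space; obs_dim is the observation shape of the wrapped env.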
        # obs_shape = env.observation_space.shape
        # act_dim = env.action_space.n

        model = MujocoModel(act_dim)
        algorithm = DVtrace(
            model,
            max_action,
            sample_batch_steps=self.config['sample_batch_steps'],
            gamma=self.config['gamma'],
            vf_loss_coeff=self.config['vf_loss_coeff'],
            clip_rho_threshold=self.config['clip_rho_threshold'],
            clip_pg_rho_threshold=self.config['clip_pg_rho_threshold'])
        self.agent = AtariAgent(algorithm, obs_dim, act_dim)
Example #2
    def __init__(self, config):
        self.config = config
        self.sample_data_queue = queue.Queue(
            maxsize=config['sample_queue_max_size'])

        #=========== Create Agent ==========
        env = gym.make(config['env_name'])
        env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
        obs_shape = env.observation_space.shape

        act_dim = env.action_space.n

        model = AtariModel(act_dim)
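        # IMPALA uses V-trace off-policy correction: clip_rho_threshold bounds the
        # importance weights used for the value targets, clip_pg_rho_threshold the
        # weights used in the policy-gradient term.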
        algorithm = parl.algorithms.IMPALA(
            model,
            sample_batch_steps=self.config['sample_batch_steps'],
            gamma=self.config['gamma'],
            vf_loss_coeff=self.config['vf_loss_coeff'],
            clip_rho_threshold=self.config['clip_rho_threshold'],
            clip_pg_rho_threshold=self.config['clip_pg_rho_threshold'])
        self.agent = AtariAgent(algorithm, obs_shape, act_dim,
                                self.learn_data_provider)

        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only support training in single GPU,\
                    Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_TO_USE]` .'

        self.cache_params = self.agent.get_weights()
        self.params_lock = threading.Lock()
        self.params_updated = False
        self.cache_params_sent_cnt = 0
        self.total_params_sync = 0

        #========== Learner ==========
        self.lr, self.entropy_coeff = None, None
        self.lr_scheduler = PiecewiseScheduler(config['lr_scheduler'])
        self.entropy_coeff_scheduler = PiecewiseScheduler(
            config['entropy_coeff_scheduler'])

        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)
        self.kl_stat = WindowStat(100)
        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        self.learn_thread = threading.Thread(target=self.run_learn)
        self.learn_thread.setDaemon(True)
        self.learn_thread.start()

        #========== Remote Actor ===========
        self.remote_count = 0

        self.batch_buffer = []
        self.remote_metrics_queue = queue.Queue()
        self.sample_total_steps = 0

        self.create_actors()
Example #3
def main():
    env = get_player(args.game_name, image_size=IMAGE_SIZE, train=True)
    #print(env)
    # test_env = get_player(args.game_name,image_size=IMAGE_SIZE,context_len=CONTEXT_LEN)
    rpm = ReplayMemory(MEMORY_SIZE, IMAGE_SIZE, CONTEXT_LEN)
    act_dim = env.action_space.n

    model = AtariModel(act_dim, args.algo)
    if args.algo == 'Double':
        algorithm = parl.algorithms.DDQN(model, act_dim=act_dim, gamma=GAMMA)
    elif args.algo in ['DQN', 'Dueling']:
        algorithm = parl.algorithms.DQN(model, act_dim=act_dim, gamma=GAMMA)
    agent = AtariAgent(algorithm,
                       act_dim=act_dim,
                       start_lr=LEARNING_RATE,
                       total_step=args.train_total_steps,
                       update_freq=UPDATE_FREQ)

    with tqdm(total=MEMORY_WARMUP_SIZE, desc='[Replay Memory Warm Up]') as pbar:
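        # Warm-up: run episodes until the replay memory holds at least
        # MEMORY_WARMUP_SIZE transitions before the main training loop starts.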
        while rpm.size() < MEMORY_WARMUP_SIZE:
            total_reward, steps, _ = run_train_episode(env, agent, rpm)
            pbar.update(steps)
    # train
    test_flag = 0
    pbar = tqdm(total=args.train_total_steps)
    total_steps = 0
    max_reward = None
    while total_steps < args.train_total_steps:
        # start epoch
        total_reward, steps, loss = run_train_episode(env, agent, rpm)
        total_steps += steps
        pbar.set_description('[train]exploration:{}'.format(agent.exploration))

        summary.add_scalar('dqn/score', total_reward, total_steps)
        summary.add_scalar('dqn/loss', loss, total_steps)  # mean of total loss
        summary.add_scalar('dqn/exploration', agent.exploration, total_steps)
        pbar.update(steps)

        # if total_steps // args.test_every_steps >= test_flag:
        #     while total_steps // args.test_every_steps >= test_flag:
        #         test_flag += 1
        #     pbar.write("testing")
        #     eval_rewards = []
        #     for _ in tqdm(range(3), desc='eval agent'):
        #         eval_reward = run_evaluate_episode(test_env, agent)
        #         eval_rewards.append(eval_reward)
        #     logger.info(
        #         "eval_agent done, (steps, eval_reward): ({}, {})".format(
        #             total_steps, np.mean(eval_rewards)))
        #     eval_test = np.mean(eval_rewards)
        #     summary.add_scalar('dqn/eval', eval_test, total_steps)

    pbar.close()
    save_path = './dqn_model.ckpt'
    agent.save(save_path)
Example #4
    def __init__(self, config):
        self.config = config

        #=========== Create Agent ==========
        env = gym.make(config['env_name'])
        env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
        obs_shape = env.observation_space.shape
        act_dim = env.action_space.n
        self.config['obs_shape'] = obs_shape
        self.config['act_dim'] = act_dim

        model = AtariModel(act_dim)
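        # vf_loss_coeff weights the value (critic) loss against the policy loss
        # in the combined A3C objective.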
        algorithm = parl.algorithms.A3C(model,
                                        vf_loss_coeff=config['vf_loss_coeff'])
        self.agent = AtariAgent(algorithm, config)

        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only support training in single GPU,\
                    Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_TO_USE]` .'

        #========== Learner ==========

        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)
        self.lr = None
        self.entropy_coeff = None

        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        #========== Remote Actor ===========
        self.remote_count = 0
        self.sample_data_queue = queue.Queue()

        self.remote_metrics_queue = queue.Queue()
        self.sample_total_steps = 0

        self.params_queues = []
        self.create_actors()
Example #5
def main():
    env = get_player(
        args.rom, image_size=IMAGE_SIZE, train=True, frame_skip=FRAME_SKIP)
    file_path = "memory.npz"
    rpm = ReplayMemory(
        MEMORY_SIZE,
        IMAGE_SIZE,
        CONTEXT_LEN,
        load_file=True,  # load replay memory data from file
        file_path=file_path)
    act_dim = env.action_space.n

    model = AtariModel(act_dim)
    algorithm = DQN(
        model, act_dim=act_dim, gamma=GAMMA, lr=LEARNING_RATE * gpu_num)
    agent = AtariAgent(
        algorithm, act_dim=act_dim, total_step=args.train_total_steps)
    if os.path.isfile('./model.ckpt'):
        logger.info("load model from file")
        agent.restore('./model.ckpt')

    if args.train:
        logger.info("train with memory data")
        run_train_step(agent, rpm)
        logger.info("finish training. Save the model.")
        agent.save('./model.ckpt')
    else:
        logger.info("collect experience")
        collect_exp(env, rpm, agent)
        rpm.save_memory()
        logger.info("finish collecting, save successfully")
Example #6
    def __init__(self, config):
        self.config = config

        self.envs = []
        for _ in range(config['env_num']):
            env = gym.make(config['env_name'])
            env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
            self.envs.append(env)
        self.vector_env = VectorEnv(self.envs)

        self.obs_batch = self.vector_env.reset()

        obs_shape = env.observation_space.shape
        act_dim = env.action_space.n

        self.config['obs_shape'] = obs_shape
        self.config['act_dim'] = act_dim

        model = AtariModel(act_dim)
        algorithm = parl.algorithms.A3C(model,
                                        vf_loss_coeff=config['vf_loss_coeff'])
        self.agent = AtariAgent(algorithm, config)
Example #7
    def __init__(self, config):
        self.config = config

        self.envs = []
        for _ in range(config['env_num']):
            env = gym.make(config['env_name'])
            env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
            self.envs.append(env)
        self.vector_env = VectorEnv(self.envs)

        self.obs_batch = self.vector_env.reset()

        obs_shape = env.observation_space.shape
        act_dim = env.action_space.n

        model = AtariModel(act_dim)
        algorithm = parl.algorithms.IMPALA(
            model,
            sample_batch_steps=self.config['sample_batch_steps'],
            gamma=self.config['gamma'],
            vf_loss_coeff=self.config['vf_loss_coeff'],
            clip_rho_threshold=self.config['clip_rho_threshold'],
            clip_pg_rho_threshold=self.config['clip_pg_rho_threshold'])
        self.agent = AtariAgent(algorithm, obs_shape, act_dim)
Example #8
class Actor(object):
    def __init__(self, config):
        self.config = config

        self.envs = []
        for _ in range(config['env_num']):
            env = gym.make(config['env_name'])
            env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
            self.envs.append(env)
        self.vector_env = VectorEnv(self.envs)

        self.obs_batch = self.vector_env.reset()

        obs_shape = env.observation_space.shape
        act_dim = env.action_space.n

        model = AtariModel(act_dim)
        algorithm = parl.algorithms.IMPALA(
            model,
            sample_batch_steps=self.config['sample_batch_steps'],
            gamma=self.config['gamma'],
            vf_loss_coeff=self.config['vf_loss_coeff'],
            clip_rho_threshold=self.config['clip_rho_threshold'],
            clip_pg_rho_threshold=self.config['clip_pg_rho_threshold'])
        self.agent = AtariAgent(algorithm, obs_shape, act_dim)

    def sample(self):
        env_sample_data = {}
        for env_id in range(self.config['env_num']):
            env_sample_data[env_id] = defaultdict(list)

        for i in range(self.config['sample_batch_steps']):
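            # Step every env once per iteration; behaviour_logits are stored so the
            # learner can later form V-trace importance ratios against its (possibly
            # newer) target policy.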
            actions, behaviour_logits = self.agent.sample(
                np.stack(self.obs_batch))
            next_obs_batch, reward_batch, done_batch, info_batch = \
                    self.vector_env.step(actions)

            for env_id in range(self.config['env_num']):
                env_sample_data[env_id]['obs'].append(self.obs_batch[env_id])
                env_sample_data[env_id]['actions'].append(actions[env_id])
                env_sample_data[env_id]['behaviour_logits'].append(
                    behaviour_logits[env_id])
                env_sample_data[env_id]['rewards'].append(reward_batch[env_id])
                env_sample_data[env_id]['dones'].append(done_batch[env_id])

            self.obs_batch = next_obs_batch

        # Merge data of envs
        sample_data = defaultdict(list)
        for env_id in range(self.config['env_num']):
            for data_name in [
                    'obs', 'actions', 'behaviour_logits', 'rewards', 'dones'
            ]:
                sample_data[data_name].extend(
                    env_sample_data[env_id][data_name])

        # size of sample_data: env_num * sample_batch_steps
        for key in sample_data:
            sample_data[key] = np.stack(sample_data[key])

        return sample_data

    def get_metrics(self):
        metrics = defaultdict(list)
        for env in self.envs:
            monitor = get_wrapper_by_cls(env, MonitorEnv)
            if monitor is not None:
                for episode_rewards, episode_steps in monitor.next_episode_results(
                ):
                    metrics['episode_rewards'].append(episode_rewards)
                    metrics['episode_steps'].append(episode_steps)
        return metrics

    def set_weights(self, weights):
        self.agent.set_weights(weights)
Example #9
    def __init__(self, config):
        self.config = config

        self.sample_data_queue = queue.Queue()
        self.batch_buffer = defaultdict(list)

        #=========== Create Agent ==========
        env = gym.make(config['env_name'])
        env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
        obs_shape = env.observation_space.shape
        act_dim = env.action_space.n

        self.config['obs_shape'] = obs_shape
        self.config['act_dim'] = act_dim

        model = AtariModel(act_dim)
        algorithm = parl.algorithms.A3C(model,
                                        vf_loss_coeff=config['vf_loss_coeff'])
        self.agent = AtariAgent(
            algorithm,
            obs_shape=self.config['obs_shape'],
            predict_thread_num=self.config['predict_thread_num'],
            learn_data_provider=self.learn_data_provider)

        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only support training in single GPU,\
                    Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_YOU_WANT_TO_USE]` .'

        else:
            cpu_num = os.environ.get('CPU_NUM')
            assert cpu_num is not None and cpu_num == '1', 'Only support training in single CPU,\
                    Please set environment variable:  `export CPU_NUM=1`.'

        #========== Learner ==========
        self.lr, self.entropy_coeff = None, None
        self.lr_scheduler = PiecewiseScheduler(config['lr_scheduler'])
        self.entropy_coeff_scheduler = PiecewiseScheduler(
            config['entropy_coeff_scheduler'])

        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)

        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        # learn thread
        self.learn_thread = threading.Thread(target=self.run_learn)
        self.learn_thread.setDaemon(True)
        self.learn_thread.start()

        self.predict_input_queue = queue.Queue()

        # predict thread
        self.predict_threads = []
        for i in six.moves.range(self.config['predict_thread_num']):
            predict_thread = threading.Thread(target=self.run_predict,
                                              args=(i, ))
            predict_thread.setDaemon(True)
            predict_thread.start()
            self.predict_threads.append(predict_thread)

        #========== Remote Simulator ===========
        self.remote_count = 0

        self.remote_metrics_queue = queue.Queue()
        self.sample_total_steps = 0

        self.create_actors()
Example #10
class Learner(object):
    def __init__(self, config):
        self.config = config

        self.sample_data_queue = queue.Queue()
        self.batch_buffer = defaultdict(list)

        #=========== Create Agent ==========
        env = gym.make(config['env_name'])
        env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
        obs_shape = env.observation_space.shape
        act_dim = env.action_space.n

        self.config['obs_shape'] = obs_shape
        self.config['act_dim'] = act_dim

        model = AtariModel(act_dim)
        algorithm = parl.algorithms.A3C(model,
                                        vf_loss_coeff=config['vf_loss_coeff'])
        self.agent = AtariAgent(
            algorithm,
            obs_shape=self.config['obs_shape'],
            predict_thread_num=self.config['predict_thread_num'],
            learn_data_provider=self.learn_data_provider)

        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only support training in single GPU,\
                    Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_YOU_WANT_TO_USE]` .'

        else:
            cpu_num = os.environ.get('CPU_NUM')
            assert cpu_num is not None and cpu_num == '1', 'Only support training in single CPU,\
                    Please set environment variable:  `export CPU_NUM=1`.'

        #========== Learner ==========
        self.lr, self.entropy_coeff = None, None
        self.lr_scheduler = PiecewiseScheduler(config['lr_scheduler'])
        self.entropy_coeff_scheduler = PiecewiseScheduler(
            config['entropy_coeff_scheduler'])

        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)

        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        # learn thread
        self.learn_thread = threading.Thread(target=self.run_learn)
        self.learn_thread.setDaemon(True)
        self.learn_thread.start()

        self.predict_input_queue = queue.Queue()

        # predict thread
        self.predict_threads = []
        for i in six.moves.range(self.config['predict_thread_num']):
            predict_thread = threading.Thread(target=self.run_predict,
                                              args=(i, ))
            predict_thread.setDaemon(True)
            predict_thread.start()
            self.predict_threads.append(predict_thread)

        #========== Remote Simulator ===========
        self.remote_count = 0

        self.remote_metrics_queue = queue.Queue()
        self.sample_total_steps = 0

        self.create_actors()

    def learn_data_provider(self):
        """ Data generator for fluid.layers.py_reader
        """
        B = self.config['train_batch_size']
        while True:
            sample_data = self.sample_data_queue.get()
            self.sample_total_steps += len(sample_data['obs'])
            for key in sample_data:
                self.batch_buffer[key].extend(sample_data[key])

            if len(self.batch_buffer['obs']) >= B:
                batch = {}
                for key in self.batch_buffer:
                    batch[key] = np.array(self.batch_buffer[key][:B])

                obs_np = batch['obs'].astype('float32')
                actions_np = batch['actions'].astype('int64')
                advantages_np = batch['advantages'].astype('float32')
                target_values_np = batch['target_values'].astype('float32')

                self.lr = self.lr_scheduler.step()
                self.lr = np.array(self.lr, dtype='float32')
                self.entropy_coeff = self.entropy_coeff_scheduler.step()
                self.entropy_coeff = np.array(self.entropy_coeff,
                                              dtype='float32')

                yield [
                    obs_np, actions_np, advantages_np, target_values_np,
                    self.lr, self.entropy_coeff
                ]

                for key in self.batch_buffer:
                    self.batch_buffer[key] = self.batch_buffer[key][B:]

    def run_predict(self, thread_id):
        """ predict thread
        """
        batch_ident = []
        batch_obs = []
        while True:
            ident, obs = self.predict_input_queue.get()

            batch_ident.append(ident)
            batch_obs.append(obs)
            while len(batch_obs) < self.config['max_predict_batch_size']:
                try:
                    ident, obs = self.predict_input_queue.get_nowait()
                    batch_ident.append(ident)
                    batch_obs.append(obs)
                except queue.Empty:
                    break
            if batch_obs:
                batch_obs = np.array(batch_obs)
                actions, values = self.agent.sample(batch_obs, thread_id)

                for i, ident in enumerate(batch_ident):
                    self.predict_output_queues[ident].put(
                        (actions[i], values[i]))
                batch_ident = []
                batch_obs = []

    def run_learn(self):
        """ Learn loop
        """
        while True:
            with self.learn_time_stat:
                total_loss, pi_loss, vf_loss, entropy = self.agent.learn()

            self.total_loss_stat.add(total_loss)
            self.pi_loss_stat.add(pi_loss)
            self.vf_loss_stat.add(vf_loss)
            self.entropy_stat.add(entropy)

    def create_actors(self):
        """ Connect to the cluster and start sampling of the remote actor.
        """
        parl.connect(self.config['master_address'])

        logger.info('Waiting for {} remote actors to connect.'.format(
            self.config['actor_num']))

        ident = 0
        self.predict_output_queues = []

        for i in six.moves.range(self.config['actor_num']):

            self.remote_count += 1
            logger.info('Remote simulator count: {}'.format(self.remote_count))
            if self.start_time is None:
                self.start_time = time.time()

            q = queue.Queue()
            self.predict_output_queues.append(q)

            remote_thread = threading.Thread(target=self.run_remote_sample,
                                             args=(ident, ))
            remote_thread.setDaemon(True)
            remote_thread.start()
            ident += 1

    def run_remote_sample(self, ident):
        """ Interacts with remote simulator.
        """
        remote_actor = Actor(self.config)
        mem = defaultdict(list)

        obs = remote_actor.reset()
        while True:
            self.predict_input_queue.put((ident, obs))
            action, value = self.predict_output_queues[ident].get()

            next_obs, reward, done = remote_actor.step(action)

            mem['obs'].append(obs)
            mem['actions'].append(action)
            mem['rewards'].append(reward)
            mem['values'].append(value)

            if done:
                next_value = 0
                advantages = calc_gae(mem['rewards'], mem['values'],
                                      next_value, self.config['gamma'],
                                      self.config['lambda'])
                target_values = advantages + mem['values']

                self.sample_data_queue.put({
                    'obs': mem['obs'],
                    'actions': mem['actions'],
                    'advantages': advantages,
                    'target_values': target_values
                })

                mem = defaultdict(list)

                next_obs = remote_actor.reset()

            elif len(mem['obs']) == self.config['t_max'] + 1:
                next_value = mem['values'][-1]
                advantages = calc_gae(mem['rewards'][:-1], mem['values'][:-1],
                                      next_value, self.config['gamma'],
                                      self.config['lambda'])
                target_values = advantages + mem['values'][:-1]

                self.sample_data_queue.put({
                    'obs': mem['obs'][:-1],
                    'actions': mem['actions'][:-1],
                    'advantages': advantages,
                    'target_values': target_values
                })

                for key in mem:
                    mem[key] = [mem[key][-1]]

            obs = next_obs

            if done:
                metrics = remote_actor.get_metrics()
                if metrics:
                    self.remote_metrics_queue.put(metrics)

    def log_metrics(self):
        """ Log metrics of learner and simulators
        """
        if self.start_time is None:
            return

        metrics = []
        while True:
            try:
                metric = self.remote_metrics_queue.get_nowait()
                metrics.append(metric)
            except queue.Empty:
                break

        episode_rewards, episode_steps = [], []
        for x in metrics:
            episode_rewards.extend(x['episode_rewards'])
            episode_steps.extend(x['episode_steps'])
        max_episode_rewards, mean_episode_rewards, min_episode_rewards, \
                max_episode_steps, mean_episode_steps, min_episode_steps =\
                None, None, None, None, None, None
        if episode_rewards:
            mean_episode_rewards = np.mean(np.array(episode_rewards).flatten())
            max_episode_rewards = np.max(np.array(episode_rewards).flatten())
            min_episode_rewards = np.min(np.array(episode_rewards).flatten())

            mean_episode_steps = np.mean(np.array(episode_steps).flatten())
            max_episode_steps = np.max(np.array(episode_steps).flatten())
            min_episode_steps = np.min(np.array(episode_steps).flatten())

        metric = {
            'Sample steps': self.sample_total_steps,
            'max_episode_rewards': max_episode_rewards,
            'mean_episode_rewards': mean_episode_rewards,
            'min_episode_rewards': min_episode_rewards,
            'max_episode_steps': max_episode_steps,
            'mean_episode_steps': mean_episode_steps,
            'min_episode_steps': min_episode_steps,
            'total_loss': self.total_loss_stat.mean,
            'pi_loss': self.pi_loss_stat.mean,
            'vf_loss': self.vf_loss_stat.mean,
            'entropy': self.entropy_stat.mean,
            'learn_time_s': self.learn_time_stat.mean,
            'elapsed_time_s': int(time.time() - self.start_time),
            'lr': self.lr,
            'entropy_coeff': self.entropy_coeff,
        }

        for key, value in metric.items():
            if value is not None:
                summary.add_scalar(key, value, self.sample_total_steps)

        logger.info(metric)
Example #11
class Learner(object):
    def __init__(self, config):
        self.config = config
        self.sample_data_queue = queue.Queue(
            maxsize=config['sample_queue_max_size'])

        #=========== Create Agent ==========
        env = gym.make(config['env_name'])
        env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
        obs_shape = env.observation_space.shape

        act_dim = env.action_space.n

        model = AtariModel(act_dim)
        algorithm = parl.algorithms.IMPALA(
            model,
            sample_batch_steps=self.config['sample_batch_steps'],
            gamma=self.config['gamma'],
            vf_loss_coeff=self.config['vf_loss_coeff'],
            clip_rho_threshold=self.config['clip_rho_threshold'],
            clip_pg_rho_threshold=self.config['clip_pg_rho_threshold'])
        self.agent = AtariAgent(algorithm, obs_shape, act_dim,
                                self.learn_data_provider)

        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only support training in single GPU,\
                    Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_TO_USE]` .'

        self.cache_params = self.agent.get_weights()
        self.params_lock = threading.Lock()
        self.params_updated = False
        self.cache_params_sent_cnt = 0
        self.total_params_sync = 0

        #========== Learner ==========
        self.lr, self.entropy_coeff = None, None
        self.lr_scheduler = PiecewiseScheduler(config['lr_scheduler'])
        self.entropy_coeff_scheduler = PiecewiseScheduler(
            config['entropy_coeff_scheduler'])

        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)
        self.kl_stat = WindowStat(100)
        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        self.learn_thread = threading.Thread(target=self.run_learn)
        self.learn_thread.setDaemon(True)
        self.learn_thread.start()

        #========== Remote Actor ===========
        self.remote_count = 0

        self.batch_buffer = []
        self.remote_metrics_queue = queue.Queue()
        self.sample_total_steps = 0

        self.create_actors()

    def learn_data_provider(self):
        """ Data generator for fluid.layers.py_reader
        """
        while True:
            sample_data = self.sample_data_queue.get()
            self.sample_total_steps += sample_data['obs'].shape[0]
            self.batch_buffer.append(sample_data)

            buffer_size = sum(
                [data['obs'].shape[0] for data in self.batch_buffer])
            if buffer_size >= self.config['train_batch_size']:
                batch = {}
                for key in self.batch_buffer[0].keys():
                    batch[key] = np.concatenate(
                        [data[key] for data in self.batch_buffer])
                self.batch_buffer = []

                obs_np = batch['obs'].astype('float32')
                actions_np = batch['actions'].astype('int64')
                behaviour_logits_np = batch['behaviour_logits'].astype(
                    'float32')
                rewards_np = batch['rewards'].astype('float32')
                dones_np = batch['dones'].astype('float32')

                self.lr = self.lr_scheduler.step()
                self.entropy_coeff = self.entropy_coeff_scheduler.step()

                yield [
                    obs_np, actions_np, behaviour_logits_np, rewards_np,
                    dones_np,
                    np.float32(self.lr),
                    np.array([self.entropy_coeff], dtype='float32')
                ]

    def run_learn(self):
        """ Learn loop
        """
        while True:
            with self.learn_time_stat:
                total_loss, pi_loss, vf_loss, entropy, kl = self.agent.learn()

            self.params_updated = True

            self.total_loss_stat.add(total_loss)
            self.pi_loss_stat.add(pi_loss)
            self.vf_loss_stat.add(vf_loss)
            self.entropy_stat.add(entropy)
            self.kl_stat.add(kl)

    def create_actors(self):
        """ Connect to the cluster and start sampling of the remote actor.
        """
        parl.connect(self.config['master_address'])

        logger.info('Waiting for {} remote actors to connect.'.format(
            self.config['actor_num']))

        for i in range(self.config['actor_num']):
            self.remote_count += 1
            logger.info('Remote actor count: {}'.format(self.remote_count))
            if self.start_time is None:
                self.start_time = time.time()

            remote_thread = threading.Thread(target=self.run_remote_sample)
            remote_thread.setDaemon(True)
            remote_thread.start()

    def run_remote_sample(self):
        """ Sample data from remote actor and update parameters of remote actor.
        """
        remote_actor = Actor(self.config)

        cnt = 0
        remote_actor.set_weights(self.cache_params)
        while True:
            batch = remote_actor.sample()
            self.sample_data_queue.put(batch)

            cnt += 1
            if cnt % self.config['get_remote_metrics_interval'] == 0:
                metrics = remote_actor.get_metrics()
                if metrics:
                    self.remote_metrics_queue.put(metrics)

            self.params_lock.acquire()

            if self.params_updated and self.cache_params_sent_cnt >= self.config[
                    'params_broadcast_interval']:
                self.params_updated = False
                self.cache_params = self.agent.get_weights()
                self.cache_params_sent_cnt = 0
            self.cache_params_sent_cnt += 1
            self.total_params_sync += 1

            self.params_lock.release()

            remote_actor.set_weights(self.cache_params)

    def log_metrics(self):
        """ Log metrics of learner and actors
        """
        if self.start_time is None:
            return

        metrics = []
        while True:
            try:
                metric = self.remote_metrics_queue.get_nowait()
                metrics.append(metric)
            except queue.Empty:
                break

        episode_rewards, episode_steps = [], []
        for x in metrics:
            episode_rewards.extend(x['episode_rewards'])
            episode_steps.extend(x['episode_steps'])
        max_episode_rewards, mean_episode_rewards, min_episode_rewards, \
                max_episode_steps, mean_episode_steps, min_episode_steps =\
                None, None, None, None, None, None
        if episode_rewards:
            mean_episode_rewards = np.mean(np.array(episode_rewards).flatten())
            max_episode_rewards = np.max(np.array(episode_rewards).flatten())
            min_episode_rewards = np.min(np.array(episode_rewards).flatten())

            mean_episode_steps = np.mean(np.array(episode_steps).flatten())
            max_episode_steps = np.max(np.array(episode_steps).flatten())
            min_episode_steps = np.min(np.array(episode_steps).flatten())

        metric = {
            'sample_steps': self.sample_total_steps,
            'max_episode_rewards': max_episode_rewards,
            'mean_episode_rewards': mean_episode_rewards,
            'min_episode_rewards': min_episode_rewards,
            'max_episode_steps': max_episode_steps,
            'mean_episode_steps': mean_episode_steps,
            'min_episode_steps': min_episode_steps,
            'sample_queue_size': self.sample_data_queue.qsize(),
            'total_params_sync': self.total_params_sync,
            'cache_params_sent_cnt': self.cache_params_sent_cnt,
            'total_loss': self.total_loss_stat.mean,
            'pi_loss': self.pi_loss_stat.mean,
            'vf_loss': self.vf_loss_stat.mean,
            'entropy': self.entropy_stat.mean,
            'kl': self.kl_stat.mean,
            'learn_time_s': self.learn_time_stat.mean,
            'elapsed_time_s': int(time.time() - self.start_time),
            'lr': self.lr,
            'entropy_coeff': self.entropy_coeff,
        }

        for key, value in metric.items():
            if value is not None:
                summary.add_scalar(key, value, self.sample_total_steps)

        logger.info(metric)
Example #12
if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('--game_name', default='Phoenix-v0')
    args = parser.parse_args()

    test_env = get_player(args.game_name,
                          image_size=IMAGE_SIZE,
                          context_len=CONTEXT_LEN)
    save_path = './dqn_model.ckpt'

    act_dim = test_env.action_space.n

    model = AtariModel(act_dim)
    algorithm = parl.algorithms.DQN(model, act_dim=act_dim, gamma=GAMMA)

    agent = AtariAgent(algorithm,
                       act_dim=act_dim,
                       start_lr=LEARNING_RATE,
                       total_step=test_number,
                       update_freq=UPDATE_FREQ)

    agent.restore(save_path)
    eval_rewards = []
    flag = 0

    while flag < test_number:
        eval_reward = run_evaluate_episode(test_env, agent)

        #eval_rewards.append(eval_reward)
        logger.info("eval_agent done, (steps, eval_reward): ({}, {})".format(
            flag, eval_reward))
        flag += 1
Example #13
class Actor(object):
    def __init__(self, config):
        self.config = config

        self.envs = []
        for _ in range(config['env_num']):
            env = gym.make(config['env_name'])
            env.seed(ENV_SEED)
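            # MonitorEnv records per-episode reward/steps (read back in get_metrics),
            # ClipRewardEnv clips the reward signal, and StateStack stacks the last
            # k=4 observations into a single state.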
            env = MonitorEnv(env)
            env = ClipRewardEnv(env)
            env = StateStack(env, k=4)
            self.envs.append(env)
            # env = gym.make(config['env_name'])
            # obs_shape = env.observation_space.shape

        self.vector_env = VectorEnv(self.envs)

        self.obs_batch = self.vector_env.reset()
        obs_dim = self.envs[0].observation_space.shape
        act_dim = self.envs[0].action_space.shape[0]
        max_action = float(self.envs[0].action_space.high[0])
        # obs_shape = env.observation_space.shape
        # act_dim = env.action_space.n

        model = MujocoModel(act_dim)
        algorithm = DVtrace(
            model,
            max_action,
            sample_batch_steps=self.config['sample_batch_steps'],
            gamma=self.config['gamma'],
            vf_loss_coeff=self.config['vf_loss_coeff'],
            clip_rho_threshold=self.config['clip_rho_threshold'],
            clip_pg_rho_threshold=self.config['clip_pg_rho_threshold'])
        self.agent = AtariAgent(algorithm, obs_dim, act_dim)

    def sample(self):
        env_sample_data = {}
        for env_id in range(self.config['env_num']):
            env_sample_data[env_id] = defaultdict(list)

        for i in range(self.config['sample_batch_steps']):
            actions, mean, std = self.agent.sample(self.obs_batch)
            next_obs_batch, reward_batch, done_batch, info_batch = \
                    self.vector_env.step(actions)

            for env_id in range(self.config['env_num']):
                env_sample_data[env_id]['obs'].append(self.obs_batch[env_id])
                env_sample_data[env_id]['actions'].append(actions[env_id])
                env_sample_data[env_id]['mean'].append(mean[env_id])
                env_sample_data[env_id]['std'].append(std[env_id])
                env_sample_data[env_id]['rewards'].append(reward_batch[env_id])
                env_sample_data[env_id]['dones'].append(done_batch[env_id])

            self.obs_batch = next_obs_batch

        # Merge data of envs
        sample_data = defaultdict(list)
        for env_id in range(self.config['env_num']):
            for data_name in [
                    'obs', 'actions', 'mean', 'std', 'rewards', 'dones'
            ]:
                sample_data[data_name].extend(
                    env_sample_data[env_id][data_name])

        # size of sample_data: env_num * sample_batch_steps
        for key in sample_data:
            sample_data[key] = np.stack(sample_data[key])

        return sample_data

    def get_metrics(self):
        metrics = defaultdict(list)
        for env in self.envs:
            monitor = get_wrapper_by_cls(env, MonitorEnv)
            if monitor is not None:
                for episode_rewards, episode_steps in monitor.next_episode_results(
                ):
                    metrics['episode_rewards'].append(episode_rewards)
                    metrics['episode_steps'].append(episode_steps)
        return metrics

    def set_weights(self, weights):
        self.agent.set_weights(weights)
Example #14
def main():
    # Prepare environments
    # env = get_player(
    #     args.rom, image_size=IMAGE_SIZE, train=True, frame_skip=FRAME_SKIP)
    # test_env = get_player(
    #     args.rom,
    #     image_size=IMAGE_SIZE,
    #     frame_skip=FRAME_SKIP,
    #     context_len=CONTEXT_LEN)
    env = gym.make("pseudoslam:RobotExploration-v0")
    env = MonitorEnv(env, param={'goal': args.goal, 'obs': args.obs})

    # obs = env.reset()
    # print(obs.shape)
    # raise NotImplementedError
    # Init Prioritized Replay Memory
    per = ProportionalPER(alpha=0.6, seg_num=args.batch_size, size=MEMORY_SIZE)
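    # alpha=0.6 controls how strongly sampling is biased toward high-priority
    # (large TD-error) transitions; alpha=0 would reduce to uniform sampling.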
    suffix = args.suffix + "_Rp{}_Goal{}_Obs{}".format(args.Rp, args.goal,
                                                       args.obs)
    logdir = os.path.join(args.logdir, suffix)
    if not os.path.exists(logdir):
        os.mkdir(logdir)
    logger.set_dir(logdir)
    modeldir = os.path.join(args.modeldir, suffix)
    if not os.path.exists(modeldir):
        os.mkdir(modeldir)

    # Prepare PARL agent
    act_dim = env.action_space.n
    model = AtariModel(act_dim)
    if args.alg == 'ddqn':
        algorithm = PrioritizedDoubleDQN(model,
                                         act_dim=act_dim,
                                         gamma=GAMMA,
                                         lr=LEARNING_RATE)
    elif args.alg == 'dqn':
        algorithm = PrioritizedDQN(model,
                                   act_dim=act_dim,
                                   gamma=GAMMA,
                                   lr=LEARNING_RATE)
    agent = AtariAgent(algorithm, act_dim=act_dim, update_freq=UPDATE_FREQ)
    if os.path.exists(args.load):
        agent.restore(args.load)
    # Replay memory warmup
    total_step = 0
    with tqdm(total=MEMORY_SIZE, desc='[Replay Memory Warm Up]') as pbar:
        mem = []
        while total_step < MEMORY_WARMUP_SIZE:
            total_reward, steps, _, _ = run_episode(env,
                                                    agent,
                                                    per,
                                                    mem=mem,
                                                    warmup=True)
            total_step += steps
            pbar.update(steps)
    per.elements.from_list(mem[:int(MEMORY_WARMUP_SIZE)])

    # env_name = args.rom.split('/')[-1].split('.')[0]
    test_flag = 0
    total_steps = 0
    pbar = tqdm(total=args.train_total_steps)
    save_steps = 0
    while total_steps < args.train_total_steps:
        # start epoch
        total_reward, steps, loss, info = run_episode(env,
                                                      agent,
                                                      per,
                                                      train=True)
        total_steps += steps
        save_steps += steps
        pbar.set_description('[train]exploration:{}'.format(agent.exploration))
        summary.add_scalar('train/score', total_reward, total_steps)
        summary.add_scalar('train/loss', loss,
                           total_steps)  # mean of total loss
        summary.add_scalar('train/exploration', agent.exploration, total_steps)
        summary.add_scalar('train/steps', steps, total_steps)
        for key in info.keys():
            summary.add_scalar('train/' + key, info[key], total_steps)
        pbar.update(steps)

        if total_steps // args.test_every_steps >= test_flag:
            print('start test!')
            while total_steps // args.test_every_steps >= test_flag:
                test_flag += 1
            pbar.write("testing")
            test_rewards = []
            for _ in tqdm(range(3), desc='eval agent'):
                eval_reward = run_evaluate_episode(env, agent)
                test_rewards.append(eval_reward)
            eval_reward = np.mean(test_rewards)
            logger.info(
                "eval_agent done, (steps, eval_reward): ({}, {})".format(
                    total_steps, eval_reward))
            summary.add_scalar('eval/reward', eval_reward, total_steps)
        if save_steps >= 100000:
            modeldir_ = os.path.join(modeldir, 'itr_{}'.format(total_steps))
            if not os.path.exists(modeldir_):
                os.mkdir(modeldir_)
            print('save model!', modeldir_)
            agent.save(modeldir_)
            save_steps = 0

    pbar.close()
Example #15
class Learner(object):
    def __init__(self, config):
        self.config = config

        #=========== Create Agent ==========
        env = gym.make(config['env_name'])
        env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
        obs_shape = env.observation_space.shape
        act_dim = env.action_space.n
        self.config['obs_shape'] = obs_shape
        self.config['act_dim'] = act_dim

        model = AtariModel(act_dim)
        algorithm = parl.algorithms.A3C(model,
                                        vf_loss_coeff=config['vf_loss_coeff'])
        self.agent = AtariAgent(algorithm, config)

        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only support training in single GPU,\
                    Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_TO_USE]` .'

        #========== Learner ==========

        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)
        self.lr = None
        self.entropy_coeff = None

        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        #========== Remote Actor ===========
        self.remote_count = 0
        self.sample_data_queue = queue.Queue()

        self.remote_metrics_queue = queue.Queue()
        self.sample_total_steps = 0

        self.params_queues = []
        self.create_actors()

    def create_actors(self):
        """ Connect to the cluster and start sampling of the remote actor.
        """
        parl.connect(self.config['master_address'])

        logger.info('Waiting for {} remote actors to connect.'.format(
            self.config['actor_num']))

        for i in six.moves.range(self.config['actor_num']):
            params_queue = queue.Queue()
            self.params_queues.append(params_queue)

            self.remote_count += 1
            logger.info('Remote actor count: {}'.format(self.remote_count))

            remote_thread = threading.Thread(target=self.run_remote_sample,
                                             args=(params_queue, ))
            remote_thread.setDaemon(True)
            remote_thread.start()

        logger.info('All remote actors are ready, begin to learn.')
        self.start_time = time.time()

    def run_remote_sample(self, params_queue):
        """ Sample data from remote actor and update parameters of remote actor.
        """
        remote_actor = Actor(self.config)

        cnt = 0
        while True:
            latest_params = params_queue.get()
            remote_actor.set_weights(latest_params)
            batch = remote_actor.sample()

            self.sample_data_queue.put(batch)

            cnt += 1
            if cnt % self.config['get_remote_metrics_interval'] == 0:
                metrics = remote_actor.get_metrics()
                if metrics:
                    self.remote_metrics_queue.put(metrics)

    def step(self):
        """
        1. kick off all actors to synchronize parameters and sample data;
        2. collect sample data of all actors;
        3. update parameters.
        """

        latest_params = self.agent.get_weights()
        for params_queue in self.params_queues:
            params_queue.put(latest_params)

        train_batch = defaultdict(list)
        for i in range(self.config['actor_num']):
            sample_data = self.sample_data_queue.get()
            for key, value in sample_data.items():
                train_batch[key].append(value)

            self.sample_total_steps += sample_data['obs'].shape[0]

        for key, value in train_batch.items():
            train_batch[key] = np.concatenate(value)

        with self.learn_time_stat:
            total_loss, pi_loss, vf_loss, entropy, lr, entropy_coeff = self.agent.learn(
                obs_np=train_batch['obs'],
                actions_np=train_batch['actions'],
                advantages_np=train_batch['advantages'],
                target_values_np=train_batch['target_values'])

        self.total_loss_stat.add(total_loss)
        self.pi_loss_stat.add(pi_loss)
        self.vf_loss_stat.add(vf_loss)
        self.entropy_stat.add(entropy)
        self.lr = lr
        self.entropy_coeff = entropy_coeff

    def log_metrics(self):
        """ Log metrics of learner and actors
        """
        if self.start_time is None:
            return

        metrics = []
        while True:
            try:
                metric = self.remote_metrics_queue.get_nowait()
                metrics.append(metric)
            except queue.Empty:
                break

        episode_rewards, episode_steps = [], []
        for x in metrics:
            episode_rewards.extend(x['episode_rewards'])
            episode_steps.extend(x['episode_steps'])
        max_episode_rewards, mean_episode_rewards, min_episode_rewards, \
                max_episode_steps, mean_episode_steps, min_episode_steps =\
                None, None, None, None, None, None
        if episode_rewards:
            mean_episode_rewards = np.mean(np.array(episode_rewards).flatten())
            max_episode_rewards = np.max(np.array(episode_rewards).flatten())
            min_episode_rewards = np.min(np.array(episode_rewards).flatten())

            mean_episode_steps = np.mean(np.array(episode_steps).flatten())
            max_episode_steps = np.max(np.array(episode_steps).flatten())
            min_episode_steps = np.min(np.array(episode_steps).flatten())

        metric = {
            'sample_steps': self.sample_total_steps,
            'max_episode_rewards': max_episode_rewards,
            'mean_episode_rewards': mean_episode_rewards,
            'min_episode_rewards': min_episode_rewards,
            'max_episode_steps': max_episode_steps,
            'mean_episode_steps': mean_episode_steps,
            'min_episode_steps': min_episode_steps,
            'total_loss': self.total_loss_stat.mean,
            'pi_loss': self.pi_loss_stat.mean,
            'vf_loss': self.vf_loss_stat.mean,
            'entropy': self.entropy_stat.mean,
            'learn_time_s': self.learn_time_stat.mean,
            'elapsed_time_s': int(time.time() - self.start_time),
            'lr': self.lr,
            'entropy_coeff': self.entropy_coeff,
        }

        for key, value in metric.items():
            if value is not None:
                summary.add_scalar(key, value, self.sample_total_steps)

        logger.info(metric)

    def should_stop(self):
        return self.sample_total_steps >= self.config['max_sample_steps']
Example #16
def main():
    # Prepare environments
    env = get_player(
        args.rom, image_size=IMAGE_SIZE, train=True, frame_skip=FRAME_SKIP)
    test_env = get_player(
        args.rom,
        image_size=IMAGE_SIZE,
        frame_skip=FRAME_SKIP,
        context_len=CONTEXT_LEN)

    # Init Prioritized Replay Memory
    per = ProportionalPER(alpha=0.6, seg_num=args.batch_size, size=MEMORY_SIZE)

    # Prepare PARL agent
    act_dim = env.action_space.n
    model = AtariModel(act_dim)
    if args.alg == 'ddqn':
        algorithm = PrioritizedDoubleDQN(
            model, act_dim=act_dim, gamma=GAMMA, lr=LEARNING_RATE)
    elif args.alg == 'dqn':
        algorithm = PrioritizedDQN(
            model, act_dim=act_dim, gamma=GAMMA, lr=LEARNING_RATE)
    agent = AtariAgent(algorithm, act_dim=act_dim, update_freq=UPDATE_FREQ)

    # Replay memory warmup
    total_step = 0
    with tqdm(total=MEMORY_SIZE, desc='[Replay Memory Warm Up]') as pbar:
        mem = []
        while total_step < MEMORY_WARMUP_SIZE:
            total_reward, steps, _ = run_episode(
                env, agent, per, mem=mem, warmup=True)
            total_step += steps
            pbar.update(steps)
    per.elements.from_list(mem[:int(MEMORY_WARMUP_SIZE)])

    env_name = args.rom.split('/')[-1].split('.')[0]

    test_flag = 0
    total_steps = 0
    pbar = tqdm(total=args.train_total_steps)
    while total_steps < args.train_total_steps:
        # start epoch
        total_reward, steps, loss = run_episode(env, agent, per, train=True)
        total_steps += steps
        pbar.set_description('[train]exploration:{}'.format(agent.exploration))
        summary.add_scalar('{}/score'.format(env_name), total_reward,
                           total_steps)
        summary.add_scalar('{}/loss'.format(env_name), loss,
                           total_steps)  # mean of total loss
        summary.add_scalar('{}/exploration'.format(env_name),
                           agent.exploration, total_steps)
        pbar.update(steps)

        if total_steps // args.test_every_steps >= test_flag:
            while total_steps // args.test_every_steps >= test_flag:
                test_flag += 1
            pbar.write("testing")
            test_rewards = []
            for _ in tqdm(range(3), desc='eval agent'):
                eval_reward = run_evaluate_episode(test_env, agent)
                test_rewards.append(eval_reward)
            eval_reward = np.mean(test_rewards)
            logger.info(
                "eval_agent done, (steps, eval_reward): ({}, {})".format(
                    total_steps, eval_reward))
            summary.add_scalar('{}/eval'.format(env_name), eval_reward,
                               total_steps)

    pbar.close()
Example #17
class Actor(object):
    def __init__(self, config):
        self.config = config

        self.envs = []
        for _ in range(config['env_num']):
            env = gym.make(config['env_name'])
            env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
            self.envs.append(env)
        self.vector_env = VectorEnv(self.envs)

        self.obs_batch = self.vector_env.reset()

        obs_shape = env.observation_space.shape
        act_dim = env.action_space.n

        self.config['obs_shape'] = obs_shape
        self.config['act_dim'] = act_dim

        model = AtariModel(act_dim)
        algorithm = parl.algorithms.A3C(model,
                                        vf_loss_coeff=config['vf_loss_coeff'])
        self.agent = AtariAgent(algorithm, config)

    def sample(self):
        sample_data = defaultdict(list)

        env_sample_data = {}
        for env_id in range(self.config['env_num']):
            env_sample_data[env_id] = defaultdict(list)

        for i in range(self.config['sample_batch_steps']):
            actions_batch, values_batch = self.agent.sample(
                np.stack(self.obs_batch))
            next_obs_batch, reward_batch, done_batch, info_batch = \
                    self.vector_env.step(actions_batch)

            for env_id in range(self.config['env_num']):
                env_sample_data[env_id]['obs'].append(self.obs_batch[env_id])
                env_sample_data[env_id]['actions'].append(
                    actions_batch[env_id])
                env_sample_data[env_id]['rewards'].append(reward_batch[env_id])
                env_sample_data[env_id]['dones'].append(done_batch[env_id])
                env_sample_data[env_id]['values'].append(values_batch[env_id])

                # Calculate advantages when the episode is done or reach max sample steps.
                if done_batch[
                        env_id] or i == self.config['sample_batch_steps'] - 1:
                    next_value = 0
                    if not done_batch[env_id]:
                        next_obs = np.expand_dims(next_obs_batch[env_id], 0)
                        next_value = self.agent.value(next_obs)

                    values = env_sample_data[env_id]['values']
                    rewards = env_sample_data[env_id]['rewards']
                    advantages = calc_gae(rewards, values, next_value,
                                          self.config['gamma'],
                                          self.config['lambda'])
                    target_values = advantages + values

                    sample_data['obs'].extend(env_sample_data[env_id]['obs'])
                    sample_data['actions'].extend(
                        env_sample_data[env_id]['actions'])
                    sample_data['advantages'].extend(advantages)
                    sample_data['target_values'].extend(target_values)

                    env_sample_data[env_id] = defaultdict(list)

            self.obs_batch = next_obs_batch

        # size of sample_data: env_num * sample_batch_steps
        for key in sample_data:
            sample_data[key] = np.stack(sample_data[key])

        return sample_data

    def get_metrics(self):
        metrics = defaultdict(list)
        for env in self.envs:
            monitor = get_wrapper_by_cls(env, MonitorEnv)
            if monitor is not None:
                for episode_rewards, episode_steps in monitor.next_episode_results(
                ):
                    metrics['episode_rewards'].append(episode_rewards)
                    metrics['episode_steps'].append(episode_steps)
        return metrics

    def set_weights(self, params):
        self.agent.set_weights(params)