Example 1
class TimeStat(object):
    """A time stat for logging the elapsed time of code running

    Example:
        time_stat = TimeStat()
        with time_stat:
            # some code
        print(time_stat.mean)
    """
    def __init__(self, window_size=1):
        self.time_samples = WindowStat(window_size)
        self._start_time = None

    def __enter__(self):
        self._start_time = time.time()

    def __exit__(self, type, value, tb):
        time_delta = time.time() - self._start_time
        self.time_samples.add(time_delta)

    @property
    def mean(self):
        return self.time_samples.mean

    @property
    def min(self):
        return self.time_samples.min

    @property
    def max(self):
        return self.time_samples.max
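
TimeStat relies on a WindowStat helper that is not included in this listing. Below is a minimal sketch of such a rolling-window statistic, assuming it only needs add(), count, mean, min and max (the attributes used throughout these examples); it is an illustrative reconstruction, not the library's own implementation.

import numpy as np


class WindowStat(object):
    """Rolling-window statistic over the most recent `window_size` samples."""

    def __init__(self, window_size):
        self.items = [None] * window_size
        self.idx = 0
        self.count = 0

    def add(self, obj):
        # Overwrite the oldest slot once the window is full.
        self.items[self.idx] = obj
        self.idx = (self.idx + 1) % len(self.items)
        self.count += 1

    @property
    def _filled(self):
        # Only the slots that have been written so far.
        return self.items[:min(self.count, len(self.items))]

    @property
    def mean(self):
        return np.mean(self._filled) if self.count else None

    @property
    def min(self):
        return np.min(self._filled) if self.count else None

    @property
    def max(self):
        return np.max(self._filled) if self.count else None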
Example 2
    def __init__(self, config):
        self.config = config

        env = gym.make(self.config['env_name'])
        self.config['obs_dim'] = env.observation_space.shape[0]
        self.config['act_dim'] = env.action_space.shape[0]

        self.obs_filter = MeanStdFilter(self.config['obs_dim'])
        self.noise = SharedNoiseTable(self.config['noise_size'])

        model = MujocoModel(self.config['act_dim'])
        algorithm = ES(model)
        self.agent = MujocoAgent(algorithm, self.config)

        self.latest_flat_weights = self.agent.get_flat_weights()
        self.latest_obs_filter = self.obs_filter.as_serializable()

        self.sample_total_episodes = 0
        self.sample_total_steps = 0

        self.actors_signal_input_queues = []
        self.actors_output_queues = []

        self.create_actors()

        self.eval_rewards_stat = WindowStat(self.config['report_window_size'])
        self.eval_lengths_stat = WindowStat(self.config['report_window_size'])
Example 3
    def __init__(self, config):
        self.config = config
        self.sample_data_queue = queue.Queue(
            maxsize=config['sample_queue_max_size'])

        #=========== Create Agent ==========
        env = gym.make(config['env_name'])
        env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
        obs_shape = env.observation_space.shape

        act_dim = env.action_space.n

        model = AtariModel(act_dim)
        algorithm = parl.algorithms.IMPALA(
            model,
            sample_batch_steps=self.config['sample_batch_steps'],
            gamma=self.config['gamma'],
            vf_loss_coeff=self.config['vf_loss_coeff'],
            clip_rho_threshold=self.config['clip_rho_threshold'],
            clip_pg_rho_threshold=self.config['clip_pg_rho_threshold'])
        self.agent = AtariAgent(algorithm, obs_shape, act_dim,
                                self.learn_data_provider)

        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only support training in single GPU,\
                    Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_TO_USE]` .'

        self.cache_params = self.agent.get_weights()
        self.params_lock = threading.Lock()
        self.params_updated = False
        self.cache_params_sent_cnt = 0
        self.total_params_sync = 0

        #========== Learner ==========
        self.lr, self.entropy_coeff = None, None
        self.lr_scheduler = PiecewiseScheduler(config['lr_scheduler'])
        self.entropy_coeff_scheduler = PiecewiseScheduler(
            config['entropy_coeff_scheduler'])

        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)
        self.kl_stat = WindowStat(100)
        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        self.learn_thread = threading.Thread(target=self.run_learn)
        self.learn_thread.setDaemon(True)
        self.learn_thread.start()

        #========== Remote Actor ===========
        self.remote_count = 0

        self.batch_buffer = []
        self.remote_metrics_queue = queue.Queue()
        self.sample_total_steps = 0

        self.create_actors()
Example 4
    def __init__(self, config):
        self.config = config
        self.sample_data_queue = queue.Queue(
            maxsize=config['sample_queue_max_size'])

        #=========== Create Agent ==========
        env = IntraBuildingEnv("config.ini")
        self._mansion_attr = env._mansion.attribute
        self._obs_dim = obs_dim(self._mansion_attr)
        self._act_dim = act_dim(self._mansion_attr)

        self.config['obs_shape'] = self._obs_dim
        self.config['act_dim'] = self._act_dim

        model = RLDispatcherModel(self._act_dim)
        algorithm = IMPALA(model, hyperparas=config)
        self.agent = ElevatorAgent(algorithm, config, self.learn_data_provider)

        self.cache_params = self.agent.get_params()
        self.params_lock = threading.Lock()
        self.params_updated = False
        self.cache_params_sent_cnt = 0
        self.total_params_sync = 0

        #========== Learner ==========
        self.lr, self.entropy_coeff = None, None
        self.lr_scheduler = PiecewiseScheduler(config['lr_scheduler'])
        self.entropy_coeff_scheduler = PiecewiseScheduler(
            config['entropy_coeff_scheduler'])

        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)
        self.kl_stat = WindowStat(100)
        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        self.learn_thread = threading.Thread(target=self.run_learn)
        self.learn_thread.setDaemon(True)
        self.learn_thread.start()

        #========== Remote Actor ===========
        self.remote_count = 0

        self.batch_buffer = []
        self.remote_metrics_queue = queue.Queue()
        self.sample_total_steps = 0

        self.remote_manager_thread = threading.Thread(
            target=self.run_remote_manager)
        self.remote_manager_thread.setDaemon(True)
        self.remote_manager_thread.start()

        self.csv_logger = CSVLogger(
            os.path.join(logger.get_dir(), 'result.csv'))

        from utils import Summary
        self.summary = Summary('./output')
Example 5
    def __init__(self, config):
        self.config = config

        # The environment is created here only to obtain the dimension of the game's action space
        env = retro_util.RetroEnv(game=config['env_name'],
                                  use_restricted_actions=retro.Actions.DISCRETE,
                                  resize_shape=config['obs_shape'],
                                  render_preprocess=False)
        obs_dim = env.observation_space.shape
        action_dim = env.action_space.n
        self.config['action_dim'] = action_dim

        # The model created here is the one actually used for learning
        model = Model(action_dim)
        algorithm = parl.algorithms.A3C(model, vf_loss_coeff=config['vf_loss_coeff'])
        self.agent = Agent(algorithm, config, obs_dim)

        # Only a single GPU is supported
        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only support training in single GPU,\
                    Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_TO_USE]` .'

        # Load a pre-trained model
        if self.config['restore_model']:
            logger.info("加载预训练模型...")
            self.agent.restore(self.config['model_path'])

        # Statistics for logging training progress
        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)
        self.lr = None
        self.entropy_coeff = None

        self.best_loss = None

        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        # ========== Remote Actor ===========
        self.remote_count = 0
        self.sample_data_queue = queue.Queue()

        self.remote_metrics_queue = queue.Queue()
        self.sample_total_steps = 0

        self.params_queues = []
        self.create_actors()
Example 6
    def __init__(self, config, cuda):
        self.cuda = cuda

        self.config = config
        env = gym.make(config['env_name'])
        env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
        obs_shape = env.observation_space.shape
        act_dim = env.action_space.n
        self.config['obs_shape'] = obs_shape
        self.config['act_dim'] = act_dim

        model = ActorCritic(act_dim)
        if self.cuda:
            model = model.cuda()

        algorithm = A2C(model, config)
        self.agent = Agent(algorithm, config)

        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only support training in single GPU,\
                    Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_YOU_WANT_TO_USE]` .'

        else:
            os.environ['CPU_NUM'] = str(1)

        #========== Learner ==========
        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)
        self.lr = None
        self.entropy_coeff = None

        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        #========== Remote Actor ===========
        self.remote_count = 0
        self.sample_total_steps = 0
        self.sample_data_queue = queue.Queue()
        self.remote_metrics_queue = queue.Queue()
        self.params_queues = []

        self.create_actors()
Example 7
    def __init__(self, args):
        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only support training in single GPU,\
                    Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_TO_USE]` .'

        else:
            cpu_num = os.environ.get('CPU_NUM')
            assert cpu_num is not None and cpu_num == '1', 'Only support training in single CPU,\
                    Please set environment variable:  `export CPU_NUM=1`.'

        model = OpenSimModel(OBS_DIM, VEL_DIM, ACT_DIM)
        algorithm = parl.algorithms.DDPG(
            model,
            gamma=GAMMA,
            tau=TAU,
            actor_lr=ACTOR_LR,
            critic_lr=CRITIC_LR)
        self.agent = OpenSimAgent(algorithm, OBS_DIM, ACT_DIM)

        self.rpm = ReplayMemory(args.rpm_size, OBS_DIM, ACT_DIM)

        if args.restore_rpm_path is not None:
            self.rpm.load(args.restore_rpm_path)
        if args.restore_model_path is not None:
            self.restore(args.restore_model_path)

        # add lock between training and predicting
        self.model_lock = threading.Lock()

        # add lock when appending data to rpm or writing scalars to summary
        self.memory_lock = threading.Lock()

        self.ready_actor_queue = queue.Queue()

        self.total_steps = 0
        self.noiselevel = 0.5

        self.critic_loss_stat = WindowStat(500)
        self.env_reward_stat = WindowStat(500)
        self.shaping_reward_stat = WindowStat(500)
        self.max_env_reward = 0

        # thread to keep training
        learn_thread = threading.Thread(target=self.keep_training)
        learn_thread.setDaemon(True)
        learn_thread.start()

        self.create_actors()
Example 8
class Learner(object):
    def __init__(self, config):
        self.config = config
        self.sample_data_queue = queue.Queue(
            maxsize=config['sample_queue_max_size'])

        #=========== Create Agent ==========
        env = gym.make(config['env_name'])
        env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
        obs_shape = env.observation_space.shape

        act_dim = env.action_space.n

        model = AtariModel(act_dim)
        algorithm = parl.algorithms.IMPALA(
            model,
            sample_batch_steps=self.config['sample_batch_steps'],
            gamma=self.config['gamma'],
            vf_loss_coeff=self.config['vf_loss_coeff'],
            clip_rho_threshold=self.config['clip_rho_threshold'],
            clip_pg_rho_threshold=self.config['clip_pg_rho_threshold'])
        self.agent = AtariAgent(algorithm, obs_shape, act_dim,
                                self.learn_data_provider)

        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only support training in single GPU,\
                    Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_TO_USE]` .'

        self.cache_params = self.agent.get_weights()
        self.params_lock = threading.Lock()
        self.params_updated = False
        self.cache_params_sent_cnt = 0
        self.total_params_sync = 0

        #========== Learner ==========
        self.lr, self.entropy_coeff = None, None
        self.lr_scheduler = PiecewiseScheduler(config['lr_scheduler'])
        self.entropy_coeff_scheduler = PiecewiseScheduler(
            config['entropy_coeff_scheduler'])

        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)
        self.kl_stat = WindowStat(100)
        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        self.learn_thread = threading.Thread(target=self.run_learn)
        self.learn_thread.setDaemon(True)
        self.learn_thread.start()

        #========== Remote Actor ===========
        self.remote_count = 0

        self.batch_buffer = []
        self.remote_metrics_queue = queue.Queue()
        self.sample_total_steps = 0

        self.create_actors()

    def learn_data_provider(self):
        """ Data generator for fluid.layers.py_reader
        """
        while True:
            sample_data = self.sample_data_queue.get()
            self.sample_total_steps += sample_data['obs'].shape[0]
            self.batch_buffer.append(sample_data)

            buffer_size = sum(
                [data['obs'].shape[0] for data in self.batch_buffer])
            if buffer_size >= self.config['train_batch_size']:
                batch = {}
                for key in self.batch_buffer[0].keys():
                    batch[key] = np.concatenate(
                        [data[key] for data in self.batch_buffer])
                self.batch_buffer = []

                obs_np = batch['obs'].astype('float32')
                actions_np = batch['actions'].astype('int64')
                behaviour_logits_np = batch['behaviour_logits'].astype(
                    'float32')
                rewards_np = batch['rewards'].astype('float32')
                dones_np = batch['dones'].astype('float32')

                self.lr = self.lr_scheduler.step()
                self.entropy_coeff = self.entropy_coeff_scheduler.step()

                yield [
                    obs_np, actions_np, behaviour_logits_np, rewards_np,
                    dones_np,
                    np.float32(self.lr),
                    np.array([self.entropy_coeff], dtype='float32')
                ]

    def run_learn(self):
        """ Learn loop
        """
        while True:
            with self.learn_time_stat:
                total_loss, pi_loss, vf_loss, entropy, kl = self.agent.learn()

            self.params_updated = True

            self.total_loss_stat.add(total_loss)
            self.pi_loss_stat.add(pi_loss)
            self.vf_loss_stat.add(vf_loss)
            self.entropy_stat.add(entropy)
            self.kl_stat.add(kl)

    def create_actors(self):
        """ Connect to the cluster and start sampling of the remote actor.
        """
        parl.connect(self.config['master_address'])

        logger.info('Waiting for {} remote actors to connect.'.format(
            self.config['actor_num']))

        for i in range(self.config['actor_num']):
            self.remote_count += 1
            logger.info('Remote actor count: {}'.format(self.remote_count))
            if self.start_time is None:
                self.start_time = time.time()

            remote_thread = threading.Thread(target=self.run_remote_sample)
            remote_thread.setDaemon(True)
            remote_thread.start()

    def run_remote_sample(self):
        """ Sample data from remote actor and update parameters of remote actor.
        """
        remote_actor = Actor(self.config)

        cnt = 0
        remote_actor.set_weights(self.cache_params)
        while True:
            batch = remote_actor.sample()
            self.sample_data_queue.put(batch)

            cnt += 1
            if cnt % self.config['get_remote_metrics_interval'] == 0:
                metrics = remote_actor.get_metrics()
                if metrics:
                    self.remote_metrics_queue.put(metrics)

            self.params_lock.acquire()

            if self.params_updated and self.cache_params_sent_cnt >= self.config[
                    'params_broadcast_interval']:
                self.params_updated = False
                self.cache_params = self.agent.get_weights()
                self.cache_params_sent_cnt = 0
            self.cache_params_sent_cnt += 1
            self.total_params_sync += 1

            self.params_lock.release()

            remote_actor.set_weights(self.cache_params)

    def log_metrics(self):
        """ Log metrics of learner and actors
        """
        if self.start_time is None:
            return

        metrics = []
        while True:
            try:
                metric = self.remote_metrics_queue.get_nowait()
                metrics.append(metric)
            except queue.Empty:
                break

        episode_rewards, episode_steps = [], []
        for x in metrics:
            episode_rewards.extend(x['episode_rewards'])
            episode_steps.extend(x['episode_steps'])
        max_episode_rewards, mean_episode_rewards, min_episode_rewards, \
                max_episode_steps, mean_episode_steps, min_episode_steps =\
                None, None, None, None, None, None
        if episode_rewards:
            mean_episode_rewards = np.mean(np.array(episode_rewards).flatten())
            max_episode_rewards = np.max(np.array(episode_rewards).flatten())
            min_episode_rewards = np.min(np.array(episode_rewards).flatten())

            mean_episode_steps = np.mean(np.array(episode_steps).flatten())
            max_episode_steps = np.max(np.array(episode_steps).flatten())
            min_episode_steps = np.min(np.array(episode_steps).flatten())

        metric = {
            'sample_steps': self.sample_total_steps,
            'max_episode_rewards': max_episode_rewards,
            'mean_episode_rewards': mean_episode_rewards,
            'min_episode_rewards': min_episode_rewards,
            'max_episode_steps': max_episode_steps,
            'mean_episode_steps': mean_episode_steps,
            'min_episode_steps': min_episode_steps,
            'sample_queue_size': self.sample_data_queue.qsize(),
            'total_params_sync': self.total_params_sync,
            'cache_params_sent_cnt': self.cache_params_sent_cnt,
            'total_loss': self.total_loss_stat.mean,
            'pi_loss': self.pi_loss_stat.mean,
            'vf_loss': self.vf_loss_stat.mean,
            'entropy': self.entropy_stat.mean,
            'kl': self.kl_stat.mean,
            'learn_time_s': self.learn_time_stat.mean,
            'elapsed_time_s': int(time.time() - self.start_time),
            'lr': self.lr,
            'entropy_coeff': self.entropy_coeff,
        }

        for key, value in metric.items():
            if value is not None:
                summary.add_scalar(key, value, self.sample_total_steps)

        logger.info(metric)
Example 9
class Learner(object):
    def __init__(self, args):
        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only support training in single GPU,\
                    Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_TO_USE]` .'

        else:
            cpu_num = os.environ.get('CPU_NUM')
            assert cpu_num is not None and cpu_num == '1', 'Only support training in single CPU,\
                    Please set environment variable:  `export CPU_NUM=1`.'

        model = OpenSimModel(OBS_DIM, VEL_DIM, ACT_DIM)
        algorithm = parl.algorithms.DDPG(
            model,
            gamma=GAMMA,
            tau=TAU,
            actor_lr=ACTOR_LR,
            critic_lr=CRITIC_LR)
        self.agent = OpenSimAgent(algorithm, OBS_DIM, ACT_DIM)

        self.rpm = ReplayMemory(args.rpm_size, OBS_DIM, ACT_DIM)

        if args.restore_rpm_path is not None:
            self.rpm.load(args.restore_rpm_path)
        if args.restore_model_path is not None:
            self.restore(args.restore_model_path)

        # add lock between training and predicting
        self.model_lock = threading.Lock()

        # add lock when appending data to rpm or writing scalars to summary
        self.memory_lock = threading.Lock()

        self.ready_actor_queue = queue.Queue()

        self.total_steps = 0
        self.noiselevel = 0.5

        self.critic_loss_stat = WindowStat(500)
        self.env_reward_stat = WindowStat(500)
        self.shaping_reward_stat = WindowStat(500)
        self.max_env_reward = 0

        # thread to keep training
        learn_thread = threading.Thread(target=self.keep_training)
        learn_thread.setDaemon(True)
        learn_thread.start()

        self.create_actors()

    def create_actors(self):
        """Connect to the cluster and start sampling of the remote actor.
        """
        parl.connect(args.cluster_address, ['official_obs_scaler.npz'])

        for i in range(args.actor_num):
            logger.info('Remote actor count: {}'.format(i + 1))

            remote_thread = threading.Thread(target=self.run_remote_sample)
            remote_thread.setDaemon(True)
            remote_thread.start()

        # There is a memory-leak problem in the osim-rl package,
        # so we dynamically add actors when remote actors are killed due to excessive memory usage.
        time.sleep(10 * 60)
        parl_client = get_global_client()
        while True:
            if parl_client.actor_num < args.actor_num:
                logger.info(
                    'Dynamically adding actor, current actor num: {}'.format(
                        parl_client.actor_num))
                remote_thread = threading.Thread(target=self.run_remote_sample)
                remote_thread.setDaemon(True)
                remote_thread.start()
            time.sleep(5)

    def _new_ready_actor(self):
        """ 

        The actor is ready to start new episode,
        but blocking until training thread call actor_ready_event.set()
        """
        actor_ready_event = threading.Event()
        self.ready_actor_queue.put(actor_ready_event)
        logger.info(
            "[new_avaliabe_actor] approximate size of ready actors:{}".format(
                self.ready_actor_queue.qsize()))
        actor_ready_event.wait()

    def run_remote_sample(self):
        remote_actor = Actor(
            difficulty=args.difficulty,
            vel_penalty_coeff=args.vel_penalty_coeff,
            muscle_penalty_coeff=args.muscle_penalty_coeff,
            penalty_coeff=args.penalty_coeff,
            only_first_target=args.only_first_target)

        actor_state = ActorState()

        while True:
            obs = remote_actor.reset()
            actor_state.reset()

            while True:
                actor_state.memory.append(
                    TransitionExperience(
                        obs=obs,
                        action=None,
                        reward=None,
                        info=None,
                        timestamp=time.time()))

                action = self.pred_batch(obs)

                # For each target, decay noise as the steps increase.
                step = len(
                    actor_state.memory) - actor_state.last_target_changed_steps
                current_noise = self.noiselevel * (0.98**(step - 1))

                noise = np.zeros((ACT_DIM, ), dtype=np.float32)
                if actor_state.ident % 3 == 0:
                    if step % 5 == 0:
                        noise = np.random.randn(ACT_DIM) * current_noise
                elif actor_state.ident % 3 == 1:
                    if step % 5 == 0:
                        noise = np.random.randn(ACT_DIM) * current_noise * 2
                action += noise

                action = np.clip(action, -1, 1)

                obs, reward, done, info = remote_actor.step(action)

                reward_scale = (1 - GAMMA)
                info['shaping_reward'] *= reward_scale

                actor_state.memory[-1].reward = reward
                actor_state.memory[-1].info = info
                actor_state.memory[-1].action = action

                if 'target_changed' in info and info['target_changed']:
                    actor_state.update_last_target_changed()

                if done:
                    self._parse_memory(actor_state, last_obs=obs)
                    break

            self._new_ready_actor()

    def _parse_memory(self, actor_state, last_obs):
        mem = actor_state.memory
        n = len(mem)

        episode_shaping_reward = np.sum(
            [exp.info['shaping_reward'] for exp in mem])
        episode_env_reward = np.sum([exp.info['env_reward'] for exp in mem])
        episode_time = time.time() - mem[0].timestamp

        episode_rpm = []
        for i in range(n - 1):
            episode_rpm.append([
                mem[i].obs, mem[i].action, mem[i].info['shaping_reward'],
                mem[i + 1].obs, False
            ])
        episode_rpm.append([
            mem[-1].obs, mem[-1].action, mem[-1].info['shaping_reward'],
            last_obs, not mem[-1].info['timeout']
        ])

        with self.memory_lock:
            self.total_steps += n
            self.add_episode_rpm(episode_rpm)

            if actor_state.ident % 3 == 2:  # trajectory without noise
                self.env_reward_stat.add(episode_env_reward)
                self.shaping_reward_stat.add(episode_shaping_reward)
                self.max_env_reward = max(self.max_env_reward,
                                          episode_env_reward)

                if self.env_reward_stat.count > 500:
                    summary.add_scalar('recent_env_reward',
                                       self.env_reward_stat.mean,
                                       self.total_steps)
                    summary.add_scalar('recent_shaping_reward',
                                       self.shaping_reward_stat.mean,
                                       self.total_steps)
                if self.critic_loss_stat.count > 500:
                    summary.add_scalar('recent_critic_loss',
                                       self.critic_loss_stat.mean,
                                       self.total_steps)
                summary.add_scalar('episode_length', n, self.total_steps)
                summary.add_scalar('max_env_reward', self.max_env_reward,
                                   self.total_steps)
                summary.add_scalar('ready_actor_num',
                                   self.ready_actor_queue.qsize(),
                                   self.total_steps)
                summary.add_scalar('episode_time', episode_time,
                                   self.total_steps)

            self.noiselevel = self.noiselevel * NOISE_DECAY

    def learn(self):
        start_time = time.time()

        for T in range(args.train_times):
            [states, actions, rewards, new_states,
             dones] = self.rpm.sample_batch(BATCH_SIZE)
            with self.model_lock:
                critic_loss = self.agent.learn(states, actions, rewards,
                                               new_states, dones)
            self.critic_loss_stat.add(critic_loss)
        logger.info(
            "[learn] time consuming:{}".format(time.time() - start_time))

    def keep_training(self):
        episode_count = 1000000
        for T in range(episode_count):
            if self.rpm.size() > BATCH_SIZE * args.warm_start_batchs:
                self.learn()
                logger.info(
                    "[keep_training/{}] trying to acq a new env".format(T))

            # Keep training and predicting in balance.
            # After training, wait for a ready actor and let it start a new episode.
            ready_actor_event = self.ready_actor_queue.get()
            ready_actor_event.set()

            if np.mod(T, 100) == 0:
                logger.info("saving models")
                self.save(T)
            if np.mod(T, 10000) == 0:
                logger.info("saving rpm")
                self.save_rpm()

    def save_rpm(self):
        save_path = os.path.join(logger.get_dir(), "rpm.npz")
        self.rpm.save(save_path)

    def save(self, T):
        save_path = os.path.join(
            logger.get_dir(), 'model_every_100_episodes/episodes-{}'.format(T))
        self.agent.save(save_path)

    def restore(self, model_path):
        logger.info('restore model from {}'.format(model_path))
        self.agent.restore(model_path)

    def add_episode_rpm(self, episode_rpm):
        for x in episode_rpm:
            self.rpm.append(
                obs=x[0], act=x[1], reward=x[2], next_obs=x[3], terminal=x[4])

    def pred_batch(self, obs):
        batch_obs = np.expand_dims(obs, axis=0)

        with self.model_lock:
            action = self.agent.predict(batch_obs.astype('float32'))

        action = np.squeeze(action, axis=0)
        return action
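
The Learner above reads a module-level args object (cluster_address, actor_num, rpm_size and so on). The following argparse sketch shows the kind of command-line setup it appears to expect; the flag names are inferred from their usage in the code above, and all default values are assumptions rather than values from the original project.

import argparse

parser = argparse.ArgumentParser()
# Cluster / actor settings (names inferred from create_actors).
parser.add_argument('--cluster_address', default='localhost:8081')
parser.add_argument('--actor_num', type=int, default=64)
# Replay memory and training schedule (names inferred from __init__, learn and keep_training).
parser.add_argument('--rpm_size', type=int, default=int(2e6))
parser.add_argument('--train_times', type=int, default=100)
parser.add_argument('--warm_start_batchs', type=int, default=100)
parser.add_argument('--restore_rpm_path', default=None)
parser.add_argument('--restore_model_path', default=None)
# Environment shaping options passed straight to the remote Actor.
parser.add_argument('--difficulty', type=int, default=1)
parser.add_argument('--vel_penalty_coeff', type=float, default=1.0)
parser.add_argument('--muscle_penalty_coeff', type=float, default=1.0)
parser.add_argument('--penalty_coeff', type=float, default=1.0)
parser.add_argument('--only_first_target', action='store_true')
args = parser.parse_args()

# Constructing the Learner starts the training thread and the actor
# management loop, which runs indefinitely.
learner = Learner(args)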
Example 10
class Learner(object):
    def __init__(self, config):
        self.config = config

        env = gym.make(self.config['env_name'])
        self.config['obs_dim'] = env.observation_space.shape[0]
        self.config['act_dim'] = env.action_space.shape[0]

        self.obs_filter = MeanStdFilter(self.config['obs_dim'])
        self.noise = SharedNoiseTable(self.config['noise_size'])

        model = MujocoModel(self.config['act_dim'])
        algorithm = ES(model)
        self.agent = MujocoAgent(algorithm, self.config)

        self.latest_flat_weights = self.agent.get_flat_weights()
        self.latest_obs_filter = self.obs_filter.as_serializable()

        self.sample_total_episodes = 0
        self.sample_total_steps = 0

        self.actors_signal_input_queues = []
        self.actors_output_queues = []

        self.create_actors()

        self.eval_rewards_stat = WindowStat(self.config['report_window_size'])
        self.eval_lengths_stat = WindowStat(self.config['report_window_size'])

    def create_actors(self):
        """ create actors for parallel training.
        """

        parl.connect(self.config['master_address'])
        self.remote_count = 0
        for i in range(self.config['actor_num']):
            signal_queue = queue.Queue()
            output_queue = queue.Queue()
            self.actors_signal_input_queues.append(signal_queue)
            self.actors_output_queues.append(output_queue)

            self.remote_count += 1

            remote_thread = threading.Thread(target=self.run_remote_sample,
                                             args=(signal_queue, output_queue))
            remote_thread.setDaemon(True)
            remote_thread.start()

        logger.info('All remote actors are ready, begin to learn.')

    def run_remote_sample(self, signal_queue, output_queue):
        """ Sample data from remote actor or get filters of remote actor. 
        """
        remote_actor = Actor(self.config)
        while True:
            info = signal_queue.get()
            if info['signal'] == 'sample':
                result = remote_actor.sample(self.latest_flat_weights)
                output_queue.put(result)
            elif info['signal'] == 'get_filter':
                actor_filter = remote_actor.get_filter(flush_after=True)
                output_queue.put(actor_filter)
            elif info['signal'] == 'set_filter':
                remote_actor.set_filter(self.latest_obs_filter)
            else:
                raise NotImplementedError

    def step(self):
        """Run a step in ES.

        1. kick off all actors to synchronize weights and sample data;
        2. update parameters of the model based on sampled data.
        3. update global observation filter based on local filters of all actors, and synchronize global 
           filter to all actors.
        """
        num_episodes, num_timesteps = 0, 0
        results = []

        while num_episodes < self.config['min_episodes_per_batch'] or \
                num_timesteps < self.config['min_steps_per_batch']:
            # Send sample signal to all actors
            for q in self.actors_signal_input_queues:
                q.put({'signal': 'sample'})

            # Collect results from all actors
            for q in self.actors_output_queues:
                result = q.get()
                results.append(result)
                # result['noisy_lengths'] is a list of lists, where the inner lists have length 2.
                num_episodes += sum(
                    len(pair) for pair in result['noisy_lengths'])
                num_timesteps += sum(
                    sum(pair) for pair in result['noisy_lengths'])

        all_noise_indices = []
        all_training_rewards = []
        all_training_lengths = []
        all_eval_rewards = []
        all_eval_lengths = []

        for result in results:
            all_eval_rewards.extend(result['eval_rewards'])
            all_eval_lengths.extend(result['eval_lengths'])

            all_noise_indices.extend(result['noise_indices'])
            all_training_rewards.extend(result['noisy_rewards'])
            all_training_lengths.extend(result['noisy_lengths'])

        assert len(all_eval_rewards) == len(all_eval_lengths)
        assert (len(all_noise_indices) == len(all_training_rewards) ==
                len(all_training_lengths))

        self.sample_total_episodes += num_episodes
        self.sample_total_steps += num_timesteps

        eval_rewards = np.array(all_eval_rewards)
        eval_lengths = np.array(all_eval_lengths)
        noise_indices = np.array(all_noise_indices)
        noisy_rewards = np.array(all_training_rewards)
        noisy_lengths = np.array(all_training_lengths)

        # normalize rewards to (-0.5, 0.5)
        proc_noisy_rewards = utils.compute_centered_ranks(noisy_rewards)
        noises = [
            self.noise.get(index, self.agent.weights_total_size)
            for index in noise_indices
        ]

        # Update the parameters of the model.
        self.agent.learn(proc_noisy_rewards, noises)
        self.latest_flat_weights = self.agent.get_flat_weights()

        # Update obs filter
        self._update_filter()

        # Store the evaluation rewards
        if len(all_eval_rewards) > 0:
            self.eval_rewards_stat.add(np.mean(eval_rewards))
            self.eval_lengths_stat.add(np.mean(eval_lengths))

        metrics = {
            "episodes_this_iter": noisy_lengths.size,
            "sample_total_episodes": self.sample_total_episodes,
            'sample_total_steps': self.sample_total_steps,
            "evaluate_rewards_mean": self.eval_rewards_stat.mean,
            "evaluate_steps_mean": self.eval_lengths_stat.mean,
            "timesteps_this_iter": noisy_lengths.sum(),
        }

        self.log_metrics(metrics)
        return metrics

    def _update_filter(self):
        # Send get_filter signal to all actors
        for q in self.actors_signal_input_queues:
            q.put({'signal': 'get_filter'})

        filters = []
        # Collect filters from all actors and update the global filter
        for q in self.actors_output_queues:
            actor_filter = q.get()
            self.obs_filter.apply_changes(actor_filter)

        # Send set_filter signal to all actors
        self.latest_obs_filter = self.obs_filter.as_serializable()
        for q in self.actors_signal_input_queues:
            q.put({'signal': 'set_filter'})

    def log_metrics(self, metrics):
        logger.info(metrics)
        for k, v in metrics.items():
            if v is not None:
                summary.add_scalar(k, v, self.sample_total_steps)
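
A minimal sketch of how this ES Learner might be driven, assuming a config dict that provides the keys referenced in __init__ and step() (env_name, noise_size, master_address, actor_num, min_episodes_per_batch, min_steps_per_batch, report_window_size); the concrete values and the step budget below are placeholders, not values from the original code.

if __name__ == '__main__':
    config = {
        'env_name': 'HalfCheetah-v2',      # assumed Mujoco task
        'noise_size': 200000000,
        'master_address': 'localhost:8010',
        'actor_num': 24,
        'min_episodes_per_batch': 100,
        'min_steps_per_batch': 10000,
        'report_window_size': 10,
    }
    learner = Learner(config)
    # Each step() kicks off sampling, updates the model and logs metrics.
    while learner.sample_total_steps < int(1e8):   # assumed step budget
        learner.step()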
Example 11
class Learner(object):
    def __init__(self, config, cuda):
        self.cuda = cuda

        self.config = config
        env = gym.make(config['env_name'])
        env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
        obs_shape = env.observation_space.shape
        act_dim = env.action_space.n
        self.config['obs_shape'] = obs_shape
        self.config['act_dim'] = act_dim

        model = ActorCritic(act_dim)
        if self.cuda:
            model = model.cuda()

        algorithm = A2C(model, config)
        self.agent = Agent(algorithm, config)

        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only support training in single GPU,\
                    Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_YOU_WANT_TO_USE]` .'

        else:
            os.environ['CPU_NUM'] = str(1)

        #========== Learner ==========
        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)
        self.lr = None
        self.entropy_coeff = None

        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        #========== Remote Actor ===========
        self.remote_count = 0
        self.sample_total_steps = 0
        self.sample_data_queue = queue.Queue()
        self.remote_metrics_queue = queue.Queue()
        self.params_queues = []

        self.create_actors()

    def create_actors(self):
        parl.connect(self.config['master_address'])

        logger.info('Waiting for {} remote actors to connect.'.format(
            self.config['actor_num']))

        for i in six.moves.range(self.config['actor_num']):
            params_queue = queue.Queue()
            self.params_queues.append(params_queue)

            self.remote_count += 1
            logger.info('Remote actor count: {}'.format(self.remote_count))

            remote_thread = threading.Thread(
                target=self.run_remote_sample, args=(params_queue, ))
            remote_thread.setDaemon(True)
            remote_thread.start()

        logger.info('All remote actors are ready, begin to learn.')
        self.start_time = time.time()

    def run_remote_sample(self, params_queue):
        remote_actor = Actor(self.config)

        cnt = 0
        while True:
            latest_params = params_queue.get()

            remote_actor.set_weights(latest_params)
            batch = remote_actor.sample()
            self.sample_data_queue.put(batch)

            cnt += 1
            if cnt % self.config['get_remote_metrics_interval'] == 0:
                metrics = remote_actor.get_metrics()
                if metrics:
                    self.remote_metrics_queue.put(metrics)

    def step(self):
        latest_params = self.agent.get_weights()

        for params_queue in self.params_queues:
            params_queue.put(latest_params)

        train_batch = defaultdict(list)
        for i in range(self.config['actor_num']):
            sample_data = self.sample_data_queue.get()
            for key, value in sample_data.items():
                train_batch[key].append(value)
            self.sample_total_steps += len(sample_data['obs'])

        for key, value in train_batch.items():
            train_batch[key] = np.concatenate(value)
            train_batch[key] = torch.tensor(train_batch[key]).float()
            if self.cuda:
                train_batch[key] = train_batch[key].cuda()

        with self.learn_time_stat:
            total_loss, pi_loss, vf_loss, entropy, lr, entropy_coeff = self.agent.learn(
                obs=train_batch['obs'],
                actions=train_batch['actions'],
                advantages=train_batch['advantages'],
                target_values=train_batch['target_values'],
            )

        self.total_loss_stat.add(total_loss.item())
        self.pi_loss_stat.add(pi_loss.item())
        self.vf_loss_stat.add(vf_loss.item())
        self.entropy_stat.add(entropy.item())
        self.lr = lr
        self.entropy_coeff = entropy_coeff

    def log_metrics(self):
        """ Log metrics of learner and actors
        """
        if self.start_time is None:
            return

        metrics = []
        while True:
            try:
                metric = self.remote_metrics_queue.get_nowait()
                metrics.append(metric)
            except queue.Empty:
                break

        episode_rewards, episode_steps = [], []
        for x in metrics:
            episode_rewards.extend(x['episode_rewards'])
            episode_steps.extend(x['episode_steps'])
        max_episode_rewards, mean_episode_rewards, min_episode_rewards, \
                max_episode_steps, mean_episode_steps, min_episode_steps =\
                None, None, None, None, None, None
        if episode_rewards:
            mean_episode_rewards = np.mean(np.array(episode_rewards).flatten())
            max_episode_rewards = np.max(np.array(episode_rewards).flatten())
            min_episode_rewards = np.min(np.array(episode_rewards).flatten())

            mean_episode_steps = np.mean(np.array(episode_steps).flatten())
            max_episode_steps = np.max(np.array(episode_steps).flatten())
            min_episode_steps = np.min(np.array(episode_steps).flatten())

        metric = {
            'Sample steps': self.sample_total_steps,
            'max_episode_rewards': max_episode_rewards,
            'mean_episode_rewards': mean_episode_rewards,
            'min_episode_rewards': min_episode_rewards,
            'max_episode_steps': max_episode_steps,
            'mean_episode_steps': mean_episode_steps,
            'min_episode_steps': min_episode_steps,
            'total_loss': self.total_loss_stat.mean,
            'pi_loss': self.pi_loss_stat.mean,
            'vf_loss': self.vf_loss_stat.mean,
            'entropy': self.entropy_stat.mean,
            'learn_time_s': self.learn_time_stat.mean,
            'elapsed_time_s': int(time.time() - self.start_time),
            'lr': self.lr,
            'entropy_coeff': self.entropy_coeff,
        }

        if metric['mean_episode_rewards'] is not None:
            summary.add_scalar('train/mean_reward',
                               metric['mean_episode_rewards'],
                               self.sample_total_steps)
            summary.add_scalar('train/total_loss', metric['total_loss'],
                               self.sample_total_steps)
            summary.add_scalar('train/pi_loss', metric['pi_loss'],
                               self.sample_total_steps)
            summary.add_scalar('train/vf_loss', metric['vf_loss'],
                               self.sample_total_steps)
            summary.add_scalar('train/entropy', metric['entropy'],
                               self.sample_total_steps)
            summary.add_scalar('train/learn_rate', metric['lr'],
                               self.sample_total_steps)

        logger.info(metric)

    def should_stop(self):
        return self.sample_total_steps >= self.config['max_sample_steps']
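
A minimal driver loop for this torch-based A2C Learner, assuming a config dict that provides the keys used above (env_name, env_dim, actor_num, master_address, get_remote_metrics_interval, max_sample_steps, and so on); the a2c_config module in the import is hypothetical.

import torch

# Hypothetical module holding the config dict with the keys listed above.
from a2c_config import config

if __name__ == '__main__':
    learner = Learner(config, cuda=torch.cuda.is_available())
    while not learner.should_stop():
        learner.step()
        learner.log_metrics()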
Example 12
class Learner(object):
    def __init__(self, config):
        self.config = config

        # The environment is created here only to obtain the dimension of the game's action space
        env = retro_util.RetroEnv(game=config['env_name'],
                                  use_restricted_actions=retro.Actions.DISCRETE,
                                  resize_shape=config['obs_shape'],
                                  render_preprocess=False)
        obs_dim = env.observation_space.shape
        action_dim = env.action_space.n
        self.config['action_dim'] = action_dim

        # The model created here is the one actually used for learning
        model = Model(action_dim)
        algorithm = parl.algorithms.A3C(model, vf_loss_coeff=config['vf_loss_coeff'])
        self.agent = Agent(algorithm, config, obs_dim)

        # Only a single GPU is supported
        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only support training in single GPU,\
                    Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_TO_USE]` .'

        # Load a pre-trained model
        if self.config['restore_model']:
            logger.info("加载预训练模型...")
            self.agent.restore(self.config['model_path'])

        # Statistics for logging training progress
        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)
        self.lr = None
        self.entropy_coeff = None

        self.best_loss = None

        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        # ========== Remote Actor ===========
        self.remote_count = 0
        self.sample_data_queue = queue.Queue()

        self.remote_metrics_queue = queue.Queue()
        self.sample_total_steps = 0

        self.params_queues = []
        self.create_actors()

    # Create the specified number of actors and dispatch them to the cluster
    def create_actors(self):
        # Connect to the cluster
        parl.connect(self.config['master_address'])
        logger.info('Waiting for {} remote actors to connect.'.format(self.config['actor_num']))

        # Create one thread per actor in a loop
        for i in range(self.config['actor_num']):
            # Queue used to push updated parameters to this actor
            params_queue = queue.Queue()
            self.params_queues.append(params_queue)

            self.remote_count += 1
            logger.info('Remote actor count: {}'.format(self.remote_count))
            # Thread that drives this actor
            remote_thread = threading.Thread(target=self.run_remote_sample, args=(params_queue,))
            remote_thread.setDaemon(True)
            remote_thread.start()

        logger.info('All remote actors are ready, begin to learn.')
        self.start_time = time.time()

    # Create an actor and, in an endless loop, update its model parameters and collect game data
    def run_remote_sample(self, params_queue):
        # Create the actor
        remote_actor = Actor(self.config)

        while True:
            # Get the latest model parameters from the trainer
            latest_params = params_queue.get()
            # Set the model parameters of the actor
            remote_actor.set_weights(latest_params)
            # Sample a small batch of game data
            batch = remote_actor.sample()
            # Put the sampled data into the data queue
            self.sample_data_queue.put(batch)

    # Run one training step
    def step(self):
        """
        1. 启动所有Actor,同步参数和样本数据;
        2. 收集所有Actor生成的数据;
        3. 更新参数.
        """

        # Get the latest model parameters from the trainer
        latest_params = self.agent.get_weights()
        # Push the parameters to every actor thread's parameter queue
        for params_queue in self.params_queues:
            params_queue.put(latest_params)

        train_batch = defaultdict(list)
        # Collect the data generated by each actor
        for i in range(self.config['actor_num']):
            sample_data = self.sample_data_queue.get()
            for key, value in sample_data.items():
                train_batch[key].append(value)

            # Track the number of sampled steps
            self.sample_total_steps += sample_data['obs'].shape[0]

        # Pack the data from all actors into one training batch
        for key, value in train_batch.items():
            train_batch[key] = np.concatenate(value)

        # Run one learning step
        with self.learn_time_stat:
            total_loss, pi_loss, vf_loss, entropy, lr, entropy_coeff = self.agent.learn(
                obs_np=train_batch['obs'],
                actions_np=train_batch['actions'],
                advantages_np=train_batch['advantages'],
                target_values_np=train_batch['target_values'])

        # Record the training statistics
        self.total_loss_stat.add(total_loss)
        self.pi_loss_stat.add(pi_loss)
        self.vf_loss_stat.add(vf_loss)
        self.entropy_stat.add(entropy)
        self.lr = lr
        self.entropy_coeff = entropy_coeff

    # Log training metrics
    def log_metrics(self):
        # Skip if training has not started yet
        if self.start_time is None:
            return
        # Keep track of the best model
        if self.best_loss is None:
            self.best_loss = self.total_loss_stat.mean
        else:
            if self.best_loss > self.total_loss_stat.mean:
                self.best_loss = self.total_loss_stat.mean
                self.save_model("model_best")
        # Write the training statistics to the summary log
        summary.add_scalar('total_loss', self.total_loss_stat.mean, self.sample_total_steps)
        summary.add_scalar('pi_loss', self.pi_loss_stat.mean, self.sample_total_steps)
        summary.add_scalar('vf_loss', self.vf_loss_stat.mean, self.sample_total_steps)
        summary.add_scalar('entropy', self.entropy_stat.mean, self.sample_total_steps)
        summary.add_scalar('lr', self.lr, self.sample_total_steps)
        summary.add_scalar('entropy_coeff', self.entropy_coeff, self.sample_total_steps)
        logger.info('total_loss: {}'.format(self.total_loss_stat.mean))

    # Save the model
    def save_model(self, model_name="model"):
        # Skip if training has not started yet
        if self.start_time is None:
            return
        save_path = os.path.join(self.config['model_path'], model_name)
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        self.agent.save(save_path)

    # Check whether the number of sampled steps has reached the maximum
    def should_stop(self):
        return self.sample_total_steps >= self.config['max_sample_steps']
Example 13
class Learner(object):
    def __init__(self, config):
        self.config = config
        self.sample_data_queue = queue.Queue(
            maxsize=config['sample_queue_max_size'])

        #=========== Create Agent ==========
        env = IntraBuildingEnv("config.ini")
        self._mansion_attr = env._mansion.attribute
        self._obs_dim = obs_dim(self._mansion_attr)
        self._act_dim = act_dim(self._mansion_attr)

        self.config['obs_shape'] = self._obs_dim
        self.config['act_dim'] = self._act_dim

        model = RLDispatcherModel(self._act_dim)
        algorithm = IMPALA(model, hyperparas=config)
        self.agent = ElevatorAgent(algorithm, config, self.learn_data_provider)

        self.cache_params = self.agent.get_params()
        self.params_lock = threading.Lock()
        self.params_updated = False
        self.cache_params_sent_cnt = 0
        self.total_params_sync = 0

        #========== Learner ==========
        self.lr, self.entropy_coeff = None, None
        self.lr_scheduler = PiecewiseScheduler(config['lr_scheduler'])
        self.entropy_coeff_scheduler = PiecewiseScheduler(
            config['entropy_coeff_scheduler'])

        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)
        self.kl_stat = WindowStat(100)
        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        self.learn_thread = threading.Thread(target=self.run_learn)
        self.learn_thread.setDaemon(True)
        self.learn_thread.start()

        #========== Remote Actor ===========
        self.remote_count = 0

        self.batch_buffer = []
        self.remote_metrics_queue = queue.Queue()
        self.sample_total_steps = 0

        self.remote_manager_thread = threading.Thread(
            target=self.run_remote_manager)
        self.remote_manager_thread.setDaemon(True)
        self.remote_manager_thread.start()

        self.csv_logger = CSVLogger(
            os.path.join(logger.get_dir(), 'result.csv'))

        from utils import Summary
        self.summary = Summary('./output')


    def learn_data_provider(self):
        """ Data generator for fluid.layers.py_reader
        """
        while True:
            sample_data = self.sample_data_queue.get()
            self.sample_total_steps += sample_data['obs'].shape[0]
            self.batch_buffer.append(sample_data)

            buffer_size = sum(
                [data['obs'].shape[0] for data in self.batch_buffer])
            if buffer_size >= self.config['train_batch_size']:
                batch = {}
                for key in self.batch_buffer[0].keys():
                    batch[key] = np.concatenate(
                        [data[key] for data in self.batch_buffer])
                self.batch_buffer = []

                obs_np = batch['obs'].astype('float32')
                actions_np = batch['actions'].astype('int64')
                behaviour_logits_np = batch['behaviour_logits'].astype(
                    'float32')
                rewards_np = batch['rewards'].astype('float32')
                dones_np = batch['dones'].astype('float32')

                self.lr = self.lr_scheduler.step()
                self.entropy_coeff = self.entropy_coeff_scheduler.step()

                yield [
                    obs_np, actions_np, behaviour_logits_np, rewards_np,
                    dones_np, self.lr, self.entropy_coeff
                ]

    def run_learn(self):
        """ Learn loop
        """
        while True:
            with self.learn_time_stat:
                total_loss, pi_loss, vf_loss, entropy, kl = self.agent.learn()

            self.params_updated = True

            self.total_loss_stat.add(total_loss)
            self.pi_loss_stat.add(pi_loss)
            self.vf_loss_stat.add(vf_loss)
            self.entropy_stat.add(entropy)
            self.kl_stat.add(kl)

    def run_remote_manager(self):
        """ Accept connection of new remote actor and start sampling of the remote actor.
        """
        remote_manager = RemoteManager(port=self.config['server_port'])
        logger.info('Waiting for remote actors connecting.')
        while True:
            remote_actor = remote_manager.get_remote()
            self.remote_count += 1
            logger.info('Remote actor count: {}'.format(self.remote_count))
            if self.start_time is None:
                self.start_time = time.time()

            remote_thread = threading.Thread(
                target=self.run_remote_sample, args=(remote_actor, ))
            remote_thread.setDaemon(True)
            remote_thread.start()

    def run_remote_sample(self, remote_actor):
        """ Sample data from remote actor and update parameters of remote actor.
        """
        cnt = 0
        remote_actor.set_params(self.cache_params)
        while True:
            batch = remote_actor.sample()
            if batch:
                self.sample_data_queue.put(batch)

            cnt += 1
            if cnt % self.config['get_remote_metrics_interval'] == 0:
                metrics = remote_actor.get_metrics()
                if metrics:
                    self.remote_metrics_queue.put(metrics)

            self.params_lock.acquire()

            if self.params_updated and self.cache_params_sent_cnt >= self.config[
                    'params_broadcast_interval']:
                self.params_updated = False
                self.cache_params = self.agent.get_params()
                self.cache_params_sent_cnt = 0
            self.cache_params_sent_cnt += 1
            self.total_params_sync += 1

            self.params_lock.release()

            remote_actor.set_params(self.cache_params)

    def log_metrics(self):
        """ Log metrics of learner and actors
        """
        if self.start_time is None:
            return

        metrics = []
        while True:
            try:
                metric = self.remote_metrics_queue.get_nowait()
                metrics.append(metric)
            except queue.Empty:
                break

        episode_rewards, episode_steps, episode_shaping_rewards, episode_deliver_rewards, episode_wrong_deliver_rewards = [], [], [], [], []
        for x in metrics:
            episode_rewards.extend(x['episode_rewards'])
            episode_steps.extend(x['episode_steps'])
            episode_shaping_rewards.extend(x['episode_shaping_rewards'])
            episode_deliver_rewards.extend(x['episode_deliver_rewards'])
            episode_wrong_deliver_rewards.extend(x['episode_wrong_deliver_rewards'])

        max_episode_rewards, mean_episode_rewards, min_episode_rewards, \
                max_episode_steps, mean_episode_steps, min_episode_steps =\
                None, None, None, None, None, None
        mean_episode_shaping_rewards, mean_episode_deliver_rewards, mean_episode_wrong_deliver_rewards = \
                None, None, None
        if episode_rewards:
            mean_episode_rewards = np.mean(np.array(episode_rewards).flatten())
            max_episode_rewards = np.max(np.array(episode_rewards).flatten()) 
            min_episode_rewards = np.min(np.array(episode_rewards).flatten())

            mean_episode_steps = np.mean(np.array(episode_steps).flatten())
            max_episode_steps = np.max(np.array(episode_steps).flatten())
            min_episode_steps = np.min(np.array(episode_steps).flatten())
            
            mean_episode_shaping_rewards = np.mean(np.array(episode_shaping_rewards).flatten())
            mean_episode_deliver_rewards = np.mean(np.array(episode_deliver_rewards).flatten())
            mean_episode_wrong_deliver_rewards = np.mean(np.array(episode_wrong_deliver_rewards).flatten())

        metric = {
            'Sample steps': self.sample_total_steps,
            'max_episode_rewards': max_episode_rewards,
            'mean_episode_rewards': mean_episode_rewards,
            'min_episode_rewards': min_episode_rewards,
            'mean_episode_shaping_rewards': mean_episode_shaping_rewards,
            'mean_episode_deliver_rewards': mean_episode_deliver_rewards,
            'mean_episode_wrong_deliver_rewards': mean_episode_wrong_deliver_rewards,
            #'max_episode_steps': max_episode_steps,
            #'mean_episode_steps': mean_episode_steps,
            #'min_episode_steps': min_episode_steps,
            'sample_queue_size': self.sample_data_queue.qsize(),
            'total_params_sync': self.total_params_sync,
            'cache_params_sent_cnt': self.cache_params_sent_cnt,
            'total_loss': self.total_loss_stat.mean,
            'pi_loss': self.pi_loss_stat.mean,
            'vf_loss': self.vf_loss_stat.mean,
            'entropy': self.entropy_stat.mean,
            'kl': self.kl_stat.mean,
            'learn_time_s': self.learn_time_stat.mean,
            'elapsed_time_s': int(time.time() - self.start_time),
            'lr': self.lr,
            'entropy_coeff': self.entropy_coeff,
        }

        logger.info(metric)
        self.csv_logger.log_dict(metric)
        self.summary.log_dict(metric, self.sample_total_steps)

    def close(self):
        self.csv_logger.close()
Example 15
class Learner(object):
    def __init__(self, config):
        self.config = config

        self.sample_data_queue = queue.Queue()
        self.batch_buffer = defaultdict(list)

        #=========== Create Agent ==========
        env = gym.make(config['env_name'])
        env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
        obs_shape = env.observation_space.shape
        act_dim = env.action_space.n

        self.config['obs_shape'] = obs_shape
        self.config['act_dim'] = act_dim

        model = AtariModel(act_dim)
        algorithm = parl.algorithms.A3C(model,
                                        vf_loss_coeff=config['vf_loss_coeff'])
        self.agent = AtariAgent(
            algorithm,
            obs_shape=self.config['obs_shape'],
            predict_thread_num=self.config['predict_thread_num'],
            learn_data_provider=self.learn_data_provider)

        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only single-GPU training is supported. '\
                    'Please set the environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_YOU_WANT_TO_USE]`.'

        else:
            cpu_num = os.environ.get('CPU_NUM')
            assert cpu_num is not None and cpu_num == '1', 'Only single-CPU training '\
                    'is supported. Please set the environment variable: `export CPU_NUM=1`.'

        #========== Learner ==========
        self.lr, self.entropy_coeff = None, None
        self.lr_scheduler = PiecewiseScheduler(config['lr_scheduler'])
        self.entropy_coeff_scheduler = PiecewiseScheduler(
            config['entropy_coeff_scheduler'])

        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)

        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        # learn thread
        self.learn_thread = threading.Thread(target=self.run_learn)
        self.learn_thread.daemon = True
        self.learn_thread.start()

        self.predict_input_queue = queue.Queue()

        # predict thread
        self.predict_threads = []
        for i in six.moves.range(self.config['predict_thread_num']):
            predict_thread = threading.Thread(target=self.run_predict,
                                              args=(i, ))
            predict_thread.daemon = True
            predict_thread.start()
            self.predict_threads.append(predict_thread)

        #========== Remote Simulator ===========
        self.remote_count = 0

        self.remote_metrics_queue = queue.Queue()
        self.sample_total_steps = 0

        self.create_actors()
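        # Thread layout, as wired above: one learn thread consumes batches
        # through learn_data_provider; `predict_thread_num` predictor threads
        # serve batched inference requests; and create_actors() starts one
        # sampler thread per remote simulator. All of them communicate
        # through the queues created in this constructor.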

    def learn_data_provider(self):
        """ Data generator for fluid.layers.py_reader
        """
        B = self.config['train_batch_size']
        while True:
            sample_data = self.sample_data_queue.get()
            self.sample_total_steps += len(sample_data['obs'])
            for key in sample_data:
                self.batch_buffer[key].extend(sample_data[key])

            if len(self.batch_buffer['obs']) >= B:
                batch = {}
                for key in self.batch_buffer:
                    batch[key] = np.array(self.batch_buffer[key][:B])

                obs_np = batch['obs'].astype('float32')
                actions_np = batch['actions'].astype('int64')
                advantages_np = batch['advantages'].astype('float32')
                target_values_np = batch['target_values'].astype('float32')

                self.lr = self.lr_scheduler.step()
                self.lr = np.array(self.lr, dtype='float32')
                self.entropy_coeff = self.entropy_coeff_scheduler.step()
                self.entropy_coeff = np.array(self.entropy_coeff,
                                              dtype='float32')

                yield [
                    obs_np, actions_np, advantages_np, target_values_np,
                    self.lr, self.entropy_coeff
                ]

                for key in self.batch_buffer:
                    self.batch_buffer[key] = self.batch_buffer[key][B:]
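        # The generator above never returns: it blocks on sample_data_queue
        # and carries any samples beyond train_batch_size over in
        # batch_buffer, so no collected transition is dropped between
        # batches.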

    def run_predict(self, thread_id):
        """ predict thread
        """
        batch_ident = []
        batch_obs = []
        while True:
            ident, obs = self.predict_input_queue.get()

            batch_ident.append(ident)
            batch_obs.append(obs)
            while len(batch_obs) < self.config['max_predict_batch_size']:
                try:
                    ident, obs = self.predict_input_queue.get_nowait()
                    batch_ident.append(ident)
                    batch_obs.append(obs)
                except queue.Empty:
                    break
            if batch_obs:
                batch_obs = np.array(batch_obs)
                actions, values = self.agent.sample(batch_obs, thread_id)

                for i, ident in enumerate(batch_ident):
                    self.predict_output_queues[ident].put(
                        (actions[i], values[i]))
                batch_ident = []
                batch_obs = []
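        # The loop above does opportunistic batching: block for the first
        # request, then greedily drain up to max_predict_batch_size - 1 more
        # without blocking, trading a little per-request latency for much
        # higher predictor throughput.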

    def run_learn(self):
        """ Learn loop
        """
        while True:
            with self.learn_time_stat:
                total_loss, pi_loss, vf_loss, entropy = self.agent.learn()

            self.total_loss_stat.add(total_loss)
            self.pi_loss_stat.add(pi_loss)
            self.vf_loss_stat.add(vf_loss)
            self.entropy_stat.add(entropy)

    def create_actors(self):
        """ Connect to the cluster and start sampling of the remote actor.
        """
        parl.connect(self.config['master_address'])

        logger.info('Waiting for {} remote actors to connect.'.format(
            self.config['actor_num']))

        ident = 0
        self.predict_output_queues = []

        for i in six.moves.range(self.config['actor_num']):

            self.remote_count += 1
            logger.info('Remote simulator count: {}'.format(self.remote_count))
            if self.start_time is None:
                self.start_time = time.time()

            q = queue.Queue()
            self.predict_output_queues.append(q)

            remote_thread = threading.Thread(target=self.run_remote_sample,
                                             args=(ident, ))
            remote_thread.daemon = True
            remote_thread.start()
            ident += 1

    def run_remote_sample(self, ident):
        """ Interacts with remote simulator.
        """
        remote_actor = Actor(self.config)
        mem = defaultdict(list)

        obs = remote_actor.reset()
        while True:
            self.predict_input_queue.put((ident, obs))
            action, value = self.predict_output_queues[ident].get()

            next_obs, reward, done = remote_actor.step(action)

            mem['obs'].append(obs)
            mem['actions'].append(action)
            mem['rewards'].append(reward)
            mem['values'].append(value)

            if done:
                next_value = 0
                advantages = calc_gae(mem['rewards'], mem['values'],
                                      next_value, self.config['gamma'],
                                      self.config['lambda'])
                target_values = advantages + mem['values']

                self.sample_data_queue.put({
                    'obs': mem['obs'],
                    'actions': mem['actions'],
                    'advantages': advantages,
                    'target_values': target_values
                })

                mem = defaultdict(list)

                next_obs = remote_actor.reset()

            elif len(mem['obs']) == self.config['t_max'] + 1:
                next_value = mem['values'][-1]
                advantages = calc_gae(mem['rewards'][:-1], mem['values'][:-1],
                                      next_value, self.config['gamma'],
                                      self.config['lambda'])
                target_values = advantages + mem['values'][:-1]

                self.sample_data_queue.put({
                    'obs': mem['obs'][:-1],
                    'actions': mem['actions'][:-1],
                    'advantages': advantages,
                    'target_values': target_values
                })

                for key in mem:
                    mem[key] = [mem[key][-1]]

            obs = next_obs

            if done:
                metrics = remote_actor.get_metrics()
                if metrics:
                    self.remote_metrics_queue.put(metrics)

    def log_metrics(self):
        """ Log metrics of learner and simulators
        """
        if self.start_time is None:
            return

        metrics = []
        while True:
            try:
                metric = self.remote_metrics_queue.get_nowait()
                metrics.append(metric)
            except queue.Empty:
                break

        episode_rewards, episode_steps = [], []
        for x in metrics:
            episode_rewards.extend(x['episode_rewards'])
            episode_steps.extend(x['episode_steps'])
        max_episode_rewards, mean_episode_rewards, min_episode_rewards, \
                max_episode_steps, mean_episode_steps, min_episode_steps =\
                None, None, None, None, None, None
        if episode_rewards:
            mean_episode_rewards = np.mean(np.array(episode_rewards).flatten())
            max_episode_rewards = np.max(np.array(episode_rewards).flatten())
            min_episode_rewards = np.min(np.array(episode_rewards).flatten())

            mean_episode_steps = np.mean(np.array(episode_steps).flatten())
            max_episode_steps = np.max(np.array(episode_steps).flatten())
            min_episode_steps = np.min(np.array(episode_steps).flatten())

        metric = {
            'Sample steps': self.sample_total_steps,
            'max_episode_rewards': max_episode_rewards,
            'mean_episode_rewards': mean_episode_rewards,
            'min_episode_rewards': min_episode_rewards,
            'max_episode_steps': max_episode_steps,
            'mean_episode_steps': mean_episode_steps,
            'min_episode_steps': min_episode_steps,
            'total_loss': self.total_loss_stat.mean,
            'pi_loss': self.pi_loss_stat.mean,
            'vf_loss': self.vf_loss_stat.mean,
            'entropy': self.entropy_stat.mean,
            'learn_time_s': self.learn_time_stat.mean,
            'elapsed_time_s': int(time.time() - self.start_time),
            'lr': self.lr,
            'entropy_coeff': self.entropy_coeff,
        }

        for key, value in metric.items():
            if value is not None:
                summary.add_scalar(key, value, self.sample_total_steps)

        logger.info(metric)
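The helper calc_gae called in run_remote_sample above is not part of this snippet. The following is a minimal sketch of the standard generalized advantage estimation (GAE) routine its call sites suggest; the signature is inferred from the calls above, so treat it as an illustration rather than the library's actual implementation:

import numpy as np

def calc_gae(rewards, values, next_value, gamma, lam):
    """Generalized advantage estimation (a sketch inferred from usage).

    rewards: r_0 .. r_{T-1}
    values: V(s_0) .. V(s_{T-1})
    next_value: bootstrap value V(s_T), 0 when the episode terminated
    """
    values = list(values) + [next_value]
    advantages = np.zeros(len(rewards), dtype='float32')
    gae = 0.0
    # Walk backwards: A_t = delta_t + gamma * lam * A_{t+1},
    # where delta_t = r_t + gamma * V(s_{t+1}) - V(s_t).
    for t in reversed(range(len(rewards))):
        delta = rewards[t] + gamma * values[t + 1] - values[t]
        gae = delta + gamma * lam * gae
        advantages[t] = gae
    return advantages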
Example 17
class Learner(object):
    def __init__(self, config):
        self.config = config

        #=========== Create Agent ==========
        env = gym.make(config['env_name'])
        env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
        obs_shape = env.observation_space.shape
        act_dim = env.action_space.n
        self.config['obs_shape'] = obs_shape
        self.config['act_dim'] = act_dim

        model = AtariModel(act_dim)
        algorithm = parl.algorithms.A3C(model,
                                        vf_loss_coeff=config['vf_loss_coeff'])
        self.agent = AtariAgent(algorithm, config)

        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only single-GPU training is supported. '\
                    'Please set the environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_TO_USE]`.'

        #========== Learner ==========

        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)
        self.lr = None
        self.entropy_coeff = None

        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        #========== Remote Actor ===========
        self.remote_count = 0
        self.sample_data_queue = queue.Queue()

        self.remote_metrics_queue = queue.Queue()
        self.sample_total_steps = 0

        self.params_queues = []
        self.create_actors()

    def create_actors(self):
        """ Connect to the cluster and start sampling of the remote actor.
        """
        parl.connect(self.config['master_address'])

        logger.info('Waiting for {} remote actors to connect.'.format(
            self.config['actor_num']))

        for i in six.moves.range(self.config['actor_num']):
            params_queue = queue.Queue()
            self.params_queues.append(params_queue)

            self.remote_count += 1
            logger.info('Remote actor count: {}'.format(self.remote_count))

            remote_thread = threading.Thread(target=self.run_remote_sample,
                                             args=(params_queue, ))
            remote_thread.daemon = True
            remote_thread.start()

        logger.info('All remote actors are ready, begin to learn.')
        self.start_time = time.time()

    def run_remote_sample(self, params_queue):
        """ Sample data from remote actor and update parameters of remote actor.
        """
        remote_actor = Actor(self.config)

        cnt = 0
        while True:
            latest_params = params_queue.get()
            remote_actor.set_weights(latest_params)
            batch = remote_actor.sample()

            self.sample_data_queue.put(batch)

            cnt += 1
            if cnt % self.config['get_remote_metrics_interval'] == 0:
                metrics = remote_actor.get_metrics()
                if metrics:
                    self.remote_metrics_queue.put(metrics)

    def step(self):
        """
        1. kick off all actors to synchronize parameters and sample data;
        2. collect sample data of all actors;
        3. update parameters.
        """

        latest_params = self.agent.get_weights()
        for params_queue in self.params_queues:
            params_queue.put(latest_params)

        train_batch = defaultdict(list)
        for i in range(self.config['actor_num']):
            sample_data = self.sample_data_queue.get()
            for key, value in sample_data.items():
                train_batch[key].append(value)

            self.sample_total_steps += sample_data['obs'].shape[0]

        for key, value in train_batch.items():
            train_batch[key] = np.concatenate(value)

        with self.learn_time_stat:
            total_loss, pi_loss, vf_loss, entropy, lr, entropy_coeff = self.agent.learn(
                obs_np=train_batch['obs'],
                actions_np=train_batch['actions'],
                advantages_np=train_batch['advantages'],
                target_values_np=train_batch['target_values'])

        self.total_loss_stat.add(total_loss)
        self.pi_loss_stat.add(pi_loss)
        self.vf_loss_stat.add(vf_loss)
        self.entropy_stat.add(entropy)
        self.lr = lr
        self.entropy_coeff = entropy_coeff
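        # Note: unlike the asynchronous learners in the earlier examples,
        # step() is synchronous (A2C-style): every actor receives identical
        # weights, samples one batch, and the learner performs a single
        # update on the concatenated data.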

    def log_metrics(self):
        """ Log metrics of learner and actors
        """
        if self.start_time is None:
            return

        metrics = []
        while True:
            try:
                metric = self.remote_metrics_queue.get_nowait()
                metrics.append(metric)
            except queue.Empty:
                break

        episode_rewards, episode_steps = [], []
        for x in metrics:
            episode_rewards.extend(x['episode_rewards'])
            episode_steps.extend(x['episode_steps'])
        max_episode_rewards, mean_episode_rewards, min_episode_rewards, \
                max_episode_steps, mean_episode_steps, min_episode_steps =\
                None, None, None, None, None, None
        if episode_rewards:
            mean_episode_rewards = np.mean(np.array(episode_rewards).flatten())
            max_episode_rewards = np.max(np.array(episode_rewards).flatten())
            min_episode_rewards = np.min(np.array(episode_rewards).flatten())

            mean_episode_steps = np.mean(np.array(episode_steps).flatten())
            max_episode_steps = np.max(np.array(episode_steps).flatten())
            min_episode_steps = np.min(np.array(episode_steps).flatten())

        metric = {
            'sample_steps': self.sample_total_steps,
            'max_episode_rewards': max_episode_rewards,
            'mean_episode_rewards': mean_episode_rewards,
            'min_episode_rewards': min_episode_rewards,
            'max_episode_steps': max_episode_steps,
            'mean_episode_steps': mean_episode_steps,
            'min_episode_steps': min_episode_steps,
            'total_loss': self.total_loss_stat.mean,
            'pi_loss': self.pi_loss_stat.mean,
            'vf_loss': self.vf_loss_stat.mean,
            'entropy': self.entropy_stat.mean,
            'learn_time_s': self.learn_time_stat.mean,
            'elapsed_time_s': int(time.time() - self.start_time),
            'lr': self.lr,
            'entropy_coeff': self.entropy_coeff,
        }

        for key, value in metric.items():
            if value is not None:
                summary.add_scalar(key, value, self.sample_total_steps)

        logger.info(metric)

    def should_stop(self):
        return self.sample_total_steps >= self.config['max_sample_steps']
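
A typical driver loop for this Learner, shown as a hedged sketch (the actual entry point is not part of this example, and the config import below is hypothetical):

if __name__ == '__main__':
    from a3c_config import config  # hypothetical: supply your own config dict

    learner = Learner(config)
    while not learner.should_stop():
        learner.step()
        learner.log_metrics()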