Example #1
 def run_training(self):
     self.summary_writer = tf.summary.FileWriter(self.model_path)
     tf.reset_default_graph()
     with tf.Session() as sess:
         self.dqn.build()
         model_saver = tf.train.Saver(max_to_keep=self.max_num_checkpoints)
         # The schedule for the epsilon in epsilon greedy policy.
         self.exploration = schedules.PiecewiseSchedule(
             [(0, 1.0), (int(self.num_episodes / 2), 0.1),
              (self.num_episodes, 0.01)],
             outside_value=0.01)
         if self.prioritized:
             self.memory = replay_buffer.PrioritizedReplayBuffer(
                 self.replay_buffer_size, self.prioritized_alpha)
             self.beta_schedule = schedules.LinearSchedule(
                 self.num_episodes,
                 initial_p=self.prioritized_beta,
                 final_p=0)
         else:
             self.memory = replay_buffer.ReplayBuffer(
                 self.replay_buffer_size)
             self.beta_schedule = None
         sess.run(tf.global_variables_initializer())
         sess.run(self.dqn.update_op)
         self.global_step = 0
         for self.episode in range(self.num_episodes):
             self.global_step = self._episode()
             print(self.episode, self.global_step)
             if (self.episode + 1) % self.update_frequency == 0:
                 sess.run(self.dqn.update_op)
             if (self.episode + 1) % self.save_frequency == 0:
                 model_saver.save(sess,
                                  os.path.join(self.model_path,
                                               self.model_name + '_ckpt'),
                                  global_step=self.episode + 1)
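The comment above notes that `exploration` drives the epsilon-greedy policy. The snippet below is a minimal sketch of how those schedule objects would typically be queried inside the episode loop; it assumes `schedules` follows the OpenAI baselines interface, where every schedule exposes `value(t)`, and the `num_episodes` and beta values are hypothetical stand-ins for the instance attributes used above.

from baselines.common import schedules  # assumed baselines-style schedules

num_episodes = 1000          # hypothetical stand-in for self.num_episodes
prioritized_beta = 0.6       # hypothetical stand-in for self.prioritized_beta

# Same construction as in run_training above.
exploration = schedules.PiecewiseSchedule(
    [(0, 1.0), (num_episodes // 2, 0.1), (num_episodes, 0.01)],
    outside_value=0.01)
beta_schedule = schedules.LinearSchedule(
    num_episodes, initial_p=prioritized_beta, final_p=0)

# Inside _episode the current episode index would be passed to value():
# epsilon anneals 1.0 -> 0.1 -> 0.01, beta anneals 0.6 -> 0.
for episode in (0, num_episodes // 2, num_episodes):
    print(episode, exploration.value(episode), beta_schedule.value(episode))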
Example #2
def schedule_from_config(config):
  if config.get("type") == "linear":
    return schedules.LinearSchedule(
        config.get("num_steps"),
        final_p=config.get("final"),
        initial_p=config.get("initial"))
  elif config.get("type") == "constant":
    return schedules.ConstantSchedule(config.get("value"))
  else:
    raise ValueError("Unsupported schedule type: {}".format(config.get("type")))
def run_training(hparams, environment, dqn):
    """Runs the training procedure.

  Briefly, the agent runs the action network to get an action to take in
  the environment. The state transition and reward are stored in the memory.
  Periodically the agent samples a batch of samples from the memory to
  update(train) its Q network. Note that the Q network and the action network
  share the same set of parameters, so the action network is also updated by
  the samples of (state, action, next_state, reward) batches.


  Args:
    hparams: tf.HParams. The hyper parameters of the model.
    environment: molecules.Molecule. The environment to run on.
    dqn: An instance of the DeepQNetwork class.

  Returns:
    None
  """
    summary_writer = tf.summary.FileWriter(FLAGS.model_dir)
    tf.reset_default_graph()
    with tf.Session() as sess:
        dqn.build()
        model_saver = tf.train.Saver(max_to_keep=hparams.max_num_checkpoints)
        # The schedule for the epsilon in epsilon greedy policy.
        exploration = schedules.PiecewiseSchedule(
            [(0, 1.0), (int(hparams.num_episodes / 2), 0.1),
             (hparams.num_episodes, 0.01)],
            outside_value=0.01)
        if hparams.prioritized:
            memory = replay_buffer.PrioritizedReplayBuffer(
                hparams.replay_buffer_size, hparams.prioritized_alpha)
            beta_schedule = schedules.LinearSchedule(
                hparams.num_episodes,
                initial_p=hparams.prioritized_beta,
                final_p=0)
        else:
            memory = replay_buffer.ReplayBuffer(hparams.replay_buffer_size)
            beta_schedule = None
        sess.run(tf.global_variables_initializer())
        sess.run(dqn.update_op)
        global_step = 0
        for episode in range(hparams.num_episodes):
            global_step = _episode(environment=environment,
                                   dqn=dqn,
                                   memory=memory,
                                   episode=episode,
                                   global_step=global_step,
                                   hparams=hparams,
                                   summary_writer=summary_writer,
                                   exploration=exploration,
                                   beta_schedule=beta_schedule)
            if (episode + 1) % hparams.update_frequency == 0:
                sess.run(dqn.update_op)
            if (episode + 1) % hparams.save_frequency == 0:
                model_saver.save(sess,
                                 os.path.join(FLAGS.model_dir, 'ckpt'),
                                 global_step=global_step)
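The docstring above describes the memory cycle: transitions are stored after each environment step and periodically sampled to train the Q network. The sketch below illustrates that cycle in isolation, assuming `replay_buffer` follows the baselines deepq interface (`add`, `sample`, `update_priorities`); the buffer size, transition arrays, and priority values are placeholders rather than values from the original code.

import numpy as np
from baselines.deepq import replay_buffer  # assumed baselines-style buffers

memory = replay_buffer.PrioritizedReplayBuffer(1000, alpha=0.6)  # placeholder size/alpha

# Store a (state, action, reward, next_state, done) transition, as the
# docstring describes; the arrays here are dummy placeholders.
state = np.zeros(10, dtype=np.float32)
next_state = np.ones(10, dtype=np.float32)
memory.add(state, 0, 1.0, next_state, False)

# Prioritized sampling needs the importance-sampling exponent beta, which is
# why run_training only builds beta_schedule in the prioritized branch; in the
# training loop it would come from beta_schedule.value(episode).
(states, actions, rewards, next_states, dones,
 weights, indices) = memory.sample(1, beta=0.4)

# After a gradient step, new priorities (typically |TD error|) are written back.
memory.update_priorities(indices, np.ones(len(indices)))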