    def test_dist(self):
        model = CategoricalGRUModel(output_dim=1, hidden_dim=1)
        step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size, 1),
                                                   name='step_hidden',
                                                   dtype=tf.float32)
        dist = model.build(self.input_var, self.step_input_var,
                           step_hidden_var).dist
        assert isinstance(dist, tfp.distributions.OneHotCategorical)
    def test_is_pickleable(self):
        model = CategoricalGRUModel(output_dim=1, hidden_dim=1)
        step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size, 1),
                                                   name='step_hidden',
                                                   dtype=tf.float32)
        network = model.build(self.input_var, self.step_input_var,
                              step_hidden_var)
        dist = network.dist

        # assign the initial hidden state to all ones
        with tf.compat.v1.variable_scope('CategoricalGRUModel/gru',
                                         reuse=True):
            init_hidden = tf.compat.v1.get_variable('initial_hidden')
            init_hidden.load(tf.ones_like(init_hidden).eval())

        hidden = np.zeros((self.batch_size, 1))

        outputs1 = self.sess.run(dist.probs,
                                 feed_dict={self.input_var: self.obs_inputs})
        output1 = self.sess.run(
            [network.step_output, network.step_hidden],
            # yapf: disable
            feed_dict={
                self.step_input_var: self.obs_input,
                step_hidden_var: hidden
            })
        # yapf: enable

        h = pickle.dumps(model)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            model_pickled = pickle.loads(h)
            input_var = tf.compat.v1.placeholder(tf.float32,
                                                 shape=(None, None,
                                                        self.feature_shape),
                                                 name='input')
            step_input_var = tf.compat.v1.placeholder(
                tf.float32, shape=(None, self.feature_shape), name='input')
            step_hidden_var = tf.compat.v1.placeholder(
                shape=(self.batch_size, 1),
                name='initial_hidden',
                dtype=tf.float32)
            network2 = model_pickled.build(input_var, step_input_var,
                                           step_hidden_var)
            dist2 = network2.dist

            outputs2 = sess.run(dist2.probs,
                                feed_dict={input_var: self.obs_inputs})
            output2 = sess.run(
                [network2.step_output, network2.step_hidden],
                # yapf: disable
                feed_dict={
                    step_input_var: self.obs_input,
                    step_hidden_var: hidden
                })
            # yapf: enable

            assert np.array_equal(outputs1, outputs2)
            assert np.array_equal(output1, output2)
    def test_output_nonlinearity(self):
        model = CategoricalGRUModel(output_dim=1,
                                    hidden_dim=4,
                                    output_nonlinearity=lambda x: x / 2)
        obs_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, None, 1))
        step_obs_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 1))
        step_hidden_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 4))
        obs = np.ones((1, 1, 1))
        dist = model.build(obs_ph, step_obs_ph, step_hidden_ph).dist
        probs = tf.compat.v1.get_default_session().run(
            dist.probs, feed_dict={obs_ph: obs})
        assert probs == [0.5]
    def test_output_normalized(self, output_dim):
        model = CategoricalGRUModel(output_dim=output_dim, hidden_dim=4)
        obs_ph = tf.compat.v1.placeholder(tf.float32,
                                          shape=(None, None, output_dim))
        step_obs_ph = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, output_dim))
        step_hidden_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 4))
        obs = np.ones((1, 1, output_dim))
        dist = model.build(obs_ph, step_obs_ph, step_hidden_ph).dist
        probs = tf.compat.v1.get_default_session().run(
            tf.reduce_sum(dist.probs), feed_dict={obs_ph: obs})
        assert np.isclose(probs, 1.0)
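
# The tests above rely on the following build contract (a sketch inferred
# from this test file, not an authoritative description of the garage API):
#
#     network = model.build(input_var, step_input_var, step_hidden_var)
#     network.dist         # tfp.distributions.OneHotCategorical over outputs
#     network.step_output  # per-step output probabilities for one time step
#     network.step_hidden  # next GRU hidden state for that step
#     network.init_hidden  # initial hidden state variable of the GRU cell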
class CategoricalGRUPolicy2(StochasticPolicy2):
    """Categorical GRU Policy.

    A policy represented by a Categorical distribution
    which is parameterized by a Gated Recurrent Unit (GRU).

    It only works with akro.Discrete action space.

    Args:
        env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
        name (str): Policy name, also the variable scope.
        hidden_dim (int): Hidden dimension for GRU cell.
        hidden_nonlinearity (callable): Activation function for intermediate
            dense layer(s). It should return a tf.Tensor. Set it to
            None to maintain a linear activation.
        hidden_w_init (callable): Initializer function for the weight
            of intermediate dense layer(s). The function should return a
            tf.Tensor.
        hidden_b_init (callable): Initializer function for the bias
            of intermediate dense layer(s). The function should return a
            tf.Tensor.
        recurrent_nonlinearity (callable): Activation function for recurrent
            layers. It should return a tf.Tensor. Set it to None to
            maintain a linear activation.
        recurrent_w_init (callable): Initializer function for the weight
            of recurrent layer(s). The function should return a tf.Tensor.
        output_nonlinearity (callable): Activation function for output dense
            layer. It should return a tf.Tensor. Set it to None to
            maintain a linear activation.
        output_w_init (callable): Initializer function for the weight
            of output dense layer(s). The function should return a tf.Tensor.
        output_b_init (callable): Initializer function for the bias
            of output dense layer(s). The function should return a tf.Tensor.
        hidden_state_init (callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
        hidden_state_init_trainable (bool): Bool for whether the initial
            hidden state is trainable.
        state_include_action (bool): Whether the state includes action.
            If True, input dimension will be
            (observation dimension + action dimension).
        layer_normalization (bool): Bool for using layer normalization or not.
""" def __init__(self, env_spec, name='CategoricalGRUPolicy', hidden_dim=32, hidden_nonlinearity=tf.nn.tanh, hidden_w_init=tf.initializers.glorot_uniform(), hidden_b_init=tf.zeros_initializer(), recurrent_nonlinearity=tf.nn.sigmoid, recurrent_w_init=tf.initializers.glorot_uniform(), output_nonlinearity=tf.nn.softmax, output_w_init=tf.initializers.glorot_uniform(), output_b_init=tf.zeros_initializer(), hidden_state_init=tf.zeros_initializer(), hidden_state_init_trainable=False, state_include_action=True, layer_normalization=False): if not isinstance(env_spec.action_space, akro.Discrete): raise ValueError('CategoricalGRUPolicy only works' 'with akro.Discrete action space.') super().__init__(name, env_spec) self._obs_dim = env_spec.observation_space.flat_dim self._action_dim = env_spec.action_space.n self._hidden_dim = hidden_dim self._hidden_nonlinearity = hidden_nonlinearity self._hidden_w_init = hidden_w_init self._hidden_b_init = hidden_b_init self._recurrent_nonlinearity = recurrent_nonlinearity self._recurrent_w_init = recurrent_w_init self._output_nonlinearity = output_nonlinearity self._output_w_init = output_w_init self._output_b_init = output_b_init self._hidden_state_init = hidden_state_init self._hidden_state_init_trainable = hidden_state_init_trainable self._layer_normalization = layer_normalization self._state_include_action = state_include_action if state_include_action: self._input_dim = self._obs_dim + self._action_dim else: self._input_dim = self._obs_dim self._f_step_prob = None self.model = CategoricalGRUModel( output_dim=self._action_dim, hidden_dim=self._hidden_dim, name='prob_network', hidden_nonlinearity=hidden_nonlinearity, hidden_w_init=hidden_w_init, hidden_b_init=hidden_b_init, recurrent_nonlinearity=recurrent_nonlinearity, recurrent_w_init=recurrent_w_init, hidden_state_init=hidden_state_init, hidden_state_init_trainable=hidden_state_init_trainable, output_nonlinearity=output_nonlinearity, output_w_init=output_w_init, output_b_init=output_b_init, layer_normalization=layer_normalization) self._prev_actions = None self._prev_hiddens = None def build(self, state_input, name=None): """Build model. Args: state_input (tf.Tensor) : State input. name (str): Name of the model, which is also the name scope. """ with tf.compat.v1.variable_scope(self.name) as vs: self._variable_scope = vs step_input_var = tf.compat.v1.placeholder(shape=(None, self._input_dim), name='step_input', dtype=tf.float32) step_hidden_var = tf.compat.v1.placeholder( shape=(None, self._hidden_dim), name='step_hidden_input', dtype=tf.float32) self.model.build(state_input, step_input_var, step_hidden_var, name=name) self._f_step_prob = tf.compat.v1.get_default_session().make_callable( [ self.model.networks['default'].step_output, self.model.networks['default'].step_hidden ], feed_list=[step_input_var, step_hidden_var]) @property def vectorized(self): """Vectorized or not. Returns: Bool: True if primitive supports vectorized operations. """ return True def reset(self, do_resets=None): """Reset the policy. Note: If `do_resets` is None, it will be by default np.array([True]), which implies the policy will not be "vectorized", i.e. number of paralle environments for training data sampling = 1. Args: do_resets (numpy.ndarray): Bool that indicates terminal state(s). 
""" if do_resets is None: do_resets = [True] do_resets = np.asarray(do_resets) if self._prev_actions is None or len(do_resets) != len( self._prev_actions): self._prev_actions = np.zeros( (len(do_resets), self.action_space.flat_dim)) self._prev_hiddens = np.zeros((len(do_resets), self._hidden_dim)) self._prev_actions[do_resets] = 0. self._prev_hiddens[do_resets] = self.model.networks[ 'default'].init_hidden.eval() def get_action(self, observation): """Return a single action. Args: observation (numpy.ndarray): Observations. Returns: int: Action given input observation. dict(numpy.ndarray): Distribution parameters. """ actions, agent_infos = self.get_actions([observation]) return actions[0], {k: v[0] for k, v in agent_infos.items()} def get_actions(self, observations): """Return multiple actions. Args: observations (numpy.ndarray): Observations. Returns: list[int]: Actions given input observations. dict(numpy.ndarray): Distribution parameters. """ if self._state_include_action: assert self._prev_actions is not None all_input = np.concatenate([observations, self._prev_actions], axis=-1) else: all_input = observations probs, hidden_vec = self._f_step_prob(all_input, self._prev_hiddens) actions = list(map(self.action_space.weighted_sample, probs)) prev_actions = self._prev_actions self._prev_actions = self.action_space.flatten_n(actions) self._prev_hiddens = hidden_vec agent_info = dict(prob=probs) if self._state_include_action: agent_info['prev_action'] = np.copy(prev_actions) return actions, agent_info @property def recurrent(self): """Recurrent or not. Returns: bool: Whether policy is recurrent or not. """ return True @property def distribution(self): """Policy distribution. Returns: tfp.Distribution.OneHotCategorical: Policy distribution. """ return self.model.networks['default'].dist @property def state_info_specs(self): """State info specifcation. Returns: List[str]: keys and shapes for the information related to the policy's state when taking an action. """ if self._state_include_action: return [ ('prev_action', (self._action_dim, )), ] return [] def clone(self, name): """Return a clone of the policy. It only copies the configuration of the primitive, not the parameters. Args: name (str): Name of the newly created policy. It has to be different from source policy if cloned under the same computational graph. Returns: garage.tf.policies.CategoricalGRUPolicy2: Newly cloned policy. """ return self.__class__( name=name, env_spec=self._env_spec, hidden_dim=self._hidden_dim, hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self._hidden_w_init, hidden_b_init=self._hidden_b_init, recurrent_nonlinearity=self._recurrent_nonlinearity, recurrent_w_init=self._recurrent_w_init, output_nonlinearity=self._output_nonlinearity, output_w_init=self._output_w_init, output_b_init=self._output_b_init, hidden_state_init=self._hidden_state_init, hidden_state_init_trainable=self._hidden_state_init_trainable, state_include_action=self._state_include_action, layer_normalization=self._layer_normalization) def __getstate__(self): """Object.__getstate__. Returns: dict: the state to be pickled for the instance. """ new_dict = super().__getstate__() del new_dict['_f_step_prob'] return new_dict