    def test_is_pickleable(self, output_dim, hidden_sizes):
        model = CategoricalMLPModel(output_dim=output_dim,
                                    hidden_sizes=hidden_sizes,
                                    hidden_nonlinearity=None,
                                    hidden_w_init=tf.ones_initializer(),
                                    output_w_init=tf.ones_initializer())
        dist = model.build(self.input_var)

        # Set the first hidden layer's bias to all ones.
        with tf.compat.v1.variable_scope('CategoricalMLPModel/mlp',
                                         reuse=True):
            bias = tf.compat.v1.get_variable('hidden_0/bias')

        bias.load(tf.ones_like(bias).eval())

        output1 = self.sess.run(dist.logits,
                                feed_dict={self.input_var: self.obs})

        h = pickle.dumps(model)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
            model_pickled = pickle.loads(h)
            dist2 = model_pickled.build(input_var)
            output2 = sess.run(dist2.logits, feed_dict={input_var: self.obs})

            assert np.array_equal(output1, output2)
    def test_output_nonlinearity(self):
        model = CategoricalMLPModel(output_dim=1,
                                    output_nonlinearity=lambda x: x / 2)
        obs_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 1))
        obs = np.ones((1, 1))
        dist = model.build(obs_ph)
        probs = tf.compat.v1.get_default_session().run(
            dist.probs, feed_dict={obs_ph: obs})
        assert probs == [0.5]
    def test_output_normalized(self, output_dim):
        model = CategoricalMLPModel(output_dim=output_dim)
        obs_ph = tf.compat.v1.placeholder(tf.float32,
                                          shape=(None, output_dim))
        obs = np.ones((1, output_dim))
        dist = model.build(obs_ph)
        probs = tf.compat.v1.get_default_session().run(
            tf.reduce_sum(dist.probs), feed_dict={obs_ph: obs})
        assert np.isclose(probs, 1.0)
    def __init__(self,
                 env_spec,
                 name='CategoricalMLPPolicy',
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.tanh,
                 hidden_w_init=tf.initializers.glorot_uniform(),
                 hidden_b_init=tf.zeros_initializer(),
                 output_nonlinearity=None,
                 output_w_init=tf.initializers.glorot_uniform(),
                 output_b_init=tf.zeros_initializer(),
                 layer_normalization=False):
        if not isinstance(env_spec.action_space, akro.Discrete):
            raise ValueError('CategoricalMLPPolicy only works '
                             'with akro.Discrete action space.')
        super().__init__(name, env_spec)
        self._obs_dim = env_spec.observation_space.flat_dim
        self._action_dim = env_spec.action_space.n
        self._hidden_sizes = hidden_sizes
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init
        self._output_nonlinearity = output_nonlinearity
        self._output_w_init = output_w_init
        self._output_b_init = output_b_init
        self._layer_normalization = layer_normalization

        self._f_prob = None
        self._dist = None

        self.model = CategoricalMLPModel(
            output_dim=self._action_dim,
            hidden_sizes=hidden_sizes,
            hidden_nonlinearity=hidden_nonlinearity,
            hidden_w_init=hidden_w_init,
            hidden_b_init=hidden_b_init,
            output_nonlinearity=output_nonlinearity,
            output_w_init=output_w_init,
            output_b_init=output_b_init,
            layer_normalization=layer_normalization,
            name='CategoricalMLPModel')

        self._initialize()
    def test_dist(self):
        model = CategoricalMLPModel(output_dim=1)
        dist = model.build(self.input_var)
        assert isinstance(dist, tfp.distributions.OneHotCategorical)
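# For reference: a standalone sketch of the build-and-query flow the tests
# above exercise through the suite's shared fixtures (self.sess,
# self.input_var). The output dimension, placeholder shape, and session
# handling here are illustrative assumptions, not part of the test suite;
# the sketch relies on this module's existing imports (tf, np,
# CategoricalMLPModel).
def _example_categorical_mlp_model():
    """Build a CategoricalMLPModel and evaluate its distribution (sketch)."""
    with tf.compat.v1.Session() as sess:
        input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        model = CategoricalMLPModel(output_dim=3)
        # build() returns a tfp.distributions.OneHotCategorical, as
        # test_dist above asserts.
        dist = model.build(input_var)
        sess.run(tf.compat.v1.global_variables_initializer())
        # Per test_output_normalized, each row of probs sums to 1.
        return sess.run(dist.probs, feed_dict={input_var: np.ones((1, 5))})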
class CategoricalMLPPolicy2(StochasticPolicy2):
    """Categorical MLP Policy.

    A policy represented by a Categorical distribution which is parameterized
    by a multilayer perceptron (MLP). It only works with akro.Discrete action
    spaces.

    Args:
        env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
        name (str): Policy name, also the variable scope.
        hidden_sizes (list[int]): Output dimension of dense layer(s).
            For example, (32, 32) means the MLP of this policy consists of
            two hidden layers, each with 32 hidden units.
        hidden_nonlinearity (callable): Activation function for intermediate
            dense layer(s). It should return a tf.Tensor. Set it to None to
            maintain a linear activation.
        hidden_w_init (callable): Initializer function for the weight of
            intermediate dense layer(s). The function should return a
            tf.Tensor.
        hidden_b_init (callable): Initializer function for the bias of
            intermediate dense layer(s). The function should return a
            tf.Tensor.
        output_nonlinearity (callable): Activation function for output dense
            layer. It should return a tf.Tensor. Set it to None to maintain
            a linear activation.
        output_w_init (callable): Initializer function for the weight of
            output dense layer(s). The function should return a tf.Tensor.
        output_b_init (callable): Initializer function for the bias of
            output dense layer(s). The function should return a tf.Tensor.
        layer_normalization (bool): Bool for using layer normalization or
            not.

    """

    def __init__(self,
                 env_spec,
                 name='CategoricalMLPPolicy',
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.tanh,
                 hidden_w_init=tf.initializers.glorot_uniform(),
                 hidden_b_init=tf.zeros_initializer(),
                 output_nonlinearity=None,
                 output_w_init=tf.initializers.glorot_uniform(),
                 output_b_init=tf.zeros_initializer(),
                 layer_normalization=False):
        if not isinstance(env_spec.action_space, akro.Discrete):
            raise ValueError('CategoricalMLPPolicy only works '
                             'with akro.Discrete action space.')
        super().__init__(name, env_spec)
        self.obs_dim = env_spec.observation_space.flat_dim
        self.action_dim = env_spec.action_space.n
        self._hidden_sizes = hidden_sizes
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init
        self._output_nonlinearity = output_nonlinearity
        self._output_w_init = output_w_init
        self._output_b_init = output_b_init
        self._layer_normalization = layer_normalization

        self._f_prob = None
        self._dist = None

        self.model = CategoricalMLPModel(
            output_dim=self.action_dim,
            hidden_sizes=hidden_sizes,
            hidden_nonlinearity=hidden_nonlinearity,
            hidden_w_init=hidden_w_init,
            hidden_b_init=hidden_b_init,
            output_nonlinearity=output_nonlinearity,
            output_w_init=output_w_init,
            output_b_init=output_b_init,
            layer_normalization=layer_normalization,
            name='CategoricalMLPModel')

    def build(self, state_input, name=None):
        """Build model.

        Args:
            state_input (tf.Tensor): State input.
            name (str): Name of the model, which is also the name scope.

        """
        with tf.compat.v1.variable_scope(self.name) as vs:
            self._variable_scope = vs
            self._dist = self.model.build(state_input, name=name)
            self._f_prob = tf.compat.v1.get_default_session().make_callable(
                [tf.argmax(self._dist.sample(), -1), self._dist.probs],
                feed_list=[state_input])

    @property
    def distribution(self):
        """Policy distribution.

        Returns:
            tfp.Distribution.OneHotCategorical: Policy distribution.

        """
        return self._dist

    @property
    def vectorized(self):
        """Vectorized or not.

        Returns:
            Bool: True if primitive supports vectorized operations.

        """
        return True

    def get_action(self, observation):
        """Return a single action.
        Args:
            observation (numpy.ndarray): Observations.

        Returns:
            int: Action given input observation.
            dict(numpy.ndarray): Distribution parameters.

        """
        sample, prob = self._f_prob([observation])
        return sample[0], dict(prob=prob[0])

    def get_actions(self, observations):
        """Return multiple actions.

        Args:
            observations (numpy.ndarray): Observations.

        Returns:
            list[int]: Actions given input observations.
            dict(numpy.ndarray): Distribution parameters.

        """
        samples, probs = self._f_prob(observations)
        return samples, dict(prob=probs)

    def get_regularizable_vars(self):
        """Get regularizable weight variables under the Policy scope.

        Returns:
            list[tf.Tensor]: Trainable variables.

        """
        trainable = self.get_trainable_vars()
        return [
            var for var in trainable
            if 'hidden' in var.name and 'kernel' in var.name
        ]

    def clone(self, name):
        """Return a clone of the policy.

        It only copies the configuration of the primitive,
        not the parameters.

        Args:
            name (str): Name of the newly created policy. It has to be
                different from source policy if cloned under the same
                computational graph.

        Returns:
            garage.tf.policies.Policy: Newly cloned policy.

        """
        return self.__class__(name=name,
                              env_spec=self._env_spec,
                              hidden_sizes=self._hidden_sizes,
                              hidden_nonlinearity=self._hidden_nonlinearity,
                              hidden_w_init=self._hidden_w_init,
                              hidden_b_init=self._hidden_b_init,
                              output_nonlinearity=self._output_nonlinearity,
                              output_w_init=self._output_w_init,
                              output_b_init=self._output_b_init,
                              layer_normalization=self._layer_normalization)

    def __getstate__(self):
        """Object.__getstate__.

        Returns:
            dict: State dictionary.

        """
        new_dict = super().__getstate__()
        del new_dict['_f_prob']
        del new_dict['_dist']
        return new_dict
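# A minimal usage sketch for CategoricalMLPPolicy2, kept out of the class so
# it never runs on import. The environment wrapper, env id, and imports
# below are illustrative assumptions (wrapper names vary across garage
# versions); any env_spec whose action space is akro.Discrete will do.
def _example_policy_usage():
    """Construct, build, and query the policy inside a session (sketch)."""
    import gym

    from garage.envs import GarageEnv  # assumed wrapper; version-dependent

    env = GarageEnv(gym.make('CartPole-v1'))
    policy = CategoricalMLPPolicy2(env_spec=env.spec, hidden_sizes=(32, 32))
    obs_ph = tf.compat.v1.placeholder(
        tf.float32, shape=(None, env.spec.observation_space.flat_dim))
    with tf.compat.v1.Session() as sess:
        # build() creates the distribution and the sampling callable via
        # make_callable, so it must run while a default session is active.
        policy.build(obs_ph)
        sess.run(tf.compat.v1.global_variables_initializer())
        action, info = policy.get_action(env.reset())
        return action, info['prob']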