def __init__(self,
             env_spec,
             name='CategoricalMLPPolicy',
             hidden_sizes=(32, 32),
             hidden_nonlinearity=tf.nn.tanh,
             hidden_w_init=tf.glorot_uniform_initializer(),
             hidden_b_init=tf.zeros_initializer(),
             output_nonlinearity=tf.nn.softmax,
             output_w_init=tf.glorot_uniform_initializer(),
             output_b_init=tf.zeros_initializer(),
             layer_normalization=False):
    """Categorical MLP policy for discrete action spaces.

    Args:
        env_spec: Environment specification; the action space must be an
            akro.tf.Discrete space.
        name (str): Policy (and variable-scope) name.
        hidden_sizes (tuple): Width of each hidden layer of the MLP.
        hidden_nonlinearity: Activation for hidden layers.
        hidden_w_init: Initializer for hidden-layer weights.
        hidden_b_init: Initializer for hidden-layer biases.
        output_nonlinearity: Activation for the output layer (softmax
            yields a categorical distribution over actions).
        output_w_init: Initializer for output-layer weights.
        output_b_init: Initializer for output-layer biases.
        layer_normalization (bool): Whether to apply layer normalization.
    """
    # Bug fix: the two adjacent string literals previously concatenated
    # to '...akro.tf.Discreteaction space.' (missing separating space).
    assert isinstance(env_spec.action_space, Discrete), (
        'CategoricalMLPPolicy only works with akro.tf.Discrete '
        'action space.')
    super().__init__(name, env_spec)
    self.obs_dim = env_spec.observation_space.flat_dim
    self.action_dim = env_spec.action_space.n
    self.model = MLPModel(output_dim=self.action_dim,
                          hidden_sizes=hidden_sizes,
                          hidden_nonlinearity=hidden_nonlinearity,
                          hidden_w_init=hidden_w_init,
                          hidden_b_init=hidden_b_init,
                          output_nonlinearity=output_nonlinearity,
                          output_w_init=output_w_init,
                          output_b_init=output_b_init,
                          layer_normalization=layer_normalization,
                          name='MLPModel')
    self._initialize()
def __init__(self,
             env_spec,
             name='ContinuousMLPPolicy',
             hidden_sizes=(64, 64),
             hidden_nonlinearity=tf.nn.relu,
             hidden_w_init=tf.glorot_uniform_initializer(),
             hidden_b_init=tf.zeros_initializer(),
             output_nonlinearity=tf.nn.tanh,
             output_w_init=tf.glorot_uniform_initializer(),
             output_b_init=tf.zeros_initializer(),
             layer_normalization=False):
    """Continuous MLP policy: maps flat observations to flat actions.

    Args:
        env_spec: Environment specification (observation and action
            spaces must expose ``flat_dim``).
        name (str): Policy name.
        hidden_sizes (tuple): Width of each hidden MLP layer.
        hidden_nonlinearity: Activation for hidden layers.
        hidden_w_init: Initializer for hidden-layer weights.
        hidden_b_init: Initializer for hidden-layer biases.
        output_nonlinearity: Activation for the output layer.
        output_w_init: Initializer for output-layer weights.
        output_b_init: Initializer for output-layer biases.
        layer_normalization (bool): Whether to apply layer normalization.
    """
    super().__init__(name, env_spec)
    # Cache the constructor settings (used when the policy is cloned
    # or re-built after unpickling).
    self._hidden_sizes = hidden_sizes
    self._hidden_nonlinearity = hidden_nonlinearity
    self._hidden_w_init = hidden_w_init
    self._hidden_b_init = hidden_b_init
    self._output_nonlinearity = output_nonlinearity
    self._output_w_init = output_w_init
    self._output_b_init = output_b_init
    self._layer_normalization = layer_normalization
    self.obs_dim = env_spec.observation_space.flat_dim
    self.model = MLPModel(output_dim=env_spec.action_space.flat_dim,
                          name='MLPModel',
                          hidden_sizes=hidden_sizes,
                          hidden_nonlinearity=hidden_nonlinearity,
                          hidden_w_init=hidden_w_init,
                          hidden_b_init=hidden_b_init,
                          output_nonlinearity=output_nonlinearity,
                          output_w_init=output_w_init,
                          output_b_init=output_b_init,
                          layer_normalization=layer_normalization)
    self._initialize()
def test_is_pickleable(self, output_dim, hidden_sizes):
    """MLPModel survives a pickle round-trip with identical outputs."""
    original = MLPModel(output_dim=output_dim,
                        hidden_sizes=hidden_sizes,
                        hidden_nonlinearity=None,
                        hidden_w_init=tf.ones_initializer(),
                        output_w_init=tf.ones_initializer())
    out_tensors = original.build(self.input_var)

    # Overwrite the first hidden layer's bias with ones so the pickled
    # copy must carry non-default parameter values to match.
    with tf.compat.v1.variable_scope('MLPModel/mlp', reuse=True):
        bias = tf.compat.v1.get_variable('hidden_0/bias')
        bias.load(tf.ones_like(bias).eval())

    before = self.sess.run(out_tensors,
                           feed_dict={self.input_var: self.obs})
    payload = pickle.dumps(original)

    # Rebuild in a fresh graph/session and compare outputs.
    with tf.compat.v1.Session(graph=tf.Graph()) as sess:
        placeholder = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        restored = pickle.loads(payload)
        rebuilt = restored.build(placeholder)
        after = sess.run(rebuilt, feed_dict={placeholder: self.obs})
        assert np.array_equal(before, after)
def __init__(self,
             env_spec,
             name='DeterministicMLPPolicy',
             hidden_sizes=(64, 64),
             hidden_nonlinearity=tf.nn.relu,
             hidden_w_init=tf.glorot_uniform_initializer(),
             hidden_b_init=tf.zeros_initializer(),
             output_nonlinearity=tf.nn.tanh,
             output_w_init=tf.glorot_uniform_initializer(),
             output_b_init=tf.zeros_initializer(),
             input_include_goal=False,
             layer_normalization=False):
    """Deterministic MLP policy: observation in, action out.

    Args:
        env_spec: Environment specification.
        name (str): Policy name.
        hidden_sizes (tuple): Width of each hidden MLP layer.
        hidden_nonlinearity: Activation for hidden layers.
        hidden_w_init: Initializer for hidden-layer weights.
        hidden_b_init: Initializer for hidden-layer biases.
        output_nonlinearity: Activation for the output layer.
        output_w_init: Initializer for output-layer weights.
        output_b_init: Initializer for output-layer biases.
        input_include_goal (bool): If True, the flattened observation
            also includes the 'desired_goal' key of the observation dict.
        layer_normalization (bool): Whether to apply layer normalization.
    """
    super().__init__(name, env_spec)
    # Observation size depends on whether the goal is appended to it.
    if input_include_goal:
        self.obs_dim = env_spec.observation_space.flat_dim_with_keys(
            ['observation', 'desired_goal'])
    else:
        self.obs_dim = env_spec.observation_space.flat_dim
    self.model = MLPModel(output_dim=env_spec.action_space.flat_dim,
                          name='MLPModel',
                          hidden_sizes=hidden_sizes,
                          hidden_nonlinearity=hidden_nonlinearity,
                          hidden_w_init=hidden_w_init,
                          hidden_b_init=hidden_b_init,
                          output_nonlinearity=output_nonlinearity,
                          output_w_init=output_w_init,
                          output_b_init=output_b_init,
                          layer_normalization=layer_normalization)
    self._initialize()
def __init__(self,
             env_spec,
             name='discrete_mlp_q_function',
             hidden_sizes=(32, 32),
             hidden_nonlinearity=tf.nn.relu,
             hidden_w_init=tf.glorot_uniform_initializer(),
             hidden_b_init=tf.zeros_initializer(),
             output_nonlinearity=None,
             output_w_init=tf.glorot_uniform_initializer(),
             output_b_init=tf.zeros_initializer(),
             layer_normalization=False):
    """Q-function over a discrete action space, backed by an MLP.

    The model outputs one Q-value per action (output width is the
    flattened action dimension), fed by an observation placeholder.

    Args:
        env_spec: Environment specification.
        name (str): Name used for the model and the variable scope.
        hidden_sizes (tuple): Width of each hidden MLP layer.
        hidden_nonlinearity: Activation for hidden layers.
        hidden_w_init: Initializer for hidden-layer weights.
        hidden_b_init: Initializer for hidden-layer biases.
        output_nonlinearity: Activation for the output layer (None
            leaves the Q-values linear).
        output_w_init: Initializer for output-layer weights.
        output_b_init: Initializer for output-layer biases.
        layer_normalization (bool): Whether to apply layer normalization.
    """
    observation_shape = env_spec.observation_space.shape
    n_actions = env_spec.action_space.flat_dim
    self.model = MLPModel(output_dim=n_actions,
                          name=name,
                          hidden_sizes=hidden_sizes,
                          hidden_nonlinearity=hidden_nonlinearity,
                          hidden_w_init=hidden_w_init,
                          hidden_b_init=hidden_b_init,
                          output_nonlinearity=output_nonlinearity,
                          output_w_init=output_w_init,
                          output_b_init=output_b_init,
                          layer_normalization=layer_normalization)
    # NOTE(review): this constructor uses the TF1 API (tf.placeholder,
    # tf.variable_scope) rather than tf.compat.v1 as elsewhere — confirm
    # the file targets TF1 before modernizing.
    obs_input = tf.placeholder(tf.float32, (None, ) + observation_shape,
                               name='obs')
    with tf.variable_scope(name) as vs:
        self._variable_scope = vs
        self.model.build(obs_input)
def __init__(self,
             env_spec,
             name=None,
             hidden_sizes=(32, 32),
             hidden_nonlinearity=tf.nn.relu,
             hidden_w_init=tf.initializers.glorot_uniform(),
             hidden_b_init=tf.zeros_initializer(),
             output_nonlinearity=None,
             output_w_init=tf.initializers.glorot_uniform(),
             output_b_init=tf.zeros_initializer(),
             dueling=False,
             layer_normalization=False):
    """Discrete MLP Q-function, optionally with a dueling head.

    Args:
        env_spec: Environment specification.
        name (str): Q-function name.
        hidden_sizes (tuple): Width of each hidden MLP layer.
        hidden_nonlinearity: Activation for hidden layers.
        hidden_w_init: Initializer for hidden-layer weights.
        hidden_b_init: Initializer for hidden-layer biases.
        output_nonlinearity: Activation for the output layer.
        output_w_init: Initializer for output-layer weights.
        output_b_init: Initializer for output-layer biases.
        dueling (bool): If True, use MLPDuelingModel instead of MLPModel.
        layer_normalization (bool): Whether to apply layer normalization.
    """
    super().__init__(name)
    # Keep all constructor arguments for later re-construction.
    self._env_spec = env_spec
    self._hidden_sizes = hidden_sizes
    self._hidden_nonlinearity = hidden_nonlinearity
    self._hidden_w_init = hidden_w_init
    self._hidden_b_init = hidden_b_init
    self._output_nonlinearity = output_nonlinearity
    self._output_w_init = output_w_init
    self._output_b_init = output_b_init
    self._dueling = dueling
    self._layer_normalization = layer_normalization
    self.obs_dim = env_spec.observation_space.shape
    # Both model classes accept the identical keyword set, so select the
    # class and make a single construction call.
    model_cls = MLPDuelingModel if dueling else MLPModel
    self.model = model_cls(output_dim=env_spec.action_space.flat_dim,
                           hidden_sizes=hidden_sizes,
                           hidden_nonlinearity=hidden_nonlinearity,
                           hidden_w_init=hidden_w_init,
                           hidden_b_init=hidden_b_init,
                           output_nonlinearity=output_nonlinearity,
                           output_w_init=output_w_init,
                           output_b_init=output_b_init,
                           layer_normalization=layer_normalization)
    self._network = None
    self._initialize()
def __init__(self,
             env_spec,
             conv_filters,
             conv_filter_sizes,
             conv_strides,
             conv_pad,
             name='CategoricalCNNPolicy',
             hidden_sizes=(32, 32),
             hidden_nonlinearity=tf.nn.relu,
             hidden_w_init=tf.initializers.glorot_uniform(),
             hidden_b_init=tf.zeros_initializer(),
             output_nonlinearity=tf.nn.softmax,
             output_w_init=tf.initializers.glorot_uniform(),
             output_b_init=tf.zeros_initializer(),
             layer_normalization=False):
    """Categorical policy with a CNN feature extractor and an MLP head.

    Args:
        env_spec: Environment specification. The action space must be
            akro.Discrete and the observation space a 2D/3D akro.Box
            (or akro.Image).
        conv_filters: Number of filters per convolutional layer.
        conv_filter_sizes: Filter size per convolutional layer.
        conv_strides: Stride per convolutional layer.
        conv_pad: Padding mode for the convolutions.
        name (str): Policy name.
        hidden_sizes (tuple): Width of each hidden MLP layer.
        hidden_nonlinearity: Activation shared by CNN and MLP layers.
        hidden_w_init: Initializer for MLP hidden-layer weights.
        hidden_b_init: Initializer for MLP hidden-layer biases.
        output_nonlinearity: Output activation (softmax over actions).
        output_w_init: Initializer for output-layer weights.
        output_b_init: Initializer for output-layer biases.
        layer_normalization (bool): Whether to apply layer normalization.

    Raises:
        ValueError: If the action or observation space is unsupported.
    """
    obs_space = env_spec.observation_space
    if not isinstance(env_spec.action_space, akro.Discrete):
        raise ValueError(
            'CategoricalCNNPolicy only works with akro.Discrete action '
            'space.')
    # Equivalent (by De Morgan) to the "not Box or wrong rank" check.
    if not (isinstance(obs_space, akro.Box)
            and len(obs_space.shape) in (2, 3)):
        raise ValueError(
            '{} can only process 2D, 3D akro.Image or'
            ' akro.Box observations, but received an env_spec with '
            'observation_space of type {} and shape {}'.format(
                type(self).__name__,
                type(obs_space).__name__,
                obs_space.shape))
    super().__init__(name, env_spec)
    self.obs_dim = obs_space.shape
    self.action_dim = env_spec.action_space.n
    # CNN feature extractor feeding a softmax MLP over the actions.
    self.model = Sequential(
        CNNModel(filter_dims=conv_filter_sizes,
                 num_filters=conv_filters,
                 strides=conv_strides,
                 padding=conv_pad,
                 hidden_nonlinearity=hidden_nonlinearity,
                 name='CNNModel'),
        MLPModel(output_dim=self.action_dim,
                 hidden_sizes=hidden_sizes,
                 hidden_nonlinearity=hidden_nonlinearity,
                 hidden_w_init=hidden_w_init,
                 hidden_b_init=hidden_b_init,
                 output_nonlinearity=output_nonlinearity,
                 output_w_init=output_w_init,
                 output_b_init=output_b_init,
                 layer_normalization=layer_normalization,
                 name='MLPModel'))
    self._initialize()
def test_output_values(self, output_dim, hidden_sizes):
    """All-ones weights and no nonlinearity give an analytic output."""
    mlp = MLPModel(output_dim=output_dim,
                   hidden_sizes=hidden_sizes,
                   hidden_nonlinearity=None,
                   hidden_w_init=tf.ones_initializer(),
                   output_w_init=tf.ones_initializer())
    out_tensors = mlp.build(self.input_var)
    actual = self.sess.run(out_tensors,
                           feed_dict={self.input_var: self.obs})
    # Every ones-matrix layer scales the value by the previous layer's
    # width; the 5 factor presumably comes from self.obs — matches the
    # fixture used by the sibling pickling test.
    expected = np.full([1, output_dim], 5 * np.prod(hidden_sizes))
    assert np.array_equal(actual, expected)
def test_is_pickleable(self, output_dim, hidden_sizes):
    """A pickled-and-restored MLPModel reproduces the analytic output."""
    original = MLPModel(output_dim=output_dim,
                        hidden_sizes=hidden_sizes,
                        hidden_nonlinearity=None,
                        hidden_w_init=tf.ones_initializer(),
                        output_w_init=tf.ones_initializer())
    restored = pickle.loads(pickle.dumps(original))
    # Build the restored model in a brand-new graph and session.
    with tf.Session(graph=tf.Graph()) as sess:
        placeholder = tf.placeholder(tf.float32, shape=(None, 5))
        out_tensors = restored.build(placeholder)
        actual = sess.run(out_tensors, feed_dict={placeholder: self.obs})
        # Same closed-form expectation as test_output_values.
        expected = np.full([1, output_dim], 5 * np.prod(hidden_sizes))
        assert np.array_equal(actual, expected)
def test_comp_mech_policy(self):
    """Batch and single-sample queries report the configured log-std."""
    y1, v1 = 0.1, 0.1
    env = TfEnv(MassSpringEnv_OptK_HwAsPolicy(params))
    # Zero-initialized computational policy so outputs are deterministic.
    comp_policy_model = MLPModel(output_dim=1,
                                 hidden_sizes=(32, 32),
                                 hidden_nonlinearity=tf.nn.tanh,
                                 hidden_b_init=tf.zeros_initializer(),
                                 hidden_w_init=tf.zeros_initializer(),
                                 output_b_init=tf.zeros_initializer(),
                                 output_w_init=tf.zeros_initializer())
    mech_policy_model = MechPolicyModel_OptK_HwAsPolicy(params)
    with tf.compat.v1.Session() as sess:
        comp_mech_policy = CompMechPolicy_OptK_HwAsPolicy(
            name='test_comp_mech_policy',
            env_spec=env.spec,
            comp_policy_model=comp_policy_model,
            mech_policy_model=mech_policy_model)

        expected_log_std = np.array([
            params.f_log_std_init_action, params.f_log_std_init_auxiliary
        ])

        batch = comp_mech_policy.get_actions([[y1, v1]])
        print('actions: ', batch)
        # Mean check disabled upstream:
        # np.allclose(batch[1]['mean'],
        #             [[params.half_force_range*0 - params.k_init*y1, 0.0]])
        self.assertTrue(
            np.allclose(batch[1]['log_std'],
                        expected_log_std[np.newaxis, :]))

        single = comp_mech_policy.get_action([y1, v1])
        print('single action: ', single)
        # Mean check disabled upstream:
        # np.allclose(single[1]['mean'],
        #             [params.half_force_range*0 - params.k_init*y1, 0.0])
        self.assertTrue(
            np.allclose(single[1]['log_std'], expected_log_std))

        print(comp_mech_policy.distribution)
def test_comp_mech_policy(self):
    """Batch and single-sample queries report the configured log-std.

    Bug fix: the second assertion previously re-checked
    ``actions[1]['log_std']`` (the batch result from ``get_actions``)
    instead of ``action[1]['log_std']`` returned by ``get_action``, so
    the single-sample path was never actually verified (compare the
    HwAsPolicy variant of this test, which checks ``action``).
    """
    print('\n Testing CompMechPolicy_OptK_HwAsAction ...')
    y1 = 0.1
    v1 = 0.1
    env = TfEnv(MassSpringEnv_OptK_HwAsAction(params))
    # Zero-initialized computational policy so outputs are deterministic.
    comp_policy_model = MLPModel(output_dim=1,
                                 hidden_sizes=(32, 32),
                                 hidden_nonlinearity=tf.nn.tanh,
                                 hidden_b_init=tf.zeros_initializer(),
                                 hidden_w_init=tf.zeros_initializer(),
                                 output_b_init=tf.zeros_initializer(),
                                 output_w_init=tf.zeros_initializer())
    mech_policy_model = MechPolicyModel_OptK_HwAsAction(params=params)
    with tf.compat.v1.Session() as sess:
        comp_mech_policy = CompMechPolicy_OptK_HwAsAction(
            name='test_comp_mech_policy',
            env_spec=env.spec,
            comp_policy_model=comp_policy_model,
            mech_policy_model=mech_policy_model)

        # Expected log-std: one force entry, then one entry per spring.
        expected_log_std = np.array([params.f_log_std_init_action] +
                                    [params.k_log_std_init_action, ] *
                                    params.n_springs)

        actions = comp_mech_policy.get_actions([[y1, v1]])
        print('actions: ', actions)
        # Mean check disabled upstream:
        # np.allclose(actions[1]['mean'], np.array([[0.0, params.k_init]]))
        self.assertTrue(
            np.allclose(actions[1]['log_std'],
                        expected_log_std[np.newaxis, :],
                        atol=1e-3))

        action = comp_mech_policy.get_action([y1, v1])
        print('single action: ', action)
        # Mean check disabled upstream:
        # np.allclose(action[1]['mean'], np.array([0.0, params.k_init]))
        self.assertTrue(
            np.allclose(action[1]['log_std'], expected_log_std,
                        atol=1e-3))

        print(comp_mech_policy.distribution)
def __init__(self,
             env_spec,
             name='MLPTerminalFunction',
             hidden_sizes=(20, 20),
             hidden_nonlinearity=tf.nn.relu,
             hidden_w_init=tf.glorot_uniform_initializer(),
             hidden_b_init=tf.zeros_initializer(),
             output_nonlinearity=None,
             output_w_init=tf.glorot_uniform_initializer(),
             output_b_init=tf.zeros_initializer(),
             input_include_goal=False,
             layer_normalization=False):
    """MLP with a fixed 2-unit output (terminal / non-terminal logits).

    Args:
        env_spec: Environment specification.
        name (str): Model name.
        hidden_sizes (tuple): Width of each hidden MLP layer.
        hidden_nonlinearity: Activation for hidden layers.
        hidden_w_init: Initializer for hidden-layer weights.
        hidden_b_init: Initializer for hidden-layer biases.
        output_nonlinearity: Activation for the output layer.
        output_w_init: Initializer for output-layer weights.
        output_b_init: Initializer for output-layer biases.
        input_include_goal (bool): If True, the flattened observation
            also includes the 'desired_goal' key.
        layer_normalization (bool): Whether to apply layer normalization.
    """
    super().__init__(name)
    # Remember every constructor argument for later re-construction.
    self._env_spec = env_spec
    self._hidden_sizes = hidden_sizes
    self._hidden_nonlinearity = hidden_nonlinearity
    self._hidden_w_init = hidden_w_init
    self._hidden_b_init = hidden_b_init
    self._output_nonlinearity = output_nonlinearity
    self._output_w_init = output_w_init
    self._output_b_init = output_b_init
    self._input_include_goal = input_include_goal
    self._layer_normalization = layer_normalization
    if self._input_include_goal:
        self._obs_dim = env_spec.observation_space.flat_dim_with_keys(
            ['observation', 'desired_goal'])
    else:
        self._obs_dim = env_spec.observation_space.flat_dim
    self._action_dim = env_spec.action_space.flat_dim
    # Two output units — presumably terminal vs. non-terminal logits;
    # confirm against the caller before relying on the ordering.
    self.model = MLPModel(output_dim=2,
                          hidden_sizes=hidden_sizes,
                          hidden_nonlinearity=hidden_nonlinearity,
                          hidden_w_init=hidden_w_init,
                          hidden_b_init=hidden_b_init,
                          output_nonlinearity=output_nonlinearity,
                          output_w_init=output_w_init,
                          output_b_init=output_b_init,
                          layer_normalization=layer_normalization)
    self._initialize()
def __init__(self,
             env_spec,
             conv_filters,
             conv_filter_sizes,
             conv_strides,
             conv_pad,
             name='CategoricalConvPolicy',
             hidden_sizes=(),
             hidden_nonlinearity=tf.nn.relu,
             hidden_w_init=tf.glorot_uniform_initializer(),
             hidden_b_init=tf.zeros_initializer(),
             output_nonlinearity=tf.nn.softmax,
             output_w_init=tf.glorot_uniform_initializer(),
             output_b_init=tf.zeros_initializer(),
             layer_normalization=False):
    """Categorical policy with a CNN feature extractor and an MLP head.

    Bug fix: ``hidden_sizes`` previously defaulted to the mutable
    ``[]``; an empty tuple is equivalent for iteration and matches the
    tuple defaults used by the other policy constructors.

    Args:
        env_spec: Environment specification; the action space must be
            akro.Discrete.
        conv_filters: Number of filters per convolutional layer.
        conv_filter_sizes: Filter size per convolutional layer.
        conv_strides: Stride per convolutional layer.
        conv_pad: Padding mode for the convolutions.
        name (str): Policy name.
        hidden_sizes (tuple): Width of each hidden MLP layer (empty
            means the CNN features feed the output layer directly).
        hidden_nonlinearity: Activation shared by CNN and MLP layers.
        hidden_w_init: Initializer for MLP hidden-layer weights.
        hidden_b_init: Initializer for MLP hidden-layer biases.
        output_nonlinearity: Output activation (softmax over actions).
        output_w_init: Initializer for output-layer weights.
        output_b_init: Initializer for output-layer biases.
        layer_normalization (bool): Whether to apply layer normalization.
    """
    assert isinstance(env_spec.action_space, akro.Discrete), (
        'CategoricalConvPolicy only works with akro.Discrete action '
        'space.')
    super().__init__(name, env_spec)
    self.obs_dim = env_spec.observation_space.shape
    self.action_dim = env_spec.action_space.n
    # CNN feature extractor feeding a softmax MLP over the actions.
    self.model = Sequential(
        CNNModel(filter_dims=conv_filter_sizes,
                 num_filters=conv_filters,
                 strides=conv_strides,
                 padding=conv_pad,
                 hidden_nonlinearity=hidden_nonlinearity,
                 name='CNNModel'),
        MLPModel(output_dim=self.action_dim,
                 hidden_sizes=hidden_sizes,
                 hidden_nonlinearity=hidden_nonlinearity,
                 hidden_w_init=hidden_w_init,
                 hidden_b_init=hidden_b_init,
                 output_nonlinearity=output_nonlinearity,
                 output_w_init=output_w_init,
                 output_b_init=output_b_init,
                 layer_normalization=layer_normalization,
                 name='MLPModel'))
    self._initialize()
def __init__(self,
             env_spec,
             name="DeterministicMLPPolicy",
             hidden_sizes=(64, 64),
             hidden_nonlinearity=tf.nn.relu,
             output_nonlinearity=tf.nn.tanh,
             input_include_goal=False,
             layer_normalization=False):
    """Deterministic MLP policy: observation in, action out.

    Args:
        env_spec: Environment specification.
        name (str): Policy (and model) name.
        hidden_sizes (tuple): Width of each hidden MLP layer.
        hidden_nonlinearity: Activation for hidden layers.
        output_nonlinearity: Activation for the output layer.
        input_include_goal (bool): If True, the flattened observation
            also includes the "desired_goal" key.
        layer_normalization (bool): Whether to apply layer normalization.
    """
    super().__init__(name, env_spec)
    # Observation size depends on whether the goal is appended to it.
    if input_include_goal:
        self.obs_dim = env_spec.observation_space.flat_dim_with_keys(
            ["observation", "desired_goal"])
    else:
        self.obs_dim = env_spec.observation_space.flat_dim
    self.model = MLPModel(output_dim=env_spec.action_space.flat_dim,
                          name=name,
                          hidden_sizes=hidden_sizes,
                          hidden_nonlinearity=hidden_nonlinearity,
                          output_nonlinearity=output_nonlinearity,
                          layer_normalization=layer_normalization)
    self._initialize()
def __init__(self,
             env_spec,
             name="CategoricalMLPPolicy",
             hidden_sizes=(32, 32),
             hidden_nonlinearity=tf.nn.tanh,
             output_nonlinearity=tf.nn.softmax,
             layer_normalization=False):
    """Categorical MLP policy for discrete action spaces.

    Args:
        env_spec: Environment specification; the action space must be an
            akro.tf.Discrete space.
        name (str): Policy (and model) name.
        hidden_sizes (tuple): Width of each hidden MLP layer.
        hidden_nonlinearity: Activation for hidden layers.
        output_nonlinearity: Output activation (softmax over actions).
        layer_normalization (bool): Whether to apply layer normalization.
    """
    # Bug fix: the two adjacent string literals previously concatenated
    # to "...akro.tf.Discreteaction space." (missing separating space).
    assert isinstance(env_spec.action_space, Discrete), (
        "CategoricalMLPPolicy only works with akro.tf.Discrete "
        "action space.")
    super().__init__(name, env_spec)
    self.obs_dim = env_spec.observation_space.flat_dim
    self.action_dim = env_spec.action_space.n
    self.model = MLPModel(output_dim=self.action_dim,
                          name=name,
                          hidden_sizes=hidden_sizes,
                          hidden_nonlinearity=hidden_nonlinearity,
                          output_nonlinearity=output_nonlinearity,
                          layer_normalization=layer_normalization)
    self._initialize()
def __init__(self,
             env_spec,
             filters,
             strides,
             hidden_sizes=(256, ),
             name=None,
             padding='SAME',
             max_pooling=False,
             pool_strides=(2, 2),
             pool_shapes=(2, 2),
             cnn_hidden_nonlinearity=tf.nn.relu,
             hidden_nonlinearity=tf.nn.relu,
             hidden_w_init=tf.initializers.glorot_uniform(),
             hidden_b_init=tf.zeros_initializer(),
             output_nonlinearity=None,
             output_w_init=tf.initializers.glorot_uniform(),
             output_b_init=tf.zeros_initializer(),
             dueling=False,
             layer_normalization=False):
    """Discrete CNN Q-function: CNN features feeding an MLP head.

    Args:
        env_spec: Environment specification. The observation space must
            be a 2D/3D akro.Box (or akro.Image).
        filters: Number of filters per convolutional layer.
        strides: Stride per convolutional layer.
        hidden_sizes (tuple): Width of each hidden MLP layer.
        name (str): Q-function name.
        padding (str): Padding mode for the convolutions.
        max_pooling (bool): Whether to max-pool after each conv layer.
        pool_strides (tuple): Pooling strides (when max_pooling).
        pool_shapes (tuple): Pooling window shape (when max_pooling).
        cnn_hidden_nonlinearity: Activation for the CNN layers.
        hidden_nonlinearity: Activation for the MLP hidden layers.
        hidden_w_init: Initializer for MLP hidden-layer weights.
        hidden_b_init: Initializer for MLP hidden-layer biases.
        output_nonlinearity: Activation for the output layer.
        output_w_init: Initializer for output-layer weights.
        output_b_init: Initializer for output-layer biases.
        dueling (bool): If True, use a dueling MLP head.
        layer_normalization (bool): Whether to apply layer normalization.

    Raises:
        ValueError: If the observation space is unsupported.
    """
    obs_space = env_spec.observation_space
    # Equivalent (by De Morgan) to the "not Box or wrong rank" check.
    if not (isinstance(obs_space, akro.Box)
            and len(obs_space.shape) in (2, 3)):
        raise ValueError(
            '{} can only process 2D, 3D akro.Image or'
            ' akro.Box observations, but received an env_spec with '
            'observation_space of type {} and shape {}'.format(
                type(self).__name__,
                type(obs_space).__name__,
                obs_space.shape))
    super().__init__(name)
    # Keep every constructor argument for later re-construction.
    self._env_spec = env_spec
    self._action_dim = env_spec.action_space.n
    self._filters = filters
    self._strides = strides
    self._hidden_sizes = hidden_sizes
    self._padding = padding
    self._max_pooling = max_pooling
    self._pool_strides = pool_strides
    self._pool_shapes = pool_shapes
    self._cnn_hidden_nonlinearity = cnn_hidden_nonlinearity
    self._hidden_nonlinearity = hidden_nonlinearity
    self._hidden_w_init = hidden_w_init
    self._hidden_b_init = hidden_b_init
    self._output_nonlinearity = output_nonlinearity
    self._output_w_init = output_w_init
    self._output_b_init = output_b_init
    self._layer_normalization = layer_normalization
    self._dueling = dueling
    self.obs_dim = self._env_spec.observation_space.shape
    action_dim = self._env_spec.action_space.flat_dim
    # Feature extractor, optionally with max pooling between conv layers.
    if max_pooling:
        cnn_model = CNNModelWithMaxPooling(
            filters=filters,
            strides=strides,
            padding=padding,
            pool_strides=pool_strides,
            pool_shapes=pool_shapes,
            hidden_nonlinearity=cnn_hidden_nonlinearity)
    else:
        cnn_model = CNNModel(filters=filters,
                             strides=strides,
                             padding=padding,
                             hidden_nonlinearity=cnn_hidden_nonlinearity)
    # Both head classes accept the identical keyword set, so select the
    # class and make a single construction call.
    head_cls = MLPDuelingModel if dueling else MLPModel
    output_model = head_cls(output_dim=action_dim,
                            hidden_sizes=hidden_sizes,
                            hidden_nonlinearity=hidden_nonlinearity,
                            hidden_w_init=hidden_w_init,
                            hidden_b_init=hidden_b_init,
                            output_nonlinearity=output_nonlinearity,
                            output_w_init=output_w_init,
                            output_b_init=output_b_init,
                            layer_normalization=layer_normalization)
    self.model = Sequential(cnn_model, output_model)
    self._initialize()
def __init__(self,
             env_spec,
             filter_dims,
             num_filters,
             strides,
             hidden_sizes=(256, ),
             name=None,
             padding='SAME',
             max_pooling=False,
             pool_strides=(2, 2),
             pool_shapes=(2, 2),
             cnn_hidden_nonlinearity=tf.nn.relu,
             hidden_nonlinearity=tf.nn.relu,
             hidden_w_init=tf.glorot_uniform_initializer(),
             hidden_b_init=tf.zeros_initializer(),
             output_nonlinearity=None,
             output_w_init=tf.glorot_uniform_initializer(),
             output_b_init=tf.zeros_initializer(),
             dueling=False,
             layer_normalization=False):
    """Discrete CNN Q-function: CNN features feeding an MLP head.

    Bug fix: ``hidden_sizes`` previously defaulted to the mutable
    ``[256]``; ``(256, )`` is equivalent for iteration and matches the
    tuple default used by the sibling ``filters``/``strides`` variant
    of this constructor.

    Args:
        env_spec: Environment specification.
        filter_dims: Filter size per convolutional layer.
        num_filters: Number of filters per convolutional layer.
        strides: Stride per convolutional layer.
        hidden_sizes (tuple): Width of each hidden MLP layer.
        name (str): Q-function name.
        padding (str): Padding mode for the convolutions.
        max_pooling (bool): Whether to max-pool after each conv layer.
        pool_strides (tuple): Pooling strides (when max_pooling).
        pool_shapes (tuple): Pooling window shape (when max_pooling).
        cnn_hidden_nonlinearity: Activation for the CNN layers.
        hidden_nonlinearity: Activation for the MLP hidden layers.
        hidden_w_init: Initializer for MLP hidden-layer weights.
        hidden_b_init: Initializer for MLP hidden-layer biases.
        output_nonlinearity: Activation for the output layer.
        output_w_init: Initializer for output-layer weights.
        output_b_init: Initializer for output-layer biases.
        dueling (bool): If True, use a dueling MLP head.
        layer_normalization (bool): Whether to apply layer normalization.
    """
    super().__init__(name)
    # Keep every constructor argument for later re-construction.
    self._env_spec = env_spec
    self._action_dim = env_spec.action_space.n
    self._filter_dims = filter_dims
    self._num_filters = num_filters
    self._strides = strides
    self._hidden_sizes = hidden_sizes
    self._padding = padding
    self._max_pooling = max_pooling
    self._pool_strides = pool_strides
    self._pool_shapes = pool_shapes
    self._cnn_hidden_nonlinearity = cnn_hidden_nonlinearity
    self._hidden_nonlinearity = hidden_nonlinearity
    self._hidden_w_init = hidden_w_init
    self._hidden_b_init = hidden_b_init
    self._output_nonlinearity = output_nonlinearity
    self._output_w_init = output_w_init
    self._output_b_init = output_b_init
    self._layer_normalization = layer_normalization
    self._dueling = dueling
    self.obs_dim = self._env_spec.observation_space.shape
    action_dim = self._env_spec.action_space.flat_dim
    # Feature extractor, optionally with max pooling between conv layers.
    if not max_pooling:
        cnn_model = CNNModel(filter_dims=filter_dims,
                             num_filters=num_filters,
                             strides=strides,
                             padding=padding,
                             hidden_nonlinearity=cnn_hidden_nonlinearity)
    else:
        cnn_model = CNNModelWithMaxPooling(
            filter_dims=filter_dims,
            num_filters=num_filters,
            strides=strides,
            padding=padding,
            pool_strides=pool_strides,
            pool_shapes=pool_shapes,
            hidden_nonlinearity=cnn_hidden_nonlinearity)
    # Both head classes accept the identical keyword set.
    head_cls = MLPDuelingModel if dueling else MLPModel
    output_model = head_cls(output_dim=action_dim,
                            hidden_sizes=hidden_sizes,
                            hidden_nonlinearity=hidden_nonlinearity,
                            hidden_w_init=hidden_w_init,
                            hidden_b_init=hidden_b_init,
                            output_nonlinearity=output_nonlinearity,
                            output_w_init=output_w_init,
                            output_b_init=output_b_init,
                            layer_normalization=layer_normalization)
    self.model = Sequential(cnn_model, output_model)
    self._initialize()