def __init__(
        self,
        name,
        env_spec,
        hidden_dim=32,
        feature_network=None,
        state_include_action=True,
        hidden_nonlinearity=tf.tanh,
        gru_layer_cls=L.GRULayer,
):
    """
    :param name: name of the policy's variable scope
    :param env_spec: A spec for the env; the action space must be Discrete.
    :param hidden_dim: dimension of the hidden layer
    :param feature_network: optional network mapping flat observations to
        features; if None, raw inputs are fed to the GRU directly
    :param state_include_action: whether to append the previous action to
        the observation before feeding it to the network
    :param hidden_nonlinearity: nonlinearity used for each hidden layer
    :param gru_layer_cls: layer class implementing the recurrent unit
    """
    with tf.variable_scope(name):
        assert isinstance(env_spec.action_space, Discrete)
        Serializable.quick_init(self, locals())
        super(CategoricalGRUPolicy, self).__init__(env_spec)

        obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim

        if state_include_action:
            input_dim = obs_dim + action_dim
        else:
            input_dim = obs_dim

        l_input = L.InputLayer(shape=(None, None, input_dim), name="input")

        if feature_network is None:
            feature_dim = input_dim
            l_flat_feature = None
            l_feature = l_input
        else:
            feature_dim = feature_network.output_layer.output_shape[-1]
            l_flat_feature = feature_network.output_layer
            # Reshape the flat (batch * time, feature) output of the
            # feature network back into a (batch, time, feature) sequence.
            # (tf.pack was renamed tf.stack in TF >= 1.0.)
            l_feature = L.OpLayer(
                l_flat_feature,
                extras=[l_input],
                name="reshape_feature",
                op=lambda flat_feature, input: tf.reshape(
                    flat_feature,
                    tf.pack([
                        tf.shape(input)[0],
                        tf.shape(input)[1],
                        feature_dim,
                    ])),
                shape_op=lambda _, input_shape:
                (input_shape[0], input_shape[1], feature_dim))

        prob_network = GRUNetwork(
            input_shape=(feature_dim, ),
            input_layer=l_feature,
            output_dim=env_spec.action_space.n,
            hidden_dim=hidden_dim,
            hidden_nonlinearity=hidden_nonlinearity,
            output_nonlinearity=tf.nn.softmax,
            gru_layer_cls=gru_layer_cls,
            name="prob_network")

        self.prob_network = prob_network
        self.feature_network = feature_network
        self.l_input = l_input
        self.state_include_action = state_include_action

        flat_input_var = tf.placeholder(
            dtype=tf.float32, shape=(None, input_dim), name="flat_input")
        if feature_network is None:
            feature_var = flat_input_var
        else:
            feature_var = L.get_output(
                l_flat_feature,
                {feature_network.input_layer: flat_input_var})

        # Compiled single-step function: (flat input, previous hidden
        # state) -> (action probabilities, new hidden state).
        self.f_step_prob = tensor_utils.compile_function(
            [
                flat_input_var,
                prob_network.step_prev_hidden_layer.input_var,
            ],
            L.get_output([
                prob_network.step_output_layer,
                prob_network.step_hidden_layer,
            ], {prob_network.step_input_layer: feature_var}))

        self.input_dim = input_dim
        self.action_dim = action_dim
        self.hidden_dim = hidden_dim

        self.prev_actions = None
        self.prev_hiddens = None
        self.dist = RecurrentCategorical(env_spec.action_space.n)

        out_layers = [prob_network.output_layer]
        if feature_network is not None:
            out_layers.append(feature_network.output_layer)

        LayersPowered.__init__(self, out_layers)
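
# Example usage (a minimal sketch, not part of the original code): assuming an
# rllab-style `env.spec` with a Discrete action space and the standard Policy
# interface (`reset`, `get_action`), the policy can be stepped one observation
# at a time; `f_step_prob` carries the recurrent state across calls.
#
#   policy = CategoricalGRUPolicy(name="policy", env_spec=env.spec,
#                                 hidden_dim=32)
#   policy.reset()
#   obs = env.reset()
#   action, agent_info = policy.get_action(obs)
#   # agent_info typically includes the action probabilities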
def __init__(self,
             name,
             input_shape,
             output_dim,
             hidden_dim,
             hidden_nonlinearity=tf.nn.relu,
             lstm_layer_cls=L.LSTMLayer,
             output_nonlinearity=None,
             input_var=None,
             input_layer=None,
             forget_bias=1.0,
             use_peepholes=False,
             layer_args=None):
    with tf.variable_scope(name):
        if input_layer is None:
            l_in = L.InputLayer(
                shape=(None, None) + input_shape,
                input_var=input_var,
                name="input")
        else:
            l_in = input_layer
        l_step_input = L.InputLayer(
            shape=(None, ) + input_shape, name="step_input")
        # Contains the previous hidden and cell state, concatenated.
        l_step_prev_state = L.InputLayer(
            shape=(None, hidden_dim * 2), name="step_prev_state")
        if layer_args is None:
            layer_args = dict()
        l_lstm = lstm_layer_cls(
            l_in,
            num_units=hidden_dim,
            hidden_nonlinearity=hidden_nonlinearity,
            hidden_init_trainable=False,
            name="lstm",
            forget_bias=forget_bias,
            cell_init_trainable=False,
            use_peepholes=use_peepholes,
            **layer_args)
        l_lstm_flat = L.ReshapeLayer(
            l_lstm, shape=(-1, hidden_dim), name="lstm_flat")
        l_output_flat = L.DenseLayer(
            l_lstm_flat,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            name="output_flat")
        # Reshape the flat (batch * time, output_dim) dense output back
        # into a (batch, time, output_dim) sequence.
        l_output = L.OpLayer(
            l_output_flat,
            op=lambda flat_output, l_input: tf.reshape(
                flat_output,
                tf.pack((tf.shape(l_input)[0], tf.shape(l_input)[1], -1))),
            shape_op=lambda flat_output_shape, l_input_shape:
            (l_input_shape[0], l_input_shape[1], flat_output_shape[-1]),
            extras=[l_in],
            name="output")
        l_step_state = l_lstm.get_step_layer(
            l_step_input, l_step_prev_state, name="step_state")
        # The step state is the concatenation [hidden; cell]; slice it
        # back into its two halves.
        l_step_hidden = L.SliceLayer(
            l_step_state, indices=slice(hidden_dim), name="step_hidden")
        l_step_cell = L.SliceLayer(
            l_step_state, indices=slice(hidden_dim, None), name="step_cell")
        # The single-step output head shares weights with the sequence head.
        l_step_output = L.DenseLayer(
            l_step_hidden,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            W=l_output_flat.W,
            b=l_output_flat.b,
            name="step_output")

        self._l_in = l_in
        self._hid_init_param = l_lstm.h0
        self._cell_init_param = l_lstm.c0
        self._l_lstm = l_lstm
        self._l_out = l_output
        self._l_step_input = l_step_input
        self._l_step_prev_state = l_step_prev_state
        self._l_step_hidden = l_step_hidden
        self._l_step_cell = l_step_cell
        self._l_step_state = l_step_state
        self._l_step_output = l_step_output
        self._hidden_dim = hidden_dim
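
# Example usage (a minimal sketch, not part of the original code): a
# standalone LSTMNetwork mapping 5-dimensional features to 3 outputs.
# `input_shape` excludes the leading (batch, time) axes, which stay
# symbolic until runtime.
#
#   network = LSTMNetwork(name="net", input_shape=(5, ), output_dim=3,
#                         hidden_dim=32, output_nonlinearity=tf.nn.softmax)
#   outputs = L.get_output(network.output_layer)  # (batch, time, 3) tensor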
def __init__(
        self,
        name,
        env_spec,
        hidden_dim=32,
        feature_network=None,
        state_include_action=True,
        hidden_nonlinearity=tf.tanh,
        learn_std=True,
        init_std=1.0,
        output_nonlinearity=None,
        lstm_layer_cls=L.LSTMLayer,
):
    """
    :param name: name of the policy's variable scope
    :param env_spec: A spec for the env.
    :param hidden_dim: dimension of the hidden layer
    :param feature_network: optional network mapping flat observations to
        features; if None, raw inputs are fed to the LSTM directly
    :param state_include_action: whether to append the previous action to
        the observation before feeding it to the network
    :param hidden_nonlinearity: nonlinearity used for each hidden layer
    :param learn_std: whether the log-std parameters are trainable
    :param init_std: initial standard deviation of the action distribution
    :param output_nonlinearity: nonlinearity applied to the mean output
    :param lstm_layer_cls: layer class implementing the recurrent unit
    """
    with tf.variable_scope(name):
        Serializable.quick_init(self, locals())
        super(GaussianLSTMPolicy, self).__init__(env_spec)

        obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim

        if state_include_action:
            input_dim = obs_dim + action_dim
        else:
            input_dim = obs_dim

        l_input = L.InputLayer(shape=(None, None, input_dim), name="input")

        if feature_network is None:
            feature_dim = input_dim
            l_flat_feature = None
            l_feature = l_input
        else:
            feature_dim = feature_network.output_layer.output_shape[-1]
            l_flat_feature = feature_network.output_layer
            # Reshape the flat (batch * time, feature) output of the
            # feature network back into a (batch, time, feature) sequence.
            l_feature = L.OpLayer(
                l_flat_feature,
                extras=[l_input],
                name="reshape_feature",
                op=lambda flat_feature, input: tf.reshape(
                    flat_feature,
                    tf.pack([
                        tf.shape(input)[0],
                        tf.shape(input)[1],
                        feature_dim,
                    ])),
                shape_op=lambda _, input_shape:
                (input_shape[0], input_shape[1], feature_dim))

        mean_network = LSTMNetwork(
            input_shape=(feature_dim, ),
            input_layer=l_feature,
            output_dim=action_dim,
            hidden_dim=hidden_dim,
            hidden_nonlinearity=hidden_nonlinearity,
            output_nonlinearity=output_nonlinearity,
            lstm_layer_cls=lstm_layer_cls,
            name="mean_network")

        # A single log-std vector, broadcast over the sequence; the step
        # version below shares the same parameter.
        l_log_std = L.ParamLayer(
            mean_network.input_layer,
            num_units=action_dim,
            param=tf.constant_initializer(np.log(init_std)),
            name="output_log_std",
            trainable=learn_std,
        )

        l_step_log_std = L.ParamLayer(
            mean_network.step_input_layer,
            num_units=action_dim,
            param=l_log_std.param,
            name="step_output_log_std",
            trainable=learn_std,
        )

        self.mean_network = mean_network
        self.feature_network = feature_network
        self.l_input = l_input
        self.state_include_action = state_include_action

        flat_input_var = tf.placeholder(
            dtype=tf.float32, shape=(None, input_dim), name="flat_input")
        if feature_network is None:
            feature_var = flat_input_var
        else:
            feature_var = L.get_output(
                l_flat_feature,
                {feature_network.input_layer: flat_input_var})

        # Compiled single-step function: (flat input, previous hidden,
        # previous cell) -> (mean, log-std, new hidden, new cell).
        self.f_step_mean_std = tensor_utils.compile_function(
            [
                flat_input_var,
                mean_network.step_prev_hidden_layer.input_var,
                mean_network.step_prev_cell_layer.input_var,
            ],
            L.get_output([
                mean_network.step_output_layer,
                l_step_log_std,
                mean_network.step_hidden_layer,
                mean_network.step_cell_layer,
            ], {mean_network.step_input_layer: feature_var}))

        self.l_log_std = l_log_std

        self.input_dim = input_dim
        self.action_dim = action_dim
        self.hidden_dim = hidden_dim

        self.prev_actions = None
        self.prev_hiddens = None
        self.prev_cells = None
        self.dist = RecurrentDiagonalGaussian(action_dim)

        out_layers = [mean_network.output_layer, l_log_std]
        if feature_network is not None:
            out_layers.append(feature_network.output_layer)

        LayersPowered.__init__(self, out_layers)
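
# Example usage (a minimal sketch, not part of the original code): for a
# continuous-control `env.spec`, actions are sampled from a diagonal Gaussian
# whose mean comes from the LSTM head and whose log-std is the ParamLayer
# above (fixed when learn_std=False).
#
#   policy = GaussianLSTMPolicy(name="policy", env_spec=env.spec,
#                               hidden_dim=32, init_std=1.0)
#   policy.reset()
#   action, agent_info = policy.get_action(env.reset())
#   # agent_info typically includes the distribution parameters
#   # ("mean" and "log_std")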
def __init__(self,
             name,
             input_shape,
             output_dim,
             hidden_dim,
             hidden_nonlinearity=tf.nn.relu,
             gru_layer_cls=L.GRULayer,
             output_nonlinearity=None,
             input_var=None,
             input_layer=None,
             layer_args=None):
    with tf.variable_scope(name):
        if input_layer is None:
            l_in = L.InputLayer(
                shape=(None, None) + input_shape,
                input_var=input_var,
                name="input")
        else:
            l_in = input_layer
        l_step_input = L.InputLayer(
            shape=(None, ) + input_shape, name="step_input")
        l_step_prev_state = L.InputLayer(
            shape=(None, hidden_dim), name="step_prev_state")
        if layer_args is None:
            layer_args = dict()
        l_gru = gru_layer_cls(
            l_in,
            num_units=hidden_dim,
            hidden_nonlinearity=hidden_nonlinearity,
            hidden_init_trainable=False,
            name="gru",
            **layer_args)
        l_gru_flat = L.ReshapeLayer(
            l_gru, shape=(-1, hidden_dim), name="gru_flat")
        l_output_flat = L.DenseLayer(
            l_gru_flat,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            name="output_flat")
        l_output = L.OpLayer(
            l_output_flat,
            op=lambda flat_output, l_input: tf.reshape(
                flat_output,
                tf.pack((tf.shape(l_input)[0], tf.shape(l_input)[1], -1))),
            shape_op=lambda flat_output_shape, l_input_shape:
            (l_input_shape[0], l_input_shape[1], flat_output_shape[-1]),
            extras=[l_in],
            name="output")
        l_step_state = l_gru.get_step_layer(
            l_step_input, l_step_prev_state, name="step_state")
        l_step_hidden = l_step_state
        l_step_output = L.DenseLayer(
            l_step_hidden,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            W=l_output_flat.W,
            b=l_output_flat.b,
            name="step_output")

        self._l_in = l_in
        self._hid_init_param = l_gru.h0
        self._l_gru = l_gru
        self._l_out = l_output
        self._l_step_input = l_step_input
        self._l_step_prev_state = l_step_prev_state
        self._l_step_hidden = l_step_hidden
        self._l_step_state = l_step_state
        self._l_step_output = l_step_output
        self._hidden_dim = hidden_dim
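
# Example single-step usage (a minimal sketch, not part of the original code):
# mirroring how CategoricalGRUPolicy compiles `f_step_prob`, the step layers
# advance the recurrence one observation at a time. `obs_dim`, `act_dim`, and
# `obs_var` below are hypothetical placeholders for illustration.
#
#   network = GRUNetwork(name="net", input_shape=(obs_dim, ),
#                        output_dim=act_dim, hidden_dim=32)
#   step_out, step_hidden = L.get_output(
#       [network.step_output_layer, network.step_hidden_layer],
#       {network.step_input_layer: obs_var})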