def _build_net(self, reuse=None, custom_getter=None, trainable=None):
    """
    Set up the Q-network based on class attributes.

    This function uses layers defined in rllab.tf.

    Args:
        reuse: A bool indicating whether to reuse variables in the same scope.
        custom_getter: A customized getter object used to get variables.
        trainable: A bool indicating whether the variables are trainable.
    """
    with tf.variable_scope(
            self.name, reuse=reuse, custom_getter=custom_getter):
        l_obs = L.InputLayer(shape=(None, self._obs_dim), name="obs")
        l_action = L.InputLayer(
            shape=(None, self._action_dim), name="actions")

        n_layers = len(self._hidden_sizes) + 1

        # Map the (possibly negative) merge index into the range [0, n_layers).
        if n_layers > 1:
            action_merge_layer = \
                (self._action_merge_layer % n_layers + n_layers) % n_layers
        else:
            action_merge_layer = 1

        l_hidden = l_obs

        for idx, size in enumerate(self._hidden_sizes):
            if self._batch_norm:
                l_hidden = batch_norm(l_hidden)

            if idx == action_merge_layer:
                l_hidden = L.ConcatLayer([l_hidden, l_action])

            l_hidden = L.DenseLayer(
                l_hidden,
                num_units=size,
                nonlinearity=self._hidden_nonlinearity,
                trainable=trainable,
                name="hidden_%d" % (idx + 1))

        # If the merge point falls after the last hidden layer, concatenate
        # the actions here instead.
        if action_merge_layer == n_layers:
            l_hidden = L.ConcatLayer([l_hidden, l_action])

        l_output = L.DenseLayer(
            l_hidden,
            num_units=1,
            nonlinearity=self._output_nonlinearity,
            trainable=trainable,
            name="output")

        output_var = L.get_output(l_output)

    self._f_qval = tensor_utils.compile_function(
        [l_obs.input_var, l_action.input_var], output_var)
    self._output_layer = l_output
    self._obs_layer = l_obs
    self._action_layer = l_action

    LayersPowered.__init__(self, [l_output])
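# --- Usage sketch (not part of the library) ---------------------------------
# A minimal, standalone TF1 sketch of the reuse/custom_getter mechanism that
# _build_net exposes: rebuilding the same variable scope with a custom getter
# lets a caller swap every variable for, e.g., a Polyak-averaged copy when
# constructing a target network. All names below are illustrative
# assumptions, not garage's own target-network code.
import tensorflow as tf

ema = tf.train.ExponentialMovingAverage(decay=0.999)

def ema_getter(getter, name, *args, **kwargs):
    var = getter(name, *args, **kwargs)   # the normally resolved variable
    avg = ema.average(var)                # its moving-average shadow, if any
    return avg if avg is not None else var

with tf.variable_scope("qf"):
    w = tf.get_variable("w", shape=(4, 1))      # an "online" weight

update_ema = ema.apply([w])                     # creates/refreshes the shadow copy

with tf.variable_scope("qf", reuse=True, custom_getter=ema_getter):
    w_target = tf.get_variable("w", shape=(4, 1))   # resolves to the EMA copy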
def build_net(self, trainable=True, name=None):
    """
    Set up the Q-network based on class attributes.

    This function uses layers defined in garage.tf.

    Args:
        trainable: A bool indicating whether the variables are trainable.
        name: Name of the variable scope for the network.

    Returns:
        f_qval: A compiled function mapping (observations, actions) to Q-values.
        output_layer: The output layer of the network.
        obs_layer: The observation input layer.
        action_layer: The action input layer.
    """
    with tf.variable_scope(name):
        l_obs = L.InputLayer(shape=(None, self._obs_dim), name="obs")
        l_action = L.InputLayer(
            shape=(None, self._action_dim), name="actions")

        n_layers = len(self._hidden_sizes) + 1

        # Map the (possibly negative) merge index into the range [0, n_layers).
        if n_layers > 1:
            action_merge_layer = \
                (self._action_merge_layer % n_layers + n_layers) % n_layers
        else:
            action_merge_layer = 1

        l_hidden = l_obs

        for idx, size in enumerate(self._hidden_sizes):
            if self._batch_norm:
                l_hidden = batch_norm(l_hidden)

            if idx == action_merge_layer:
                l_hidden = L.ConcatLayer([l_hidden, l_action])

            l_hidden = L.DenseLayer(
                l_hidden,
                num_units=size,
                nonlinearity=self._hidden_nonlinearity,
                trainable=trainable,
                name="hidden_%d" % (idx + 1))

        # If the merge point falls after the last hidden layer, concatenate
        # the actions here instead.
        if action_merge_layer == n_layers:
            l_hidden = L.ConcatLayer([l_hidden, l_action])

        l_output = L.DenseLayer(
            l_hidden,
            num_units=1,
            nonlinearity=self._output_nonlinearity,
            trainable=trainable,
            name="output")

        output_var = L.get_output(l_output)

    f_qval = tensor_utils.compile_function(
        [l_obs.input_var, l_action.input_var], output_var)
    output_layer = l_output
    obs_layer = l_obs
    action_layer = l_action

    return f_qval, output_layer, obs_layer, action_layer
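# --- Usage sketch (not part of the library) ---------------------------------
# Hypothetical wiring inside the host Q-function class: build_net is called
# once for the online network and once (with trainable=False) for a frozen
# target copy, and the compiled f_qval is queried with numpy batches. The
# attribute and scope names here are assumptions.
import numpy as np

f_qval, output_layer, obs_layer, action_layer = self.build_net(
    trainable=True, name="qf")
target_f_qval, target_output_layer, _, _ = self.build_net(
    trainable=False, name="target_qf")

# f_qval maps (observations, actions) batches to Q-values of shape (batch, 1).
obs = np.zeros((32, self._obs_dim))
acts = np.zeros((32, self._action_dim))
q_values = f_qval(obs, acts)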
def build_net(self, trainable=True, name=None):
    """
    Set up the Q-network based on class attributes.

    This function uses layers defined in garage.tf.

    Args:
        trainable: A bool indicating whether the variables are trainable.
        name: Name of the variable scope for the network.

    Returns:
        f_qval: A compiled function mapping (observations, actions) to Q-values.
        output_layer: The output layer of the network.
        obs_layer: The observation input layer.
        action_layer: The action input layer.
    """
    input_shape = self._env_spec.observation_space.shape
    assert len(input_shape) in [2, 3]
    if len(input_shape) == 2:
        input_shape = (1, ) + input_shape

    with tf.variable_scope(name):
        l_in = layers.InputLayer(shape=(None, self._obs_dim), name="obs")
        l_hid = layers.reshape(
            l_in, ([0], ) + input_shape, name="reshape_input")

        if self._batch_norm:
            l_hid = layers.batch_norm(l_hid)

        for idx, conv_filter, filter_size, stride, pad in zip(
                range(len(self._conv_filters)),
                self._conv_filters,
                self._conv_filter_sizes,
                self._conv_strides,
                self._conv_pads,
        ):
            l_hid = layers.Conv2DLayer(
                l_hid,
                num_filters=conv_filter,
                filter_size=filter_size,
                stride=(stride, stride),
                pad=pad,
                nonlinearity=self._hidden_nonlinearity,
                name="conv_hidden_%d" % idx,
                weight_normalization=self._weight_normalization,
                trainable=trainable,
            )
            if self._pooling:
                l_hid = layers.Pool2DLayer(l_hid, pool_size=self._pool_size)
            if self._batch_norm:
                l_hid = layers.batch_norm(l_hid)

        l_hid = layers.flatten(l_hid, name="conv_flatten")
        l_action = layers.InputLayer(
            shape=(None, self._action_dim), name="actions")

        n_layers = len(self._hidden_sizes) + 1

        # Map the (possibly negative) merge index into the range [0, n_layers).
        if n_layers > 1:
            action_merge_layer = \
                (self._action_merge_layer % n_layers + n_layers) % n_layers
        else:
            action_merge_layer = 1

        for idx, size in enumerate(self._hidden_sizes):
            if self._batch_norm:
                l_hid = layers.batch_norm(l_hid)

            if idx == action_merge_layer:
                l_hid = layers.ConcatLayer([l_hid, l_action])

            l_hid = layers.DenseLayer(
                l_hid,
                num_units=size,
                nonlinearity=self._hidden_nonlinearity,
                trainable=trainable,
                name="hidden_%d" % (idx + 1))

        # If the merge point falls after the last hidden layer, concatenate
        # the actions here instead.
        if action_merge_layer == n_layers:
            l_hid = layers.ConcatLayer([l_hid, l_action])

        l_output = layers.DenseLayer(
            l_hid,
            num_units=1,
            nonlinearity=self._output_nonlinearity,
            trainable=trainable,
            name="output")

        output_var = layers.get_output(l_output)

    f_qval = tensor_utils.compile_function(
        [l_in.input_var, l_action.input_var], output_var)
    output_layer = l_output
    obs_layer = l_in
    action_layer = l_action

    return f_qval, output_layer, obs_layer, action_layer
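# --- Configuration sketch (not part of the library) --------------------------
# The convolutional build_net above zips four per-layer lists together, so
# they must all have the same length, one entry per Conv2DLayer. Below is a
# hypothetical attribute setup; the concrete values and padding strings are
# assumptions for illustration only.
import tensorflow as tf

self._conv_filters = (32, 64)            # filters in conv layer 0 and 1
self._conv_filter_sizes = (8, 4)         # square kernel sizes
self._conv_strides = (4, 2)              # stride applied in both directions
self._conv_pads = ("VALID", "VALID")     # padding mode per conv layer

self._pooling = False                    # optional Pool2DLayer after each conv
self._pool_size = 2
self._weight_normalization = False
self._batch_norm = False

self._hidden_sizes = (256, 256)          # dense layers after flattening
self._action_merge_layer = -2            # where actions are concatenated
self._hidden_nonlinearity = tf.nn.relu
self._output_nonlinearity = None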