def _build(self, state_input, name=None):
    """Build model given input placeholder(s).

    Args:
        state_input (tf.Tensor): Tensor input for state.
        name (str): Inner model name, also the variable scope of the
            inner model, if exist. One example is
            garage.tf.models.Sequential. Unused here; the inner scope is
            fixed to 'cnn'.

    Return:
        tf.Tensor: Tensor output of the model.

    """
    del name  # the inner scope name is hard-coded below
    # Collect the network configuration held on the instance, then build.
    cnn_kwargs = dict(
        input_var=state_input,
        filter_dims=self._filter_dims,
        num_filters=self._num_filters,
        strides=self._strides,
        padding=self._padding,
        pool_shapes=self._pool_shapes,
        pool_strides=self._pool_strides,
        hidden_nonlinearity=self._hidden_nonlinearity,
        hidden_w_init=self._hidden_w_init,
        hidden_b_init=self._hidden_b_init,
        name='cnn')
    return cnn_with_max_pooling(**cnn_kwargs)
def test_output_with_max_pooling(self, filters, in_channels, strides, pool_shape, pool_stride):
    """Check cnn_with_max_pooling output against a NumPy reference.

    Builds the TF graph, runs it on an all-ones input, then replays the
    same convolution + max-pooling arithmetic with the `convolve` and
    `max_pooling` helpers and asserts the results match exactly.

    Args:
        filters: Per-layer filter specs (one or two layers).
        in_channels: Input channel count for each conv layer.
        strides: Conv stride per layer.
        pool_shape (int): Side length of the square pooling window.
        pool_stride (int): Stride of the pooling window.
    """
    # Flattened input size: width * height * 3 RGB channels.
    dim_prod = self.input_width * self.input_height * 3
    input_ph = tf.compat.v1.placeholder(tf.float32,
                                        shape=(None, dim_prod),
                                        name='input_flattened')
    # Build a cnn with all filter weights initialized to 1 (not random),
    # so the NumPy replay below can reproduce the exact same values.
    with tf.compat.v1.variable_scope('CNN'):
        self.cnn2 = cnn_with_max_pooling(
            input_var=input_ph,
            input_dim=self.input_dim,
            filters=filters,
            strides=strides,
            name='cnn1',
            pool_shapes=(pool_shape, pool_shape),
            pool_strides=(pool_stride, pool_stride),
            padding='VALID',
            hidden_w_init=tf.constant_initializer(1),
            hidden_nonlinearity=self.hidden_nonlinearity)
    self.sess.run(tf.compat.v1.global_variables_initializer())
    obs_input = np.ones((self.batch_size, dim_prod))
    result = self.sess.run(self.cnn2, feed_dict={input_ph: obs_input})
    # Parametrization supplies either one or two conv layers.
    two_layer = len(filters) == 2
    # get weight values (reuse=True re-opens the scope built above)
    with tf.compat.v1.variable_scope('CNN', reuse=True):
        h0_w = tf.compat.v1.get_variable('cnn1/h0/weight').eval()
        h0_b = tf.compat.v1.get_variable('cnn1/h0/bias').eval()
        if two_layer:
            h1_w = tf.compat.v1.get_variable('cnn1/h1/weight').eval()
            h1_b = tf.compat.v1.get_variable('cnn1/h1/bias').eval()
    filter_weights = (h0_w, h1_w) if two_layer else (h0_w, )
    filter_bias = (h0_b, h1_b) if two_layer else (h0_b, )
    # NOTE(review): self.obs_input is presumably the unflattened
    # (batch, H, W, C) view of obs_input — set up in the fixture; confirm.
    input_val = self.obs_input
    # convolution according to TensorFlow's approach
    # and perform max pooling on each layer
    for filter_iter, filter_weight, _filter_bias, in_channel in zip(
            filters, filter_weights, filter_bias, in_channels):
        input_val = convolve(_input=input_val,
                             filter_weights=(filter_weight, ),
                             filter_bias=(_filter_bias, ),
                             strides=strides,
                             filters=(filter_iter, ),
                             in_channels=(in_channel, ),
                             hidden_nonlinearity=self.hidden_nonlinearity)
        # max pooling
        input_val = max_pooling(_input=input_val,
                                pool_shape=pool_shape,
                                pool_stride=pool_stride)
    # flatten to (batch, features) to match the graph's dense output
    dense_out = input_val.reshape((self.batch_size, -1)).astype(np.float32)
    np.testing.assert_array_equal(dense_out, result)
def test_invalid_padding_max_pooling(self):
    """An unsupported padding mode must raise ValueError at build time."""
    with pytest.raises(ValueError), tf.compat.v1.variable_scope('CNN'):
        self.cnn = cnn_with_max_pooling(input_var=self._input_ph,
                                        name='cnn',
                                        filters=((32, (3, 3)), ),
                                        strides=(1, ),
                                        padding='UNKNOWN',
                                        pool_shapes=(1, 1),
                                        pool_strides=(1, 1))
def _build(self, state_input, name=None):
    """Build model given input placeholder(s).

    Args:
        state_input (tf.Tensor): Tensor input for state.
        name (str): Inner model name, also the variable scope of the
            inner model, if exist. One example is
            garage.tf.models.Sequential. Unused here; the inner scope is
            fixed to 'cnn'.

    Return:
        tf.Tensor: Tensor output of the model.

    """
    # Explicitly discard the unused argument, matching the convention of
    # the sibling _build implementation in this file.
    del name
    return cnn_with_max_pooling(
        input_var=state_input,
        filter_dims=self._filter_dims,
        hidden_nonlinearity=self._hidden_nonlinearity,
        hidden_w_init=self._hidden_w_init,
        hidden_b_init=self._hidden_b_init,
        num_filters=self._num_filters,
        strides=self._strides,
        padding=self._padding,
        pool_shapes=self._pool_shapes,
        pool_strides=self._pool_strides,
        name='cnn')