def snconv(self, x, filters, kernel_size, strides=1, padding='same', use_bias=True, kernel_initializer=tc.layers.xavier_initializer(), name=None):
    """Spectral-normalized 2D convolution layer.

    Args:
        x: 4-D input tensor, channels-last (the weight shape reads x.shape[-1]).
        filters: number of output channels.
        kernel_size: int for a square kernel, or a list/tuple of two ints (H, W).
        strides: spatial stride, applied to both height and width.
        padding: 'same', 'valid', or any other mode understood by
            tf_utils.padding; non-same/valid modes pad explicitly first and
            then run a VALID convolution.
        use_bias: whether to add a per-channel bias after the convolution.
        kernel_initializer: initializer for the convolution weight.
        name: variable-scope name; auto-generated via get_name when None.

    Returns:
        The convolved (and optionally biased) tensor.
    """
    name = self.get_name(name, 'snconv')
    # Accept tuples as well as lists; a bare int means a square kernel.
    if isinstance(kernel_size, (list, tuple)):
        assert_colorize(len(kernel_size) == 2)
        H, W = kernel_size
    else:
        assert_colorize(isinstance(kernel_size, int))
        H = W = kernel_size

    with tf.variable_scope(name):
        if padding.lower() != 'same' and padding.lower() != 'valid':
            # Custom padding mode: pad explicitly, then convolve with VALID.
            x = tf_utils.padding(x, kernel_size, strides, mode=padding)
            padding = 'valid'

        w = tf.get_variable('weight', shape=[H, W, x.shape[-1], filters],
                            initializer=kernel_initializer,
                            regularizer=self.l2_regularizer)
        w = tf_utils.spectral_norm(w)
        x = tf.nn.conv2d(x, w, strides=(1, strides, strides, 1), padding=padding.upper())

        if use_bias:
            b = tf.get_variable('bias', [filters], initializer=tf.zeros_initializer())
            x = tf.nn.bias_add(x, b)

    return x
def snconvtrans(self, x, filters, kernel_size, strides, padding='same', use_bias=True, kernel_initializer=tc.layers.xavier_initializer(), name=None):
    """Spectral-normalized 2D transposed convolution layer.

    Args:
        x: 4-D input tensor, channels-last. NOTE(review): the batch dimension
            must be statically known — output_shape is built from
            x.shape.as_list(), which yields None for a dynamic batch.
        filters: number of output channels.
        kernel_size: int for a square kernel, or a list/tuple of two ints (H, W).
        strides: spatial stride, applied to both height and width.
        padding: 'valid' gets the exact transposed-VALID output size; every
            other value is treated as 'same'.
        use_bias: whether to add a per-channel bias after the deconvolution.
        kernel_initializer: initializer for the deconvolution weight.
        name: variable-scope name; auto-generated via get_name when None.

    Returns:
        The upsampled (and optionally biased) tensor.
    """
    name = self.get_name(name, 'snconvtrans')
    # Accept tuples as well as lists; a bare int means a square kernel.
    if isinstance(kernel_size, (list, tuple)):
        assert_colorize(len(kernel_size) == 2)
        k_h, k_w = kernel_size
    else:
        assert_colorize(isinstance(kernel_size, int))
        k_h = k_w = kernel_size

    B, H, W, _ = x.shape.as_list()

    # Compute output shape: exact transposed-VALID size, otherwise plain
    # stride-multiplied SAME size.
    if padding.lower() == 'valid':
        output_shape = [B, (H - 1) * strides + k_h, (W - 1) * strides + k_w, filters]
    else:
        output_shape = [B, H * strides, W * strides, filters]
        padding = 'SAME'    # treat all other forms of padding as same

    with tf.variable_scope(name):
        # conv2d_transpose expects weights as [H, W, out_channels, in_channels].
        w = tf.get_variable('weight', shape=[k_h, k_w, filters, x.shape[-1]],
                            initializer=kernel_initializer,
                            regularizer=self.l2_regularizer)
        w = tf_utils.spectral_norm(w)
        x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape,
                                   strides=[1, strides, strides, 1],
                                   padding=padding.upper())

        if use_bias:
            b = tf.get_variable('bias', [filters], initializer=tf.zeros_initializer())
            x = tf.nn.bias_add(x, b)

    return x
def embedding(self, x, n_classes, embedding_size, sn, name='embedding'):
    """Look up class embeddings, optionally spectral-normalizing the table.

    Args:
        x: integer indices into the embedding table.
        n_classes: number of rows in the table.
        embedding_size: width of each embedding vector.
        sn: when truthy, apply spectral normalization to the transposed
            table (then transpose back) before the lookup.
        name: variable-scope name.

    Returns:
        The embedding vectors selected by x.
    """
    with tf.variable_scope(name):
        lookup_table = tf.get_variable(name='embedding_map',
                                       shape=[n_classes, embedding_size],
                                       initializer=tc.layers.xavier_initializer())
        if sn:
            # Normalize the transposed table and undo the transpose afterwards.
            lookup_table = tf.transpose(tf_utils.spectral_norm(tf.transpose(lookup_table)))
        return tf.nn.embedding_lookup(lookup_table, x)
def sndense(self, x, units, use_bias=True, kernel_initializer=tc.layers.xavier_initializer(), name=None):
    """Spectral-normalized dense (fully connected) layer.

    Args:
        x: input tensor; the weight shape reads x.shape[-1].
        units: number of output units.
        use_bias: whether to add a per-unit bias after the matmul.
        kernel_initializer: initializer for the weight matrix.
        name: variable-scope name; auto-generated via get_name when None.

    Returns:
        x @ spectral_norm(W), plus bias when use_bias.
    """
    name = self.get_name(name, 'sndense')
    with tf.variable_scope(name):
        w = tf.get_variable('weight', shape=[x.shape[-1], units],
                            initializer=kernel_initializer,
                            regularizer=self.l2_regularizer)
        w = tf_utils.spectral_norm(w)
        x = tf.matmul(x, w)
        if use_bias:
            b = tf.get_variable('bias', [units], initializer=tf.zeros_initializer())
            # bias_add for consistency with snconv/snconvtrans; identical
            # result to x + b for a [units] bias on the last axis.
            x = tf.nn.bias_add(x, b)
    return x
def call(self, x):
    """Run the convolution with a spectral-normalized kernel.

    Normalizes self.kernel (using the stored power-iteration vector self.u
    and self.iterations), convolves x with it, and adds self.bias when
    self.use_bias is set.
    """
    normalized_kernel = tf_utils.spectral_norm(self.kernel, self.u, self.iterations)
    out = tf.nn.conv2d(x, normalized_kernel, strides=self.strides, padding=self.padding)
    return tf.nn.bias_add(out, self.bias) if self.use_bias else out
def call(self, x):
    """Run the dense layer with a spectral-normalized weight matrix.

    Normalizes self.kernel (using the stored power-iteration vector self.u
    and self.iterations), multiplies x by it, and adds self.bias when
    self.use_bias is set.
    """
    normalized_kernel = tf_utils.spectral_norm(self.kernel, self.u, self.iterations)
    out = tf.matmul(x, normalized_kernel)
    return tf.nn.bias_add(out, self.bias) if self.use_bias else out