def create_layer(self, input):
    # print('convd2: input_shape: {}'.format(utils.get_incoming_shape(input)))
    self.input_shape = utils.get_incoming_shape(input)
    number_of_input_channels = self.input_shape[3]

    with tf.variable_scope('conv', reuse=False):
        # The kernel width (W.shape[1]) is fixed to 1, so this is a kernel_size x 1 convolution.
        if isinstance(self.initializer, tf.Tensor):
            W = tf.get_variable('W{}'.format(self.name[-2:]), initializer=self.initializer)
        else:
            W = tf.get_variable('W{}'.format(self.name[-2:]),
                                shape=(self.kernel_size, 1, number_of_input_channels, self.output_channels),
                                initializer=self.initializer)
        b = tf.Variable(tf.zeros([self.output_channels]))
        self.encoder_matrix = W
        Conv2d.layer_index += 1

        output = tf.nn.conv2d(input, W, strides=self.strides, padding='SAME')
        # print('convd2: output_shape: {}'.format(utils.get_incoming_shape(output)))
        # output = lrelu(tf.add(tf.contrib.layers.batch_norm(output, activation_fn=tf.nn.relu, is_training=True, reuse=None), b))
        output = lrelu(tf.add(utils.batch_norm_layer(output, self.is_training, 'BN{}'.format(self.name[-2:])), b))
        # output = lrelu(tf.add(tf.contrib.layers.batch_norm(output, decay=0.999, center=True, scale=True, updates_collections=None, is_training=True, reuse=None), b))
        return output
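# The method above relies on two helpers that are not defined in this file:
# lrelu and utils.batch_norm_layer. A minimal sketch of plausible TF1-style
# definitions, assuming a fixed leak slope and tf.layers-based batch norm
# (these exact signatures are assumptions, not the original project's code):
import tensorflow as tf

def lrelu(x, leak=0.2):
    # Leaky ReLU: returns x for x > 0 and leak * x otherwise.
    return tf.maximum(x, leak * x)

def batch_norm_layer(x, is_training, scope_name):
    # Batch normalization whose moving statistics update only while training.
    with tf.variable_scope(scope_name):
        return tf.layers.batch_normalization(x, training=is_training)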
def conv_2d_layer(incoming, nb_filter, filter_size, strides, kernel_init, is_training,
                  bn=True, padding='same', activation=None, bias=True, bias_init=None, scope=None):
    """
    Build a 2-D convolutional layer.
    :param incoming: input tensor
    :param nb_filter: number of convolution kernels (feature maps)
    :param filter_size: kernel size
    :param strides: stride
    :param kernel_init: kernel weight initializer
    :param is_training: whether we are in training mode
    :param bn: whether to apply batch normalization
    :param padding: padding method
    :param activation: activation function
    :param bias: whether to add a bias term
    :param bias_init: bias initializer
    :param scope: variable scope name
    :return: the convolved output
    """
    input_shape = get_incoming_shape(incoming)
    # The input must be a 4-D tensor.
    assert len(input_shape) == 4
    with tf.variable_scope(scope):
        conv2d_output = tf.layers.conv2d(incoming, filters=nb_filter, kernel_size=filter_size,
                                         strides=strides, padding=padding, activation=activation,
                                         kernel_initializer=kernel_init, use_bias=bias,
                                         bias_initializer=bias_init)
        if bn:
            return tf.layers.batch_normalization(conv2d_output, axis=-1, training=is_training)
        else:
            return conv2d_output
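# A hedged usage sketch for conv_2d_layer above. When bn=True,
# tf.layers.batch_normalization registers its moving-average updates in
# tf.GraphKeys.UPDATE_OPS, which must run alongside the train op. The
# placeholder shape, initializer, and scope name below are illustrative
# assumptions:
x = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
training_flag = tf.placeholder(tf.bool)
h = conv_2d_layer(x, nb_filter=64, filter_size=3, strides=1,
                  kernel_init=tf.truncated_normal_initializer(stddev=0.01),
                  is_training=training_flag, bn=True, scope='conv1')
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# with tf.control_dependencies(update_ops):
#     train_op = optimizer.minimize(loss)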
def create_layer(self, input, is_training=True):
    self.input_shape = utils.get_incoming_shape(input)
    number_of_input_channels = self.input_shape[3]

    with tf.variable_scope('conv', reuse=False):
        W = tf.get_variable('W{}'.format(self.name),
                            shape=(self.kernel_size, self.kernel_size, number_of_input_channels, self.output_channels))
        b = tf.Variable(tf.zeros([self.output_channels]))
        self.encoder_matrix = W
        Conv2d.layer_index += 1

        output = tf.nn.conv2d(input, W, strides=self.strides, padding='SAME')
        # output = lrelu(tf.add(tf.contrib.layers.batch_norm(output, scope="norm{}".format(self.name), is_training=is_training), b))
        output = lrelu(tf.add(output, b))
        return output
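# get_incoming_shape / utils.get_incoming_shape is used throughout these
# snippets but never defined here. A minimal sketch, assuming it simply
# returns the tensor's static shape as a Python list (the original helper may
# handle unknown dimensions differently):
def get_incoming_shape(incoming):
    # Static shape as a list, e.g. [None, 224, 224, 3] for NHWC input.
    return incoming.get_shape().as_list()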
def create_layer(self, input):
    # print('convd2: input_shape: {}'.format(utils.get_incoming_shape(input)))
    self.input_shape = utils.get_incoming_shape(input)
    number_of_input_channels = self.input_shape[3]

    with tf.variable_scope('conv', reuse=False):
        W = tf.get_variable('W{}'.format(self.name[-3:]),
                            shape=(self.kernel_size, self.kernel_size, number_of_input_channels, self.output_channels))
        b = tf.Variable(tf.zeros([self.output_channels]))
        self.encoder_matrix = W
        Conv2d.layer_index += 1

        output = tf.nn.conv2d(input, W, strides=self.strides, padding='SAME')
        # print('convd2: output_shape: {}'.format(utils.get_incoming_shape(output)))
        output = lrelu(tf.add(tf.contrib.layers.batch_norm(output), b))
        return output
def create_layer(self, input):
    # print('convd2: input_shape: {}'.format(utils.get_incoming_shape(input)))
    self.input_shape = utils.get_incoming_shape(input)
    number_of_input_channels = self.input_shape[3]

    with tf.variable_scope('conv', reuse=None):
        W = tf.get_variable('W{}'.format(self.name[-3:]),
                            shape=(self.kernel_size, self.kernel_size, number_of_input_channels, self.output_channels))
        b = tf.Variable(tf.zeros([self.output_channels]))
        self.encoder_matrix = W
        Conv2d.layer_index += 1

        output = tf.nn.conv2d(input, W, strides=self.strides, padding='SAME')
        # print('convd2: output_shape: {}'.format(utils.get_incoming_shape(output)))
        output = lrelu(tf.add(tf.contrib.layers.batch_norm(output), b))
        return output
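# The last two create_layer variants differ only in the reuse argument
# (reuse=False vs reuse=None). At the top level both leave the scope in
# non-reusing mode (None inherits the parent scope's setting), so calling
# either method twice would fail with a "Variable conv/W... already exists"
# ValueError. A sketch of one way to share weights explicitly, using
# tf.AUTO_REUSE (an alternative shown for illustration, not the original
# author's approach):
with tf.variable_scope('conv', reuse=tf.AUTO_REUSE):
    # Created on the first call, transparently reused on subsequent calls.
    W_shared = tf.get_variable('W_shared', shape=(3, 3, 64, 128))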