def conv3d_unknown_dim(inputs, filters, kernel_size, name, activation=None, kernel_initializer=he_initializer, bias_initializer=zeros_initializer, normalization=None, is_training=False, data_format='channels_first', padding='same', strides=(1, 1, 1), debug_print=debug_print_conv):
    """3D convolution built on tf.nn.conv3d so it works with unknown (None) dimensions.

    tf.layers.conv3d needs fully defined shapes, hence the manual variable
    creation here. Applies optional normalization and activation afterwards.

    :param inputs: Input tensor in NCDHW layout ('channels_first').
    :param filters: Number of output channels.
    :param kernel_size: Sequence of three ints (depth, height, width).
    :param name: Variable/op name prefix.
    :param activation: Optional activation callable applied last.
    :param kernel_initializer: Initializer for the convolution kernel.
    :param bias_initializer: Initializer for the bias.
    :param normalization: Optional normalization callable (applied before activation).
    :param is_training: Training flag forwarded to normalization.
    :param data_format: Must be 'channels_first' — tf.nn.conv3d is called with 'NCDHW'.
    :param padding: Padding mode handed to pad_for_conv.
    :param strides: Spatial strides (depth, height, width).
    :param debug_print: If True, print the layer parameters.
    :return: Output tensor after convolution, bias, normalization and activation.
    """
    # pad_for_conv may pre-pad the tensor and returns the padding mode the
    # convolution itself should then use.
    node, padding_for_conv = pad_for_conv(inputs=inputs,
                                          kernel_size=kernel_size,
                                          name=name,
                                          padding=padding,
                                          data_format=data_format)
    inputs_shape = inputs.get_shape().as_list()
    # NCDHW layout: channel axis is index 1.
    num_inputs = inputs_shape[1]
    # tf.nn.conv3d filter layout: [depth, height, width, in_channels, out_channels].
    W = tf.get_variable(name + '_w',
                        [kernel_size[0], kernel_size[1], kernel_size[2], num_inputs, filters],
                        initializer=kernel_initializer,
                        regularizer=tf.nn.l2_loss)
    # Bias shaped for broadcasting over NCDHW tensors.
    b = tf.get_variable(name + '_b', [1, filters, 1, 1, 1], initializer=bias_initializer)
    # BUG FIX: previously this convolved the raw `inputs` with hard-coded
    # padding='SAME' and unit strides, discarding the padded `node`, the
    # padding mode from pad_for_conv, and the `strides` argument (the
    # commented-out tf.layers.conv3d call showed the intended behavior).
    # tf.nn.conv3d wants the padding mode upper-case.
    outputs = tf.nn.conv3d(node,
                           W,
                           strides=[1, 1] + list(strides),
                           padding=padding_for_conv.upper(),
                           data_format='NCDHW',
                           name=name)
    outputs += b
    if normalization is not None:
        outputs = normalization(outputs, is_training=is_training, data_format=data_format, name=name + '/norm')
    if activation is not None:
        outputs = activation(outputs)
    if debug_print:
        print_conv_parameters(inputs=inputs,
                              outputs=outputs,
                              kernel_size=kernel_size,
                              name=name,
                              activation=activation,
                              kernel_initializer=kernel_initializer,
                              bias_initializer=bias_initializer,
                              normalization=normalization,
                              is_training=is_training,
                              data_format=data_format,
                              padding=padding,
                              strides=strides)
    return outputs
def conv3d_transpose_unknown_dim(inputs, filters, kernel_size, name, activation=None, kernel_initializer=he_initializer, bias_initializer=zeros_initializer, normalization=None, is_training=False, data_format='channels_first', padding='same', strides=(1, 1, 1), debug_print=debug_print_conv):
    """Transposed 3D convolution built on tf.nn.conv3d_transpose for unknown (None) dimensions.

    Creates the kernel/bias variables manually because the layers API needs
    fully defined shapes. Applies optional normalization and activation.

    :param inputs: Input tensor in NCDHW layout ('channels_first').
    :param filters: Number of output channels.
    :param kernel_size: Sequence of three ints (depth, height, width).
    :param name: Variable/op name prefix.
    :param activation: Optional activation callable applied last.
    :param kernel_initializer: Initializer for the convolution kernel.
    :param bias_initializer: Initializer for the bias.
    :param normalization: Optional normalization callable (applied before activation).
    :param is_training: Training flag forwarded to normalization.
    :param data_format: Must be 'channels_first' — the op is called with 'NCDHW'.
    :param padding: Recorded for debug printing; the op itself uses 'SAME'.
    :param strides: Recorded for debug printing; see NOTE below.
    :param debug_print: If True, print the layer parameters.
    :return: Output tensor after transposed convolution, bias, normalization and activation.
    """
    inputs_shape = inputs.get_shape().as_list()
    # NCDHW layout: channel axis is index 1.
    num_inputs = inputs_shape[1]
    # BUG FIX: tf.nn.conv3d_transpose expects the filter laid out as
    # [depth, height, width, output_channels, in_channels] — the output
    # channel axis comes BEFORE the input channel axis, the reverse of
    # tf.nn.conv3d. The previous [..., num_inputs, filters] order only
    # happened to work when num_inputs == filters.
    W = tf.get_variable(name + '_w',
                        [kernel_size[0], kernel_size[1], kernel_size[2], filters, num_inputs],
                        initializer=kernel_initializer,
                        regularizer=tf.nn.l2_loss)
    # Bias shaped for broadcasting over NCDHW tensors.
    b = tf.get_variable(name + '_b', [1, filters, 1, 1, 1], initializer=bias_initializer)
    # The declared output keeps the input spatial extent.
    output_shape = [inputs_shape[0], filters] + inputs_shape[2:]
    # BUG FIX: with stride 1, an output equal to the input spatial extent
    # corresponds to 'SAME' padding; the previous padding='VALID' would
    # require an output kernel_size - 1 larger per axis and therefore
    # failed shape validation for any kernel bigger than 1x1x1.
    # NOTE(review): the `strides` argument is still not applied here — a
    # non-unit stride would also require scaling output_shape. TODO confirm
    # callers only pass the default (1, 1, 1).
    outputs = tf.nn.conv3d_transpose(inputs,
                                     W,
                                     strides=[1, 1, 1, 1, 1],
                                     padding='SAME',
                                     data_format='NCDHW',
                                     output_shape=output_shape,
                                     name=name)
    outputs += b
    if normalization is not None:
        outputs = normalization(outputs, is_training=is_training, data_format=data_format, name=name + '/norm')
    if activation is not None:
        outputs = activation(outputs)
    if debug_print:
        print_conv_parameters(inputs=inputs,
                              outputs=outputs,
                              kernel_size=kernel_size,
                              name=name,
                              activation=activation,
                              kernel_initializer=kernel_initializer,
                              bias_initializer=bias_initializer,
                              normalization=normalization,
                              is_training=is_training,
                              data_format=data_format,
                              padding=padding,
                              strides=strides)
    return outputs
def conv2d(inputs, filters, kernel_size, name, activation=None, kernel_initializer=he_initializer, bias_initializer=zeros_initializer, normalization=None, is_training=False, data_format='channels_first', padding='same', strides=(1, 1), use_bias=True, debug_print=debug_print_conv):
    """2D convolution layer with optional normalization and activation.

    Pre-pads the input via pad_for_conv, runs tf.layers.conv2d, then applies
    the optional normalization and activation callables in that order.

    :param inputs: Input tensor.
    :param filters: Number of output channels.
    :param kernel_size: Sequence of two ints (height, width).
    :param name: Variable/op name prefix.
    :param activation: Optional activation callable applied last.
    :param kernel_initializer: Initializer for the convolution kernel.
    :param bias_initializer: Initializer for the bias.
    :param normalization: Optional normalization callable (applied before activation).
    :param is_training: Training flag; also passed as `trainable` to the conv
        layer. NOTE(review): coupling trainability to is_training looks like a
        file-wide convention — confirm this is intended.
    :param data_format: 'channels_first' or 'channels_last'.
    :param padding: Padding mode handed to pad_for_conv.
    :param strides: Spatial strides (height, width).
    :param use_bias: Whether the conv layer adds a bias term.
    :param debug_print: If True, print the layer parameters.
    :return: Output tensor after convolution, normalization and activation.
    """
    padded, conv_padding = pad_for_conv(inputs=inputs,
                                        kernel_size=kernel_size,
                                        name=name,
                                        padding=padding,
                                        data_format=data_format)
    net = tf.layers.conv2d(inputs=padded,
                           filters=filters,
                           kernel_size=kernel_size,
                           name=name,
                           kernel_initializer=kernel_initializer,
                           bias_initializer=bias_initializer,
                           trainable=is_training,
                           data_format=data_format,
                           kernel_regularizer=tf.nn.l2_loss,
                           padding=conv_padding,
                           strides=strides,
                           use_bias=use_bias)
    if normalization is not None:
        net = normalization(net, is_training=is_training, data_format=data_format, name=name + '/norm')
    if activation is not None:
        net = activation(net, name=name + '/activation')
    if debug_print:
        print_conv_parameters(inputs=inputs,
                              outputs=net,
                              kernel_size=kernel_size,
                              name=name,
                              activation=activation,
                              kernel_initializer=kernel_initializer,
                              bias_initializer=bias_initializer,
                              normalization=normalization,
                              is_training=is_training,
                              data_format=data_format,
                              padding=padding,
                              strides=strides,
                              use_bias=use_bias)
    return net