def __init__(self, layer: Conv2D):
    """Build forward ("up") and approximate-inverse ("down") functions for *layer*.

    The up function runs the layer's own convolution (its trained kernel and
    bias). The down function runs a convolution whose kernel is the original
    one flipped spatially with its in/out channel axes swapped and a zero
    bias — the standard transpose used to project activations back toward
    input space.
    """
    super().__init__(layer)
    kernel, bias = layer.get_weights()

    # Forward direction: a fresh Conv2D seeded with the layer's own weights.
    fwd_in = Input(shape=layer.input_shape[1:])
    fwd_conv = Conv2D(
        filters=layer.filters,
        kernel_size=layer.kernel_size,
        strides=layer.strides,
        dilation_rate=layer.dilation_rate,
        padding=layer.padding,
        kernel_initializer=tf.constant_initializer(kernel),
        bias_initializer=tf.constant_initializer(bias),
    )
    self.up_func = K.function([fwd_in], [fwd_conv(fwd_in)])

    # Flip every filter vertically and horizontally.
    flipped = kernel[::-1, ::-1, :, :]
    # Swap the input/output channel axes so the reconstruction comes out
    # with the correct number of channels.
    kernel_t = np.moveaxis(flipped, 2, 3)
    bias_t = np.zeros(kernel_t.shape[3])

    # Backward direction: convolve the layer's output back toward its input.
    bwd_in = Input(shape=layer.output_shape[1:])
    bwd_conv = Conv2D(
        filters=kernel_t.shape[-1],
        kernel_size=kernel_t.shape[:2],
        strides=layer.strides,
        dilation_rate=layer.dilation_rate,
        padding=layer.padding,
        kernel_initializer=tf.constant_initializer(kernel_t),
        bias_initializer=tf.constant_initializer(bias_t),
    )
    self.down_func = K.function([bwd_in], [bwd_conv(bwd_in)])
def safe_init_conv2d(layer: layers.Conv2D):
    """Reinitialize a Conv2D layer in place with a scaled-constant kernel and zero bias.

    Logs the assignment, writes the new weights into the layer, then runs
    the activation check on it.
    """
    current = layer.get_weights()
    kernel = scaled_constant(1, current[0].shape)
    _logger.info(f"Setting weights for layer `{layer.name}` :: "
                 f"{layer.__class__.__name__}{kernel.shape}")
    new_weights = [kernel]
    # A second weight tensor is present only when the layer has a bias;
    # zero it out when it exists.
    if len(current) > 1:
        new_weights.append(np.zeros_like(current[1]))
    layer.set_weights(new_weights)
    _check_activation(layer)
def get_deconv2d(conv2d_layer: Conv2D):
    """Build a frozen DeConv2D layer that approximately inverts *conv2d_layer*.

    The deconvolution kernel is the conv kernel with its input/output channel
    axes swapped and each filter flipped vertically and horizontally — the
    standard transpose of a convolution. The bias is zero.

    Args:
        conv2d_layer: the trained Conv2D layer to invert.

    Returns:
        A non-trainable DeConv2D layer mirroring the conv layer's geometry.
    """
    W = conv2d_layer.get_weights()[0]
    # W: kernel_height, kernel_width, in_channels, n_filters (Keras layout)
    # Reverse the conv operation: swap the channel axes ...
    W = np.transpose(W, (0, 1, 3, 2))
    # ... and flip each filter vertically and horizontally.
    W = W[::-1, ::-1, :, :]
    n_filters = W.shape[3]
    # Use the full (height, width) pair so non-square kernels are handled
    # correctly; the original passed only W.shape[0], which silently assumed
    # a square kernel. For square kernels this is equivalent.
    kernel_size = W.shape[:2]
    strides = conv2d_layer.strides
    padding = conv2d_layer.padding
    b = np.zeros(n_filters)
    return DeConv2D(n_filters,
                    kernel_size=kernel_size,
                    strides=strides,
                    padding=padding,
                    kernel_initializer=tf.constant_initializer(W),
                    bias_initializer=tf.constant_initializer(b),
                    trainable=False)
def get_deconv2d_reverse_bias(conv2d_layer: Conv2D):
    """Return a ReverseBiasLayer built from the conv layer's bias vector.

    NOTE(review): presumably the layer undoes the bias addition of the
    forward convolution — confirm against ReverseBiasLayer's definition.
    """
    bias = conv2d_layer.get_weights()[1]
    return ReverseBiasLayer(bias)