import numpy as np
from keras.layers import Concatenate, Conv3D, LeakyReLU, MaxPooling3D, UpSampling3D


def unet3D(x_in,
           img_shape, out_im_chans,
           nf_enc=[64, 64, 128, 128, 256, 256, 512],
           nf_dec=None,
           layer_prefix='unet',
           n_convs_per_stage=1,
           ):
    ks = 3
    x = x_in

    # Encoder: n_convs_per_stage conv + LeakyReLU blocks per resolution level,
    # followed by max-pooling (except at the coarsest level).
    encodings = []
    encoding_vol_sizes = []
    for i in range(len(nf_enc)):
        for j in range(n_convs_per_stage):
            x = Conv3D(
                nf_enc[i],
                kernel_size=ks,
                strides=(1, 1, 1), padding='same',
                name='{}_enc_conv3D_{}_{}'.format(layer_prefix, i, j + 1))(x)
            x = LeakyReLU(0.2)(x)

        encodings.append(x)
        encoding_vol_sizes.append(np.asarray(x.get_shape().as_list()[1:-1]))

        if i < len(nf_enc) - 1:
            x = MaxPooling3D(pool_size=(2, 2, 2), padding='same',
                             name='{}_enc_maxpool_{}'.format(layer_prefix, i))(x)

    if nf_dec is None:
        nf_dec = list(reversed(nf_enc[1:]))

    # Decoder: upsample, concatenate the matching encoder features (skip
    # connection), then apply n_convs_per_stage conv + LeakyReLU blocks.
    for i in range(len(nf_dec)):
        curr_shape = x.get_shape().as_list()[1:-1]

        # only upsample if we are not yet at the full input resolution
        if np.any(np.asarray(curr_shape) < np.asarray(img_shape[:len(curr_shape)])):
            us = (2, 2, 2)
            x = UpSampling3D(size=us,
                             name='{}_dec_upsamp_{}'.format(layer_prefix, i))(x)

        # skip connection: crop/pad to the matching encoder volume size, then
        # concatenate (the coarsest encoding is already in x, so it is skipped)
        if i <= len(encodings) - 2:
            # _pad_or_crop_to_shape_3D is a helper defined elsewhere in the codebase
            x = _pad_or_crop_to_shape_3D(x,
                                         np.asarray(x.get_shape().as_list()[1:-1]),
                                         encoding_vol_sizes[-i - 2])
            x = Concatenate(axis=-1)([x, encodings[-i - 2]])

        for j in range(n_convs_per_stage):
            x = Conv3D(nf_dec[i],
                       kernel_size=ks, strides=(1, 1, 1), padding='same',
                       name='{}_dec_conv3D_{}_{}'.format(layer_prefix, i, j))(x)
            x = LeakyReLU(0.2)(x)

    # final 1x1x1 conv to the output channel count;
    # add your own activation after this model
    y = Conv3D(out_im_chans, kernel_size=1, padding='same',
               name='{}_dec_conv3D_final'.format(layer_prefix))(x)
    return y
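# --- Usage sketch (illustrative, not from the original source). Wraps unet3D in a
# Keras Model. The volume size and channel counts below are assumptions chosen so
# every spatial dimension divides evenly at each pooling stage; the helper
# _pad_or_crop_to_shape_3D must be importable from the original codebase. ---
from keras.layers import Input
from keras.models import Model

vol_shape = (64, 64, 64)                              # assumed spatial size (D, H, W)
x_in = Input(shape=vol_shape + (1,), name='unet_in')  # single-channel input volume
y = unet3D(x_in, img_shape=vol_shape, out_im_chans=3,
           nf_enc=[32, 64, 128], n_convs_per_stage=2)
unet_model = Model(inputs=x_in, outputs=y, name='unet3d_example')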
import tensorflow as tf
import keras.backend as K
from keras.layers import LeakyReLU


def call(self, x):
    # call() of a custom gated transposed-convolution layer (a sketch of the
    # enclosing class follows below). Note that the kernel and biases are created
    # here with random/zero initial values rather than as trainable weights in
    # build(), and that the feature branch and the gate branch share the kernel.
    inp = x
    kernel = K.random_uniform_variable(
        shape=(self.kernel_size[0], self.kernel_size[1],
               self.out_shape[-1], int(x.get_shape()[-1])),
        low=0, high=1)

    # feature branch: transposed conv + bias + LeakyReLU
    deconv = K.conv2d_transpose(x, kernel=kernel, strides=self.strides,
                                output_shape=self.out_shape, padding='same')
    biases = K.zeros(shape=(self.out_shape[-1],))
    deconv = K.reshape(K.bias_add(deconv, biases), deconv.get_shape())
    deconv = LeakyReLU()(deconv)

    # gate branch: transposed conv (same kernel) + bias + sigmoid
    g = K.conv2d_transpose(inp, kernel, output_shape=self.out_shape,
                           strides=self.strides, padding='same')
    biases2 = K.zeros(shape=(self.out_shape[-1],))
    g = K.reshape(K.bias_add(g, biases2), deconv.get_shape())
    g = K.sigmoid(g)

    # gated output: element-wise product of the features and the gate
    deconv = tf.multiply(deconv, g)

    outputs = [deconv, g]
    output_shapes = self.compute_output_shape(x.shape)
    for output, shape in zip(outputs, output_shapes):
        output._keras_shape = shape
    return outputs
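# --- Sketch of the enclosing layer class (assumed; the original class definition is
# not shown). The class name is hypothetical, and the attribute names simply follow
# what call() uses: kernel_size, strides and out_shape (the full output shape passed
# to K.conv2d_transpose, including the batch dimension). Since the layer returns two
# tensors, compute_output_shape returns two shapes. ---
from keras.layers import Layer


class GatedDeconv2D(Layer):  # hypothetical name
    def __init__(self, out_shape, kernel_size=(5, 5), strides=(2, 2), **kwargs):
        super(GatedDeconv2D, self).__init__(**kwargs)
        self.out_shape = out_shape
        self.kernel_size = kernel_size
        self.strides = strides

    def compute_output_shape(self, input_shape):
        # one shape for the gated feature map, one for the gate itself
        return [self.out_shape, self.out_shape]

    # call(self, x) as defined above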
from keras.layers import Conv2D, Cropping2D, LeakyReLU


def block(x):
    # conv to 4x the target channel count, then PixelShuffler (a custom
    # depth-to-space layer defined elsewhere) doubles the spatial size;
    # out_shape = (H, W, C) comes from the enclosing scope
    x = Conv2D(out_shape[2] * 4, 3, padding='same')(x)
    x = LeakyReLU(0.1)(x)
    x = PixelShuffler()(x)
    # crop if the upsampled size overshoots the target
    shape = x.get_shape().as_list()[1:]
    cx = shape[0] - out_shape[0]
    cy = shape[1] - out_shape[1]
    if cx != 0 or cy != 0:
        x = Cropping2D(((0, cx), (0, cy)))(x)
    return x
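# --- Usage sketch (illustrative, not from the original source). block() reads
# out_shape from its enclosing scope, so it is defined here at module level; the
# input feature-map size is an assumption, and PixelShuffler must be available
# from the original codebase. ---
from keras.layers import Input
from keras.models import Model

out_shape = (63, 63, 32)             # target (H, W, channels)
feat_in = Input(shape=(32, 32, 64))  # assumed input feature map
up = block(feat_in)                  # 32x32 -> 64x64 via PixelShuffler, cropped to 63x63
upscaler = Model(feat_in, up)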