def _video(self):  # make the generator the only stream.
    '''The foreground stream. It learns the dynamics in the foreground.'''
    video = Sequential()
    # 1x1x1x100 noise -> 4x4x2x512
    video.add(Deconvolution3D(filters=512,
                              output_shape=(None, 4, 4, 2, 512),
                              kernel_size=(4, 4, 2),
                              strides=(2, 2, 2),
                              padding='valid',
                              input_shape=(1, 1, 1, 100)))
    video.add(BatchNormalization())
    video.add(LeakyReLU(alpha=0.2))
    # 4x4x2x512 -> 8x8x4x256
    video.add(Deconvolution3D(filters=256,
                              output_shape=(None, 8, 8, 4, 256),
                              kernel_size=(4, 4, 2),
                              strides=(2, 2, 2),
                              padding='same'))
    video.add(BatchNormalization())
    video.add(LeakyReLU(alpha=0.2))
    # 8x8x4x256 -> 16x16x8x128
    video.add(Deconvolution3D(filters=128,
                              output_shape=(None, 16, 16, 8, 128),
                              kernel_size=(4, 4, 2),
                              strides=(2, 2, 2),
                              padding='same'))
    video.add(BatchNormalization())
    video.add(LeakyReLU(alpha=0.2))
    # 16x16x8x128 -> 32x32x16x64
    video.add(Deconvolution3D(filters=64,
                              output_shape=(None, 32, 32, 16, 64),
                              kernel_size=(4, 4, 2),
                              strides=(2, 2, 2),
                              padding='same'))
    video.add(BatchNormalization())
    video.add(LeakyReLU(alpha=0.2))
    # 32x32x16x64 -> 64x64x32x3 video, squashed into [-1, 1] with tanh
    video.add(Deconvolution3D(filters=3,
                              output_shape=(None, 64, 64, 32, 3),
                              kernel_size=(4, 4, 4),
                              strides=(2, 2, 2),
                              padding='same',
                              activation='tanh'))
    print("FOREGROUND STREAM")
    print(video.summary())
    print("expecting input: {}\n\n".format(video.input))
    return video

def get_upconv(depth, nb_filters, pool_size, image_shape, kernel_size=(2, 2, 2),
               strides=(2, 2, 2), deconvolution=False):
    if deconvolution:
        try:
            from keras_contrib.layers import Deconvolution3D
        except ImportError:
            raise ImportError(
                "Install keras_contrib in order to use deconvolution. "
                "Otherwise set deconvolution=False.")

        return Deconvolution3D(filters=nb_filters,
                               kernel_size=kernel_size,
                               output_shape=compute_level_output_shape(
                                   filters=nb_filters, depth=depth,
                                   pool_size=pool_size, image_shape=image_shape),
                               strides=strides,
                               input_shape=compute_level_output_shape(
                                   filters=nb_filters, depth=depth + 1,
                                   pool_size=pool_size, image_shape=image_shape))
    else:
        return UpSampling3D(size=pool_size)

def get_up_convolution(depth, n_filters, pool_size, image_shape, kernel_size=(2, 2, 2),
                       strides=(2, 2, 2), deconvolution=False):
    if deconvolution:
        try:
            from keras_contrib.layers import Deconvolution3D
        except ImportError:
            raise ImportError(
                "Install keras_contrib in order to use deconvolution. "
                "Otherwise set deconvolution=False."
                "\nTry: pip install git+https://www.github.com/farizrahman4u/keras-contrib.git")

        return Deconvolution3D(filters=n_filters,
                               kernel_size=kernel_size,
                               output_shape=compute_level_output_shape(
                                   n_filters=n_filters, depth=depth,
                                   pool_size=pool_size, image_shape=image_shape),
                               strides=strides,
                               input_shape=compute_level_output_shape(
                                   n_filters=n_filters, depth=depth,
                                   pool_size=pool_size, image_shape=image_shape))
    else:
        return UpSampling3D(size=pool_size)

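# Usage sketch (not part of the original source): how a helper like
# get_up_convolution is typically wired into one U-Net decoder level.
# `current_layer`, `skip_connection`, `depth`, `n_base_filters`,
# `pool_size` and `image_shape` are assumed placeholders, and the
# channels-last concatenation axis is an assumption as well.
from keras.layers import concatenate


def _decoder_level_example(current_layer, skip_connection, depth,
                           n_base_filters=32, pool_size=(2, 2, 2),
                           image_shape=(64, 64, 64), deconvolution=False):
    # returns either a Deconvolution3D or an UpSampling3D layer, then applies it
    up = get_up_convolution(depth=depth,
                            n_filters=n_base_filters * (2 ** depth),
                            pool_size=pool_size,
                            image_shape=image_shape,
                            deconvolution=deconvolution)(current_layer)
    # merge with the encoder feature map of the same depth
    return concatenate([up, skip_connection], axis=-1)
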
def _mask(self, video):
    '''The mask stream: a single-channel sigmoid mask on top of the shared
    foreground features, with an L1 weight penalty.'''
    mask = Sequential()
    mask.add(video)
    mask.add(Deconvolution3D(filters=1,
                             output_shape=(None, 64, 64, 32, 1),
                             kernel_size=(4, 4, 2),
                             strides=(2, 2, 2),
                             padding='same',
                             kernel_regularizer=l1(0.1),
                             activation='sigmoid'))
    return mask

def _gen_net(self, video):
    '''Implements the last convolution for the foreground.'''
    gen_net = Sequential()
    gen_net.add(video)
    gen_net.add(Deconvolution3D(filters=3,
                                output_shape=(None, 64, 64, 32, 3),
                                kernel_size=(4, 4, 4),
                                strides=(2, 2, 2),
                                padding='same',
                                activation='tanh'))
    return gen_net

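# A minimal sketch (not from the original class) of how the foreground, mask
# and background streams of a video GAN generator are usually combined:
# video = mask * foreground + (1 - mask) * background. `foreground`, `mask`
# and `background` are assumed Keras tensors of shapes (batch, 64, 64, 32, 3),
# (batch, 64, 64, 32, 1) and (batch, 64, 64, 32, 3); the single-channel mask
# broadcasts over the colour channels inside the Lambda.
from keras.layers import Lambda


def _combine_streams_example(foreground, mask, background):
    # t[0] = foreground, t[1] = mask, t[2] = background
    return Lambda(lambda t: t[1] * t[0] + (1.0 - t[1]) * t[2],
                  name='mask_gated_merge')([foreground, mask, background])
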
def upscaling_unet(my_specs, layers):
    """adds layers of a 3D-SRUnet"""
    sc = my_specs.sc
    n_levels = my_specs.n_levels
    n_convs = my_specs.n_convs
    n_fmaps = my_specs.n_fmaps
    kernel_size = my_specs.kernel_size
    merge_shapes = []
    merge_index = []
    if K.image_dim_ordering() == 'tf':
        spatial_slice = np.s_[1:-1]
    else:
        spatial_slice = np.s_[2:]

    for l in range(n_levels):
        # downstream
        for n_f, ks in zip(n_fmaps[l], kernel_size[l]):
            layers.append(
                Convolution3D(n_f,
                              ks[0],
                              ks[1] * sc + ((ks[1] * sc) % 2 - 1),
                              ks[2] * sc + ((ks[2] * sc) % 2 - 1),
                              init='he_normal',
                              border_mode='same',
                              activation='relu')(layers[-1]))
        if l < n_levels - 1:
            # intermediate upsampling (doesn't happen for the bottom level)
            if sc >= 2:
                pool_size = (1, 2, 2)  # the later downsampling doesn't happen
                layers.append(
                    Deconvolution3D(
                        n_f,
                        ks[0] * sc - 1,
                        ks[1] * sc - 1,
                        ks[2] * sc - 1,
                        output_shape=(None, n_f,) + tuple(
                            layers[-1].get_shape().as_list()[spatial_slice] *
                            np.array([sc, 1, 1])),
                        subsample=(sc, 1, 1),
                        init='he_normal',
                        border_mode='same',
                        activation='relu')(layers[-1]))
                last_layer_index = -2
                sc //= 2
            else:
                # downsampling in all dimensions now
                sc = 1
                pool_size = (2, 2, 2)
                last_layer_index = -1
            merge_shapes.append(list(layers[-1].get_shape().as_list()[spatial_slice]))
            merge_index.append(len(layers) - 1)
            layers.append(
                MaxPooling3D(pool_size=pool_size,
                             border_mode='same')(layers[last_layer_index]))

    for l in range(n_levels - 1)[::-1]:
        # upstream: deconvolve, crop the stored skip connection and merge
        layers.append(
            Deconvolution3D(
                n_fmaps[l][0],
                kernel_size[l][-1][0],
                kernel_size[l][-1][1],
                kernel_size[l][-1][2],
                output_shape=(None, n_fmaps[l][0],) + tuple(
                    layers[-1].get_shape().as_list()[spatial_slice] *
                    np.array([2 * sc, 2, 2])),
                subsample=(2 * sc, 2, 2),
                init='he_normal',
                border_mode='same',
                activation='relu')(layers[-1]))
        offset = np.array(merge_shapes[l]) - np.array(
            layers[-1].get_shape().as_list()[spatial_slice])
        if np.any(offset % 2 != 0):
            warnings.warn('For seamless tiling you need to choose a different '
                          'input shape or kernel size')
        layers.append(
            Cropping3D(cropping=((offset[0] // 2, offset[0] // 2),
                                 (offset[1] // 2, offset[1] // 2),
                                 (offset[2] // 2, offset[2] // 2)))(layers[merge_index[l]]))
        if K.image_dim_ordering() == 'tf':
            ch_axis = -1
        else:
            ch_axis = 1
        layers.append(merge([layers[-1], layers[-2]], mode='concat', concat_axis=ch_axis))
        for n_f, ks in zip(n_fmaps[l], kernel_size[l]):
            layers.append(
                Convolution3D(n_f,
                              ks[0],
                              ks[1],
                              ks[2],
                              init='he_normal',
                              border_mode='same',
                              activation='relu')(layers[-1]))

    layers.append(
        Convolution3D(1, 3, 3, 3, init='he_normal', border_mode='same')(layers[-1]))
    return layers

def sparsecoding(my_specs, layers, input_shape=(64, 64, 16)):
    """adds layers of a sparsecoding network (FSRCNN)"""
    if K.image_dim_ordering() == 'tf':
        spatial_slice = np.s_[1:-1]
    else:
        spatial_slice = np.s_[2:]

    # feature extraction
    layers.append(Convolution3D(my_specs.d, 5, 13, 13, init='he_normal', border_mode='same')(layers[-1]))
    layers.append(PReLU(init='zero', shared_axes=spatial_slice)(layers[-1]))
    # shrinking
    layers.append(Convolution3D(my_specs.s, 1, 1, 1, init='he_normal', border_mode='same')(layers[-1]))
    layers.append(PReLU(init='zero', shared_axes=spatial_slice)(layers[-1]))
    # non-linear mapping
    for k in range(my_specs.m):
        layers.append(Convolution3D(my_specs.s, 3, 9, 9, init='he_normal', border_mode='same')(layers[-1]))
        layers.append(PReLU(init='zero', shared_axes=spatial_slice)(layers[-1]))
    # expanding
    layers.append(Convolution3D(my_specs.d, 1, 1, 1, init='he_normal', border_mode='same')(layers[-1]))
    layers.append(PReLU(init='zero', shared_axes=spatial_slice)(layers[-1]))

    # deconvolution: upscale the first spatial axis by the factor sc
    spatial_output_shape = tuple(np.array(input_shape) * np.array((my_specs.sc, 1, 1)))
    if K.image_dim_ordering() == 'tf':
        output_shape = (None,) + spatial_output_shape + (1,)
    else:
        output_shape = (None, 1,) + spatial_output_shape
    layers.append(Deconvolution3D(1, 13, 13, 13,
                                  output_shape=output_shape,
                                  subsample=(my_specs.sc, 1, 1),
                                  border_mode='same',
                                  init=gaussian_init)(layers[-1]))
    return layers

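# Usage sketch (an assumption, not from the original source): both builders
# above take a spec object and a list whose last element is the current
# tensor, append layers to that list, and return it. `SpecExample` only
# mirrors the attributes sparsecoding() reads (d, s, m, sc); the real project
# presumably defines its own spec class, and `gaussian_init` must already be
# in scope.
from collections import namedtuple
from keras import backend as K
from keras.layers import Input
from keras.models import Model

SpecExample = namedtuple('SpecExample', ['d', 's', 'm', 'sc'])


def build_fsrcnn_example(input_shape=(64, 64, 16), sc=4):
    spec = SpecExample(d=56, s=12, m=4, sc=sc)  # FSRCNN-style layer sizes
    if K.image_dim_ordering() == 'tf':
        inp = Input(shape=input_shape + (1,))
    else:
        inp = Input(shape=(1,) + input_shape)
    layers = [inp]
    layers = sparsecoding(spec, layers, input_shape=input_shape)
    # the final deconvolution upsamples the first spatial axis by sc
    return Model(inp, layers[-1])
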
def deconv_conv_unet_model_3d_conv_add_48_dalitaion(inputs, classes=2):
    # encoder: three dilated-convolution blocks, each followed by 2x2x2 max pooling
    conv1 = Conv3D(32, (3, 3, 3), dilation_rate=(2, 2, 2), activation='relu', padding='same')(inputs)
    conv1 = keras_batchnormalization_relu(conv1)
    print("conv1 shape:", conv1.shape)
    conv1 = Conv3D(32, (3, 3, 3), dilation_rate=(2, 2, 2), activation='relu', padding='same')(conv1)
    conv1 = keras_batchnormalization_relu(conv1)
    print("conv1 shape:", conv1.shape)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)
    print("pool1 shape:", pool1.shape)

    conv2 = Conv3D(64, (3, 3, 3), dilation_rate=(2, 2, 2), activation='relu', padding='same')(pool1)
    conv2 = keras_batchnormalization_relu(conv2)
    print("conv2 shape:", conv2.shape)
    conv2 = Conv3D(64, (3, 3, 3), dilation_rate=(2, 2, 2), activation='relu', padding='same')(conv2)
    conv2 = keras_batchnormalization_relu(conv2)
    print("conv2 shape:", conv2.shape)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
    print("pool2 shape:", pool2.shape)

    conv3 = Conv3D(128, (3, 3, 3), dilation_rate=(2, 2, 2), activation='relu', padding='same')(pool2)
    conv3 = keras_batchnormalization_relu(conv3)
    print("conv3 shape:", conv3.shape)
    conv3 = Conv3D(128, (3, 3, 3), dilation_rate=(2, 2, 2), activation='relu', padding='same')(conv3)
    conv3 = keras_batchnormalization_relu(conv3)
    print("conv3 shape:", conv3.shape)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)
    print("pool3 shape:", pool3.shape)

    # bottleneck
    conv4 = Conv3D(256, (3, 3, 3), dilation_rate=(2, 2, 2), activation='relu', padding='same')(pool3)
    conv4 = keras_batchnormalization_relu(conv4)
    print("conv4 shape:", conv4.shape)
    conv4 = Conv3D(256, (3, 3, 3), dilation_rate=(2, 2, 2), activation='relu', padding='same')(conv4)
    conv4 = keras_batchnormalization_relu(conv4)
    print("conv4 shape:", conv4.shape)

    # decoder: transposed convolutions concatenated with the corresponding encoder features
    deconv6 = Deconvolution3D(128, 2, output_shape=(None, 40, 60, 24, 128), strides=(2, 2, 2), padding='valid', input_shape=(32, 32, 6, 256), name='deconv6')(conv4)
    deconv6 = keras_batchnormalization_relu(deconv6)
    deconv6 = Conv3D(128, 3, activation='relu', padding='same')(deconv6)
    deconv6 = keras_batchnormalization_relu(deconv6)
    conv_conv4 = Conv3D(128, 3, activation='relu', padding='same')(conv3)
    conv_conv4 = keras_batchnormalization_relu(conv_conv4)
    up6 = concatenate([deconv6, conv_conv4], axis=-1)
    conv6 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(up6)
    conv6 = keras_batchnormalization_relu(conv6)
    conv6 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(conv6)
    conv6 = keras_batchnormalization_relu(conv6)

    deconv7 = Deconvolution3D(64, 2, output_shape=(None, 80, 120, 48, 64), strides=(2, 2, 2), padding='valid', input_shape=(64, 64, 12, 128), name='deconv7')(conv6)
    deconv7 = keras_batchnormalization_relu(deconv7)
    deconv7 = Conv3D(64, 3, activation='relu', padding='same')(deconv7)
    deconv7 = keras_batchnormalization_relu(deconv7)
    conv_conv3 = Conv3D(64, 3, activation='relu', padding='same')(conv2)
    conv_conv3 = keras_batchnormalization_relu(conv_conv3)
    up7 = concatenate([deconv7, conv_conv3], axis=-1)
    conv7 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(up7)
    conv7 = keras_batchnormalization_relu(conv7)
    conv7 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv7)
    conv7 = keras_batchnormalization_relu(conv7)

    deconv8 = Deconvolution3D(32, 2, output_shape=(None, 160, 240, 96, 32), strides=(2, 2, 2), padding='valid', input_shape=(64, 64, 24, 64), name='deconv8')(conv7)
    deconv8 = keras_batchnormalization_relu(deconv8)
    deconv8 = Conv3D(32, 3, activation='relu', padding='same')(deconv8)
    deconv8 = keras_batchnormalization_relu(deconv8)
    conv_conv2 = Conv3D(32, 3, activation='relu', padding='same')(conv1)
    conv_conv2 = keras_batchnormalization_relu(conv_conv2)
    up8 = concatenate([deconv8, conv_conv2], axis=-1)
    conv8 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(up8)
    conv8 = keras_batchnormalization_relu(conv8)
    conv8 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv8)
    conv8 = keras_batchnormalization_relu(conv8)

    conv10 = Conv3D(classes, (1, 1, 1), activation='relu')(conv8)
    conv10 = keras_batchnormalization_relu(conv10)
    act = Conv3D(1, 1, activation='sigmoid')(conv10)
    # act = Activation('sigmoid')(conv10)
    # model = Model(inputs=inputs, outputs=act)
    #
    # # model.compile(optimizer=Adam(), loss=dice_coef_loss, metrics=[dice_coef])
    # model.compile(optimizer=Adadelta(), loss=dice_coef_loss, metrics=[dice_coef])
    return act

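# A minimal usage sketch, following the commented-out lines above: build the
# input tensor, pass it through the network, and compile with the Dice loss.
# `dice_coef` / `dice_coef_loss` are assumed to be defined elsewhere in the
# project; the single-channel input shape (160, 240, 96, 1) is an assumption
# consistent with the deconvolution output shapes hard-coded in the model.
from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adadelta


def build_dilated_unet_example():
    inputs = Input(shape=(160, 240, 96, 1))
    act = deconv_conv_unet_model_3d_conv_add_48_dalitaion(inputs, classes=2)
    model = Model(inputs=inputs, outputs=act)
    model.compile(optimizer=Adadelta(), loss=dice_coef_loss, metrics=[dice_coef])
    return model
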
def encoder_simple_conv(img_shape, encoding_dim=32, NUM_CHANNELS=1):
    # workaround for Dropout to work
    #import tensorflow as tf
    #tf.python.control_flow_ops = tf
    from seg_util import conv_block
    from keras.layers import Input, Dense, Convolution3D, MaxPooling3D, UpSampling3D, Reshape, Flatten
    from keras.models import Sequential, Model
    from keras import regularizers
    import numpy as N

    input_shape = (None, img_shape[0], img_shape[1], img_shape[2], NUM_CHANNELS)

    # Use relu activation for the hidden layers to guarantee that non-negative
    # outputs are passed to the max pooling layer. In that case, as long as the
    # output layer has a linear activation, the network can still accommodate
    # negative image intensities; it is just a matter of shifting back using
    # the bias term.
    input_img = Input(shape=input_shape[1:])
    x = input_img
    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), border_mode='same')(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), border_mode='same')(x)

    # x.get_shape() returns a list of tensorflow.python.framework.tensor_shape.Dimension objects
    encoder_conv_shape = [_.value for _ in x.get_shape()]
    x = Flatten()(x)
    if False:
        x = Dense(encoding_dim, activation='relu')(x)
    else:
        # with sparsity
        x = Dense(encoding_dim, activation='relu',
                  activity_regularizer=regularizers.l1(10e-5))(x)

    encoded = x
    encoder = Model(input=input_img, output=encoded)
    print('encoder', 'input shape', encoder.input_shape, 'output shape', encoder.output_shape)

    input_img_decoder = Input(shape=encoder.output_shape[1:])
    x = input_img_decoder
    x = Dense(N.prod(encoder_conv_shape[1:]), activation='relu')(x)
    x = Reshape(encoder_conv_shape[1:])(x)

    if True:
        x = UpSampling3D((2, 2, 2))(x)
        x = conv_block(x, 32, 3, 3, 3)
        x = UpSampling3D((2, 2, 2))(x)
        x = conv_block(x, 32, 3, 3, 3)
        # keep the output layer linear activation, so that the image intensity can be negative
        x = Convolution3D(1, 3, 3, 3, activation='linear', border_mode='same')(x)
    else:
        from keras_contrib.layers import Deconvolution3D
        # the correct choice of subsample is important to connect
        # Deconvolution3D with the output of the last layer
        x = Deconvolution3D(1, 1, 1, 1, output_shape=input_shape, subsample=(4, 4, 4),
                            activation='linear', border_mode='same')(x)

    decoded = x
    decoder = Model(input=input_img_decoder, output=decoded)

    autoencoder = Sequential()
    if True:
        # model with expanded layers, which hopefully allows parallel training
        for l in encoder.layers:
            autoencoder.add(l)
        for l in decoder.layers:
            autoencoder.add(l)
    else:
        # build encoder according to ~/src/tmp/proj/gan/dcgan.py
        autoencoder.add(encoder)
        autoencoder.add(decoder)

    print('autoencoder layers:')
    for l in autoencoder.layers:
        print(l.output_shape)

    return {'autoencoder': autoencoder, 'encoder': encoder, 'decoder': decoder}

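# Usage sketch (an assumption, not from the original source): build the
# autoencoder for cubic 3-D patches and train it to reconstruct its input.
# The spatial size must be divisible by 4 because of the two 2x2x2 pooling /
# upsampling stages; the Keras 2-style fit() signature is also an assumption.
def train_autoencoder_example(data):
    # data: numpy array of shape (n_samples, 32, 32, 32, 1)
    models = encoder_simple_conv(img_shape=(32, 32, 32), encoding_dim=32)
    autoencoder = models['autoencoder']
    autoencoder.compile(optimizer='adam', loss='mse')
    autoencoder.fit(data, data, epochs=10, batch_size=16)
    return models
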
def createOutputNode(self, inputs):
    input = inputs[0]
    coord = inputs[1]

    # construct preBlock
    preBlock_0 = Conv3D(24, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="preBlock.0")(input)
    preBlock_1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="preBlock.1")(preBlock_0)
    preBlock_relu1 = Activation(K.relu)(preBlock_1)
    preBlock_3 = Conv3D(24, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="preBlock.3")(preBlock_relu1)
    preBlock_4 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="preBlock.4")(preBlock_3)
    preBlock_relu2 = Activation(K.relu)(preBlock_4)
    maxpool1 = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding="valid")(preBlock_relu2)

    # the first forward
    forw1_0_conv1 = Conv3D(32, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw1.0.conv1")(maxpool1)
    forw1_0_bn1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw1.0.bn1")(forw1_0_conv1)
    forw1_0_relu1 = Activation(K.relu)(forw1_0_bn1)
    forw1_0_conv2 = Conv3D(32, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw1.0.conv2")(forw1_0_relu1)
    forw1_0_bn2 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw1.0.bn2")(forw1_0_conv2)
    # forward1 short cut
    forw1_0_shortcut_0 = Conv3D(32, kernel_size=(1, 1, 1), strides=(1, 1, 1), padding="valid", name="forw1.0.shortcut.0")(maxpool1)
    forw1_0_shortcut_1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw1.0.shortcut.1")(forw1_0_shortcut_0)
    forw1_0_added = keras.layers.Add()([forw1_0_bn2, forw1_0_shortcut_1])
    forw1_0_relu = Activation(K.relu)(forw1_0_added)

    forw1_1_conv1 = Conv3D(32, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw1.1.conv1")(forw1_0_relu)
    forw1_1_bn1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw1.1.bn1")(forw1_1_conv1)
    forw1_1_relu1 = Activation(K.relu)(forw1_1_bn1)
    forw1_1_conv2 = Conv3D(32, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw1.1.conv2")(forw1_1_relu1)
    forw1_1_bn2 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw1.1.bn2")(forw1_1_conv2)
    forw1_1_added = keras.layers.Add()([forw1_1_bn2, forw1_0_relu])
    forw1_1_relu = Activation(K.relu)(forw1_1_added)
    maxpool2 = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding="valid")(forw1_1_relu)

    # the second forward
    forw2_0_conv1 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw2.0.conv1")(maxpool2)
    forw2_0_bn1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw2.0.bn1")(forw2_0_conv1)
    forw2_0_relu1 = Activation(K.relu)(forw2_0_bn1)
    forw2_0_conv2 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw2.0.conv2")(forw2_0_relu1)
    forw2_0_bn2 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw2.0.bn2")(forw2_0_conv2)
    # forward2 short cut
    forw2_0_shortcut_0 = Conv3D(64, kernel_size=(1, 1, 1), strides=(1, 1, 1), padding="valid", name="forw2.0.shortcut.0")(maxpool2)
    forw2_0_shortcut_1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw2.0.shortcut.1")(forw2_0_shortcut_0)
    forw2_0_added = keras.layers.Add()([forw2_0_bn2, forw2_0_shortcut_1])
    forw2_0_relu = Activation(K.relu)(forw2_0_added)

    forw2_1_conv1 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw2.1.conv1")(forw2_0_relu)
    forw2_1_bn1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw2.1.bn1")(forw2_1_conv1)
    forw2_1_relu1 = Activation(K.relu)(forw2_1_bn1)
    forw2_1_conv2 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw2.1.conv2")(forw2_1_relu1)
    forw2_1_bn2 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw2.1.bn2")(forw2_1_conv2)
    forw2_1_added = keras.layers.Add()([forw2_1_bn2, forw2_0_relu])
    forw2_1_relu = Activation(K.relu)(forw2_1_added)
    maxpool3 = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding="valid")(forw2_1_relu)

    # the third forward
    forw3_0_conv1 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw3.0.conv1")(maxpool3)
    forw3_0_bn1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw3.0.bn1")(forw3_0_conv1)
    forw3_0_relu1 = Activation(K.relu)(forw3_0_bn1)
    forw3_0_conv2 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw3.0.conv2")(forw3_0_relu1)
    forw3_0_bn2 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw3.0.bn2")(forw3_0_conv2)
    # forward3 short cut
    forw3_0_shortcut_0 = Conv3D(64, kernel_size=(1, 1, 1), strides=(1, 1, 1), padding="valid", name="forw3.0.shortcut.0")(maxpool3)
    forw3_0_shortcut_1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw3.0.shortcut.1")(forw3_0_shortcut_0)
    forw3_0_added = keras.layers.Add()([forw3_0_bn2, forw3_0_shortcut_1])
    forw3_0_relu = Activation(K.relu)(forw3_0_added)

    forw3_1_conv1 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw3.1.conv1")(forw3_0_relu)
    forw3_1_bn1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw3.1.bn1")(forw3_1_conv1)
    forw3_1_relu1 = Activation(K.relu)(forw3_1_bn1)
    forw3_1_conv2 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw3.1.conv2")(forw3_1_relu1)
    forw3_1_bn2 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw3.1.bn2")(forw3_1_conv2)
    forw3_1_added = keras.layers.Add()([forw3_1_bn2, forw3_0_relu])
    forw3_1_relu = Activation(K.relu)(forw3_1_added)

    forw3_2_conv1 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw3.2.conv1")(forw3_1_relu)
    forw3_2_bn1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw3.2.bn1")(forw3_2_conv1)
    forw3_2_relu1 = Activation(K.relu)(forw3_2_bn1)
    forw3_2_conv2 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw3.2.conv2")(forw3_2_relu1)
    forw3_2_bn2 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw3.2.bn2")(forw3_2_conv2)
    forw3_2_added = keras.layers.Add()([forw3_2_bn2, forw3_1_relu])
    forw3_2_relu = Activation(K.relu)(forw3_2_added)
    maxpool4 = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding="valid")(forw3_2_relu)

    # the fourth forward
    forw4_0_conv1 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw4.0.conv1")(maxpool4)
    forw4_0_bn1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw4.0.bn1")(forw4_0_conv1)
    forw4_0_relu1 = Activation(K.relu)(forw4_0_bn1)
    forw4_0_conv2 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw4.0.conv2")(forw4_0_relu1)
    forw4_0_bn2 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw4.0.bn2")(forw4_0_conv2)
    # forward4 short cut
    forw4_0_shortcut_0 = Conv3D(64, kernel_size=(1, 1, 1), strides=(1, 1, 1), padding="valid", name="forw4.0.shortcut.0")(maxpool4)
    forw4_0_shortcut_1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw4.0.shortcut.1")(forw4_0_shortcut_0)
    forw4_0_added = keras.layers.Add()([forw4_0_bn2, forw4_0_shortcut_1])
    forw4_0_relu = Activation(K.relu)(forw4_0_added)

    forw4_1_conv1 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw4.1.conv1")(forw4_0_relu)
    forw4_1_bn1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw4.1.bn1")(forw4_1_conv1)
    forw4_1_relu1 = Activation(K.relu)(forw4_1_bn1)
    forw4_1_conv2 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw4.1.conv2")(forw4_1_relu1)
    forw4_1_bn2 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw4.1.bn2")(forw4_1_conv2)
    forw4_1_added = keras.layers.Add()([forw4_1_bn2, forw4_0_relu])
    forw4_1_relu = Activation(K.relu)(forw4_1_added)

    forw4_2_conv1 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw4.2.conv1")(forw4_1_relu)
    forw4_2_bn1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw4.2.bn1")(forw4_2_conv1)
    forw4_2_relu1 = Activation(K.relu)(forw4_2_bn1)
    forw4_2_conv2 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="forw4.2.conv2")(forw4_2_relu1)
    forw4_2_bn2 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="forw4.2.bn2")(forw4_2_conv2)
    forw4_2_added = keras.layers.Add()([forw4_2_bn2, forw4_1_relu])
    forw4_2_relu = Activation(K.relu)(forw4_2_added)

    # Path 1
    path1_0 = Deconvolution3D(64, kernel_size=(2, 2, 2), strides=(2, 2, 2), name="path1.0", output_shape=(None, 64, 16, 16, 16))(forw4_2_relu)
    path1_1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="path1.1")(path1_0)
    path1_0_relu1 = Activation(K.relu)(path1_1)

    # comb3
    comb3 = keras.layers.concatenate([path1_0_relu1, forw3_2_relu], axis=1)

    # back3
    back3_0_conv1 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="back3.0.conv1")(comb3)
    back3_0_bn1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="back3.0.bn1")(back3_0_conv1)
    back3_0_relu1 = Activation(K.relu)(back3_0_bn1)
    back3_0_conv2 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="back3.0.conv2")(back3_0_relu1)
    back3_0_bn2 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="back3.0.bn2")(back3_0_conv2)
    # back3 short cut
    back3_0_shortcut_0 = Conv3D(64, kernel_size=(1, 1, 1), strides=(1, 1, 1), padding="valid", name="back3.0.shortcut.0")(comb3)
    back3_0_shortcut_1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="back3.0.shortcut.1")(back3_0_shortcut_0)
    back3_0_added = keras.layers.Add()([back3_0_bn2, back3_0_shortcut_1])
    back3_0_relu = Activation(K.relu)(back3_0_added)

    back3_1_conv1 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="back3.1.conv1")(back3_0_relu)
    back3_1_bn1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="back3.1.bn1")(back3_1_conv1)
    back3_1_relu1 = Activation(K.relu)(back3_1_bn1)
    back3_1_conv2 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="back3.1.conv2")(back3_1_relu1)
    back3_1_bn2 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="back3.1.bn2")(back3_1_conv2)
    back3_1_added = keras.layers.Add()([back3_1_bn2, back3_0_relu])
    back3_1_relu = Activation(K.relu)(back3_1_added)

    back3_2_conv1 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="back3.2.conv1")(back3_1_relu)
    back3_2_bn1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="back3.2.bn1")(back3_2_conv1)
    back3_2_relu1 = Activation(K.relu)(back3_2_bn1)
    back3_2_conv2 = Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="back3.2.conv2")(back3_2_relu1)
    back3_2_bn2 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="back3.2.bn2")(back3_2_conv2)
    back3_2_added = keras.layers.Add()([back3_2_bn2, back3_1_relu])
    back3_2_relu = Activation(K.relu)(back3_2_added)

    # Path 2
    path2_0 = Deconvolution3D(64, kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="valid", name="path2.0", output_shape=(None, 64, 32, 32, 32))(back3_2_relu)
    path2_1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="path2.1")(path2_0)
    path2_0_relu1 = Activation(K.relu)(path2_1)

    # comb2
    comb2 = keras.layers.concatenate([path2_0_relu1, forw2_1_relu, coord], axis=1)

    # back 2
    back2_0_conv1 = Conv3D(128, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="back2.0.conv1")(comb2)
    back2_0_bn1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="back2.0.bn1")(back2_0_conv1)
    back2_0_relu1 = Activation(K.relu)(back2_0_bn1)
    back2_0_conv2 = Conv3D(128, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="back2.0.conv2")(back2_0_relu1)
    back2_0_bn2 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="back2.0.bn2")(back2_0_conv2)
    # back2 short cut
    back2_0_shortcut_0 = Conv3D(128, kernel_size=(1, 1, 1), strides=(1, 1, 1), padding="valid", name="back2.0.shortcut.0")(comb2)
    back2_0_shortcut_1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="back2.0.shortcut.1")(back2_0_shortcut_0)
    back2_0_added = keras.layers.Add()([back2_0_bn2, back2_0_shortcut_1])
    back2_0_relu = Activation(K.relu)(back2_0_added)

    back2_1_conv1 = Conv3D(128, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="back2.1.conv1")(back2_0_relu)
    back2_1_bn1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="back2.1.bn1")(back2_1_conv1)
    back2_1_relu1 = Activation(K.relu)(back2_1_bn1)
    back2_1_conv2 = Conv3D(128, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="back2.1.conv2")(back2_1_relu1)
    back2_1_bn2 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="back2.1.bn2")(back2_1_conv2)
    back2_1_added = keras.layers.Add()([back2_1_bn2, back2_0_relu])
    back2_1_relu = Activation(K.relu)(back2_1_added)

    back2_2_conv1 = Conv3D(128, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="back2.2.conv1")(back2_1_relu)
    back2_2_bn1 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="back2.2.bn1")(back2_2_conv1)
    back2_2_relu1 = Activation(K.relu)(back2_2_bn1)
    back2_2_conv2 = Conv3D(128, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding="same", name="back2.2.conv2")(back2_2_relu1)
    back2_2_bn2 = BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, name="back2.2.bn2")(back2_2_conv2)
    back2_2_added = keras.layers.Add()([back2_2_bn2, back2_1_relu])
    feat = Activation(K.relu)(back2_2_added)
    dropout_2 = Dropout(0.2)(feat)

    # Output
    output_0 = Conv3D(64, kernel_size=(1, 1, 1), strides=(1, 1, 1), padding="valid", name="output.0")(dropout_2)
    output_relu = Activation(K.relu)(output_0)
    output_2 = Conv3D(5 * len(config['anchors']), kernel_size=(1, 1, 1), strides=(1, 1, 1), padding="valid", name="output.2")(output_relu)

    print(feat.shape)
    print(output_2.shape)
    size = K.int_shape(output_2)
    output = Lambda(self.postprocessOutputTensor,
                    output_shape=(-1, size[2], size[3], size[4], len(config['anchors']), 5))(output_2)
    return output, feat

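# Usage sketch (assumptions, not from the original source): the builder above
# works on channels-first tensors (BatchNormalization(axis=1)) and expects a
# single-channel 3-D patch plus a 3-channel coordinate grid at 1/4 resolution,
# where the grid is concatenated after the second up-convolution. The patch
# size 128 is an assumption consistent with the hard-coded Deconvolution3D
# output shapes (16^3 and 32^3). `net` is assumed to be an instance of the
# class that defines createOutputNode and postprocessOutputTensor, with
# config['anchors'] available in scope.
from keras.layers import Input
from keras.models import Model


def build_detector_example(net):
    image = Input(shape=(1, 128, 128, 128), name='image')
    coord = Input(shape=(3, 32, 32, 32), name='coord')
    output, feat = net.createOutputNode([image, coord])
    return Model(inputs=[image, coord], outputs=output)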