Exemple #1
0
 def __init__(self, latent_dim, input_dim, learning_rate, num_classes, embed_dim):
     """Build the class-conditioned 3D VAE: a conv encoder and a
     label-conditioned transposed-conv generator.

     Args:
         latent_dim: Size of the latent code. The encoder's final dense
             layer has 2 * latent_dim units (presumably mean and
             log-variance of q(z|x) -- VAE convention; confirm against the
             sampling code).
         input_dim: Edge length of the cubic input volume; inputs are
             (input_dim, input_dim, input_dim, 1).
         learning_rate: Learning rate for the Adam optimizer.
         num_classes: Vocabulary size of the class-label embedding.
         embed_dim: Output dimension of the label embedding.
     """
     super(CCVAE, self).__init__()
     self.optimizer = tf.keras.optimizers.Adam(learning_rate)
     self.latent_dim = latent_dim
     # Generator starts from a cube of side input_dim / 2**(gen_layers-1),
     # so its stacked stride-2 transposed convolutions recover input_dim.
     self.gen_layers = 5
     self.gen_init_size = int(input_dim / (2 ** (self.gen_layers-1)))
     self.reshape_channels = 16
     # Flattened size of the label-conditioning tensor fed to the generator.
     self.prior_dim = self.gen_init_size ** 3 * self.reshape_channels

     # Encoder: three stride-2 3D convolutions, flatten, then a dense layer
     # emitting 2 * latent_dim values.
     self.enc_model = tf.keras.Sequential()
     self.enc_model.add(InputLayer(input_shape=(input_dim, input_dim, input_dim, 1)))
     self.enc_model.add(Conv3D( filters=32, kernel_size=4, strides=(2, 2, 2), padding="SAME", activation='relu'))
     self.enc_model.add(Conv3D( filters=64, kernel_size=4, strides=(2, 2, 2), padding="SAME", activation='relu'))
     self.enc_model.add(Conv3D( filters=128,kernel_size=4, strides=(2, 2, 2), padding="SAME", activation='relu'))
     self.enc_model.add(Flatten())
     self.enc_model.add(Dense(self.latent_dim + self.latent_dim))

     # Label branch: embed the class id and expand it to a conditioning
     # volume matching the generator's initial spatial grid.
     txt_input = Input(shape=(1,))
     xt = Embedding(input_length=1, input_dim=num_classes, output_dim=embed_dim)(txt_input)
     xt = Flatten()(xt)
     xt = Dense( int(self.prior_dim/2), activation=tf.nn.relu )(xt)
     xt = Dense( self.prior_dim, activation=tf.nn.relu )(xt)
     txt_output = Reshape(target_shape=(self.gen_init_size, self.gen_init_size, self.gen_init_size, self.reshape_channels))(xt)

     # Generator: project the latent code to the initial grid, concatenate
     # the label volume along channels, then upsample through four stride-2
     # transposed convolutions; the last layer has no activation (logits).
     enc_input = Input(shape=(self.latent_dim,))
     xgen = Dense(units= (self.gen_init_size ** 3) * self.reshape_channels, activation=tf.nn.relu)(enc_input)
     xgen = Reshape(target_shape=(self.gen_init_size, self.gen_init_size, self.gen_init_size, self.reshape_channels))(xgen)
     xgen = Concatenate(axis=-1)([txt_output, xgen])
     xgen = Conv3DTranspose( filters=256, kernel_size=4, strides=(2, 2, 2), padding="SAME", activation='relu')(xgen)
     xgen = Conv3DTranspose( filters=128, kernel_size=4, strides=(2, 2, 2), padding="SAME", activation='relu')(xgen)
     xgen = Conv3DTranspose( filters=64,  kernel_size=4, strides=(2, 2, 2), padding="SAME", activation='relu')(xgen)
     xgen = Conv3DTranspose( filters=32,  kernel_size=4, strides=(2, 2, 2), padding="SAME", activation='relu')(xgen)
     gen_out = Conv3DTranspose( filters=1,   kernel_size=4, strides=(1, 1, 1), padding="SAME")(xgen)
     self.gen_model = tf.keras.models.Model(inputs=[enc_input, txt_input], outputs=[gen_out])
Exemple #2
0
def autodecoder(latent):
    """Decoder: alternate relu transposed 3D convolutions with upsampling.

    Relies on `stride` and `padding` being defined in the enclosing scope.
    The final layer is named 'decode'; relu was kept as its activation
    (softmax and sigmoid were tried and performed worse).
    """
    def deconv(tensor, n_filters, kernel, name=None):
        # Shared transposed-conv settings for every decoder stage.
        return Conv3DTranspose(filters=n_filters,
                               kernel_size=kernel,
                               strides=stride,
                               activation='relu',
                               padding=padding,
                               data_format='channels_last',
                               name=name)(tensor)

    x = deconv(latent, 1, (2, 2, 2))
    x = UpSampling3D(size=(1, 1, 2), data_format='channels_last')(x)
    x = deconv(x, 16, (2, 2, 2))
    x = UpSampling3D(size=(3, 3, 2), data_format='channels_last')(x)
    x = deconv(x, 64, (3, 3, 3))
    x = UpSampling3D(size=(3, 2, 2), data_format='channels_last')(x)
    return deconv(x, 32, (3, 3, 3), name='decode')
Exemple #3
0
def load_model():
    """Return the model used for abnormal event detection in videos using
    a spatiotemporal autoencoder.
    """
    model = Sequential()

    # Spatial encoder: two strided tanh Conv3D layers.
    model.add(Conv3D(filters=128, kernel_size=(11, 11, 1), strides=(4, 4, 1),
                     padding='valid', input_shape=(227, 227, 10, 1),
                     activation='tanh'))
    model.add(Conv3D(filters=64, kernel_size=(5, 5, 1), strides=(2, 2, 1),
                     padding='valid', activation='tanh'))

    # Temporal encoder/decoder: three ConvLSTM2D layers with dropout.
    model.add(ConvLSTM2D(filters=64, kernel_size=(3, 3), strides=1,
                         padding='same', dropout=0.4, recurrent_dropout=0.3,
                         return_sequences=True))
    model.add(ConvLSTM2D(filters=32, kernel_size=(3, 3), strides=1,
                         padding='same', dropout=0.3, return_sequences=True))
    model.add(ConvLSTM2D(filters=64, kernel_size=(3, 3), strides=1,
                         padding='same', dropout=0.5, return_sequences=True))

    # Spatial decoder: mirrors the encoder with transposed convolutions.
    model.add(Conv3DTranspose(filters=128, kernel_size=(5, 5, 1),
                              strides=(2, 2, 1), padding='valid',
                              activation='tanh'))
    model.add(Conv3DTranspose(filters=1, kernel_size=(11, 11, 1),
                              strides=(4, 4, 1), padding='valid',
                              activation='tanh'))

    model.compile(optimizer='adam', loss='mean_squared_error',
                  metrics=['accuracy'])
    return model
    def build(self):
        """Assemble the U-Net-style 3D segmentation network into self.model.

        Encoder blocks A-C contract with (2, 2, 1) max pooling (in-plane
        only, depth preserved); decoder blocks C-E expand with
        (2, 2, 1)-strided transposed convolutions and concatenate the
        matching encoder features as skip connections.
        """
        wmn_input = Input(shape=self.input_shape)

        # CBR is a helper defined elsewhere in this file -- presumably
        # Conv3D + BatchNorm + ReLU; confirm against its definition.
        block_A = CBR(wmn_input, 16, (3, 3, 3))
        block_A = CBR(block_A, 32, (3, 3, 3))

        block_B = MaxPooling3D(pool_size=(2, 2, 1))(block_A)
        block_B = CBR(block_B, 32, (3, 3, 3))
        block_B = CBR(block_B, 64, (3, 3, 3))

        block_C = MaxPooling3D(pool_size=(2, 2, 1))(block_B)
        block_C = CBR(block_C, 64, (3, 3, 3))
        block_C = CBR(block_C, 128, (3, 3, 3))
        # Dropout only at the bottleneck, before the first up-step.
        block_C = Dropout(rate=self.dropout_rate)(block_C)
        block_C = Conv3DTranspose(filters=64, kernel_size=(3, 3, 3), strides=(2, 2, 1), padding='same')(block_C)

        # Skip connection from block_B.
        block_D = Concatenate()([block_C, block_B])
        block_D = CBR(block_D, 64, (3, 3, 3))
        block_D = CBR(block_D, 64, (3, 3, 3))
        block_D = Conv3DTranspose(filters=32, kernel_size=(3, 3, 3), strides=(2, 2, 1), padding='same')(block_D)

        # Skip connection from block_A.
        block_E = Concatenate()([block_D, block_A])
        block_E = CBR(block_E, 32, (3, 3, 3))
        block_E = CBR(block_E, 32, (3, 3, 3))

        if self.single_slice_out:
            # (1, 1, 5) kernel with default 'valid' padding collapses the
            # depth axis to 1 -- assumes a 5-slice input depth; confirm.
            block_E = Conv3D(filters=32, kernel_size=(1, 1, 5))(block_E)
            csfn_output = Conv3D(filters=1, kernel_size=(1, 1, 1))(block_E)
            csfn_output = Reshape((256, 256, 1))(csfn_output)
        else:
            csfn_output = Conv3D(filters=1, kernel_size=(1, 1, 1))(block_E)
        # Remember the producing layer's name for later tensor lookup.
        self.csfn_output_name = csfn_output.name.split('/')[0]

        self.model = keras.Model(inputs=[wmn_input], outputs=[csfn_output], name='network')
Exemple #5
0
 def __init__(self,
              filters,
              data_format=None,
              kernel_size=(3, 3, 3),
              strides=(2, 2, 2),
              activation=tf.nn.relu,
              *args,
              **kwargs):
     """Three-layer 3D transposed-convolution synthesis block.

     Only the first layer is strided (upsamples); the remaining two keep
     the spatial size ('same' padding, default strides).

     Args:
         filters: Filter count shared by all three layers.
         data_format: Optional data format; normalized by Keras conv_utils
             to 'channels_first' / 'channels_last'.
         kernel_size: Kernel size shared by all three layers.
         strides: Strides applied to the first (upsampling) layer only.
         activation: Activation used by every layer.
         *args: Forwarded to the parent constructor.
         **kwargs: Forwarded to the parent constructor.
     """
     data_format = conv_utils.normalize_data_format(data_format)
     # Settings shared by all three transposed convolutions.
     params = {
         'padding': 'same',
         'data_format': data_format,
         'use_bias': True,
         'activation': activation,
         'filters': filters,
         'kernel_size': kernel_size
     }
     layers = [
         Conv3DTranspose(strides=strides, **params),
         Conv3DTranspose(**params),
         Conv3DTranspose(**params)
     ]
     super(SynthesisBlock, self).__init__(layers,
                                          *args,
                                          data_format=data_format,
                                          **kwargs)
Exemple #6
0
def bneck_resid3d_up(x, filt, ksize, dfmt, strides, dropout, act, policy='float32'):
    """Pre-activation 3D bottleneck residual block with upsampling.

    The residual path runs 1x1x1 reduce (filt/4) -> strided ksize
    transposed conv (filt/4) -> 1x1x1 expand (filt), each preceded by
    BN + activation. The shortcut is upsampled by a strided 1x1x1
    transposed conv so shapes match at the final add.
    """
    bn_axis = -1 if dfmt == 'channels_last' else 1
    quarter = int(round(filt / 4))

    def preact(tensor):
        # Pre-activation ordering: batch norm, then the chosen activation.
        tensor = BatchNormalization(axis=bn_axis)(tensor)
        return activation_layer(act)(tensor)

    # Projection shortcut: upsample the skip path to the output shape.
    shortcut = Conv3DTranspose(filt, kernel_size=[1, 1, 1], strides=strides,
                               padding='same', data_format=dfmt,
                               dtype=policy)(x)

    # Bottleneck: reduce channels with a 1x1x1 conv.
    out = Conv3D(quarter, kernel_size=[1, 1, 1], padding='same',
                 data_format=dfmt, dtype=policy)(preact(x))

    # Strided transposed conv performs the actual upsampling.
    out = Conv3DTranspose(quarter, ksize, strides=strides, padding='same',
                          data_format=dfmt, dtype=policy)(preact(out))

    # Restore the full filter count (no strides here).
    out = Conv3D(filt, kernel_size=[1, 1, 1], padding='same',
                 data_format=dfmt, dtype=policy)(preact(out))

    # Optional dropout before the residual merge.
    if dropout > 0.:
        out = Dropout(rate=dropout)(out)

    # Fuse the residual path with the shortcut.
    return tf.add(out, shortcut)
Exemple #7
0
    def build(self):
        """Builds the Keras model for the generator.

        Three transposed-conv upsampling stages (128, 64, 32 filters, each
        doubling the spatial size) followed by a linear Conv3D projection
        to `self.output_channels` channels. The spatial input shape is
        fully dynamic: (None, None, None, input_channels).
        """
        inputs = Input(shape=(None, None, None, self.input_channels))

        x = inputs
        # Each stage: stride-2 transposed conv -> batch norm -> LeakyReLU.
        for n_filters in (128, 64, 32):
            x = Conv3DTranspose(n_filters, (3, 3, 3), strides=(2, 2, 2),
                                padding='same')(x)
            x = BatchNormalization()(x)
            x = LeakyReLU(alpha=0.01)(x)

        # Final projection; no activation (raw outputs).
        outputs = Conv3D(self.output_channels, (3, 3, 3),
                         padding='same',
                         activation=None)(x)

        self.model = Model(inputs, outputs)
Exemple #8
0
 def __init__(self, filters, data_format=None, activation=tf.nn.relu, *args, **kwargs):
     """Synthesis (decoder) transform: three stride-2 3D transposed convs.

     The first two layers use `filters` 5x5x5 kernels; the last maps down
     to a single output channel with a 9x9x9 kernel. Every layer doubles
     the spatial resolution (strides (2, 2, 2), 'same' padding).
     """
     data_format = conv_utils.normalize_data_format(data_format)
     # Settings shared by all three layers.
     params = {'strides': (2, 2, 2), 'padding': 'same', 'data_format': data_format, 'use_bias': True,
               'activation': activation}
     layers = [Conv3DTranspose(filters, (5, 5, 5), **params),
               Conv3DTranspose(filters, (5, 5, 5), **params),
               Conv3DTranspose(1, (9, 9, 9), **params)]
     super(SynthesisTransformV1, self).__init__(layers, *args, **kwargs)
def BuildModel(input_shape=(227, 227, 10, 1)):
    """Build the spatiotemporal autoencoder for abnormal-event detection.

    Conv3D spatial encoder -> ConvLSTM2D temporal encoder/decoder ->
    Conv3DTranspose spatial decoder mirroring the encoder.

    Arguments:
        input_shape: 4-tuple (height, width, frames_per_input, channels).

    Returns:
        (model, encoder): the full autoencoder, and a model exposing the
        temporal-encoder features; both share the same input tensor.

    Raises:
        ValueError: if `input_shape` is not a 4-element tuple.
    """
    # BUG FIX: check the type before calling len(), so a non-sized argument
    # (e.g. an int) raises the documented ValueError instead of a TypeError.
    if not isinstance(input_shape, tuple) or len(input_shape) != 4:
        raise ValueError(
            'Invalid value given to the argument `input_shape`, it must be a `tuple` containing 4 values in this manner: (height, width, frames_per_input, channels)'
        )
    inputs = Input(shape=input_shape)  # renamed from `input` (builtin shadow)

    # Spatial Encoder
    spatial_enc = Conv3D(filters=128,
                         kernel_size=(11, 11, 1),
                         strides=(4, 4, 1),
                         padding='valid',
                         activation='tanh')(inputs)
    spatial_enc = Conv3D(filters=64,
                         kernel_size=(5, 5, 1),
                         strides=(2, 2, 1),
                         padding='valid',
                         activation='tanh')(spatial_enc)

    # Temporal Encoder
    temporal_enc = ConvLSTM2D(filters=64,
                              kernel_size=(3, 3),
                              strides=1,
                              padding='same',
                              dropout=0.4,
                              recurrent_dropout=0.3,
                              return_sequences=True)(spatial_enc)
    temporal_enc = ConvLSTM2D(filters=32,
                              kernel_size=(3, 3),
                              strides=1,
                              padding='same',
                              dropout=0.3,
                              return_sequences=True)(temporal_enc)

    # Temporal Decoder
    temporal_dec = ConvLSTM2D(filters=64,
                              kernel_size=(3, 3),
                              strides=1,
                              return_sequences=True,
                              padding='same',
                              dropout=0.5)(temporal_enc)

    # Spatial Decoder (mirrors the encoder's kernels and strides)
    spatial_dec = Conv3DTranspose(filters=128,
                                  kernel_size=(5, 5, 1),
                                  strides=(2, 2, 1),
                                  padding='valid',
                                  activation='tanh')(temporal_dec)
    spatial_dec = Conv3DTranspose(filters=1,
                                  kernel_size=(11, 11, 1),
                                  strides=(4, 4, 1),
                                  padding='valid',
                                  activation='tanh')(spatial_dec)

    # Full autoencoder plus an encoder-only view of the same graph.
    model = Model(inputs=inputs, outputs=spatial_dec)
    encoder = Model(inputs=inputs, outputs=temporal_enc)
    return model, encoder
 def __init__(self, filters=32, data_format='channels_first', activation=tf.nn.relu, **kwargs):
     """Symmetric 3D conv autoencoder layers: three stride-2 Conv3D
     encoder layers followed by three stride-2 Conv3DTranspose decoder
     layers, all with 5x5x5 kernels.

     The final transposed layer outputs a single channel through a
     sigmoid, overriding the shared activation.
     """
     super().__init__()
     # Settings shared by every layer in the stack.
     params = {'strides': (2, 2, 2), 'padding': 'same', 'data_format': data_format, 'activation': activation, 'use_bias': True}
     self.conv3d_0 = Conv3D(name='conv3d_0', filters=filters, kernel_size=(5, 5, 5), **params)
     self.conv3d_1 = Conv3D(name='conv3d_1', filters=filters, kernel_size=(5, 5, 5), **params)
     self.conv3d_2 = Conv3D(name='conv3d_2', filters=filters, kernel_size=(5, 5, 5), **params)
     self.conv3dt_0 = Conv3DTranspose(name='conv3dt_0', filters=filters, kernel_size=(5, 5, 5), **params)
     self.conv3dt_1 = Conv3DTranspose(name='conv3dt_1', filters=filters, kernel_size=(5, 5, 5), **params)
     # Last decoder layer: single channel, sigmoid output.
     self.conv3dt_2 = Conv3DTranspose(name='conv3dt_2', filters=1, kernel_size=(5, 5, 5), **{**params, 'activation': 'sigmoid'})
    def simple_block_3d(input,
                        number_of_filters,
                        downsample=False,
                        upsample=False,
                        convolution_kernel_size=(3, 3, 3),
                        deconvolution_kernel_size=(2, 2, 2),
                        weight_decay=0.0,
                        dropout_rate=0.0):
        """Pre-activation 3D residual block with optional down/up-sampling.

        Pipeline: BN -> ThresholdedReLU -> (optional max pool) -> Conv3D ->
        (optional deconv + UpSampling3D) -> (optional dropout); the input
        branch is resized to match and merged via `skip_connection`
        (defined elsewhere in this file).

        Arguments:
            input: input Keras tensor (NOTE(review): this parameter name
                shadows the `input` builtin).
            number_of_filters: filters used by every conv/deconv here.
            downsample: halve the spatial dims with max pooling (the
                shortcut uses a strided 1x1x1 conv to match).
            upsample: double the spatial dims with deconv + UpSampling3D
                on both branches.
            convolution_kernel_size: kernel size of the main Conv3D.
            deconvolution_kernel_size: kernel size of the optional deconv.
            weight_decay: L2 regularization factor.
            dropout_rate: dropout applied after the conv stack when > 0.

        Returns:
            The merged output tensor.
        """

        number_of_output_filters = number_of_filters

        output = BatchNormalization()(input)
        output = ThresholdedReLU(theta=0)(output)

        if downsample:
            output = MaxPooling3D(pool_size=(2, 2, 2))(output)

        output = Conv3D(
            filters=number_of_filters,
            kernel_size=convolution_kernel_size,
            padding='same',
            kernel_regularizer=regularizers.l2(weight_decay))(output)

        if upsample:
            output = Conv3DTranspose(
                filters=number_of_filters,
                kernel_size=deconvolution_kernel_size,
                padding='same',
                kernel_initializer=initializers.he_normal(),
                kernel_regularizer=regularizers.l2(weight_decay))(output)
            output = UpSampling3D(size=(2, 2, 2))(output)

        if dropout_rate > 0.0:
            output = Dropout(rate=dropout_rate)(output)

        # Modify the input so that it has the same size as the output

        if downsample:
            input = Conv3D(filters=number_of_output_filters,
                           kernel_size=(1, 1, 1),
                           strides=(2, 2, 2),
                           padding='same')(input)
        elif upsample:
            input = Conv3DTranspose(filters=number_of_output_filters,
                                    kernel_size=(1, 1, 1),
                                    padding='same')(input)
            input = UpSampling3D(size=(2, 2, 2))(input)
        # NOTE(review): the branch below is unreachable --
        # number_of_output_filters is assigned number_of_filters above, so
        # this condition is always False. Likely a leftover from a version
        # where the two could differ.
        elif number_of_filters != number_of_output_filters:
            input = Conv3D(filters=number_of_output_filters,
                           kernel_size=(1, 1, 1),
                           padding='same')(input)

        output = skip_connection(input, output)

        return (output)
Exemple #12
0
    def generator(self):
        r"""Generator module for VoxelGAN(3DGAN). Use it as a regular TensorFlow 2.0 Keras Model.

        Projects the noise vector to a 2x2x2 seed volume, then doubles the
        spatial side with each stride-2 transposed convolution; the final
        layer outputs one sigmoid channel.

        Return:
            A tf.keras model
        """

        # Hyperparameters pulled from the run configuration.
        noise_dim = self.config["noise_dim"]
        gen_channels = self.config["gen_channels"]
        gen_layers = len(gen_channels)
        activation = self.config["activation"]
        kernel_initializer = self.config["kernel_initializer"]
        kernel_size = self.config["kernel_size"]
        kernel_regularizer = self.config["kernel_regularizer"]

        # Side length check: the 2x2x2 seed is doubled by each of the
        # gen_layers blocks plus the final layer, giving
        # 2 * 2**(gen_layers + 1) = 2**(gen_layers + 2).
        # NOTE(review): the assertion message does not literally match the
        # checked condition (it reads "2^(channels) == side_length / 4");
        # confirm the intended wording.
        assert (
            2**(gen_layers + 2) == self.side_length
        ), "2^(Number of generator channels) must be equal to side_length / 4"

        model = tf.keras.Sequential()

        # Dense projection of the noise vector to 8 values, reshaped into a
        # single-channel 2x2x2 seed volume.
        model.add(
            Dense(
                2 * 2 * 2,
                activation=activation,
                kernel_initializer=kernel_initializer,
                input_dim=noise_dim,
            ))
        model.add(BatchNormalization())
        model.add(Reshape((2, 2, 2, 1)))

        # One upsampling block per configured channel count.
        for channel in gen_channels:
            model.add(
                Conv3DTranspose(
                    channel,
                    kernel_size,
                    strides=2,
                    padding="same",
                    activation=activation,
                    kernel_initializer=kernel_initializer,
                    kernel_regularizer=kernel_regularizer,
                ))
            model.add(BatchNormalization())

        # Final doubling to the target side length; sigmoid voxel occupancy.
        model.add(
            Conv3DTranspose(1,
                            kernel_size,
                            strides=2,
                            padding="same",
                            activation="sigmoid"))

        return model
    def create_model_3D(self, input_shape, n_labels=2):
        """Assemble a 3D U-Net.

        The first level pools/upsamples only in-plane ((1, 2, 2)); deeper
        levels use isotropic (2, 2, 2) pooling. Filter counts come from
        self.feature_map, and each pre-pooling tensor is kept as a skip
        connection for the expanding path.
        """
        def double_conv(tensor, n_filters):
            # Two consecutive conv layers (batch norm per self.ba_norm).
            tensor = conv_layer_3D(tensor, n_filters, self.ba_norm, strides=1)
            return conv_layer_3D(tensor, n_filters, self.ba_norm, strides=1)

        inputs = Input(input_shape)

        # Contracting path; remember each pre-pooling tensor.
        skips = []
        x = double_conv(inputs, self.feature_map[0])
        skips.append(x)
        x = MaxPooling3D(pool_size=(1, 2, 2))(x)

        for n_filters in self.feature_map[1:]:
            x = double_conv(x, n_filters)
            skips.append(x)
            x = MaxPooling3D(pool_size=(2, 2, 2))(x)

        # Bottleneck.
        x = double_conv(x, self.feature_map[-1])

        # Expanding path for every level except the first.
        for i in reversed(range(1, len(self.feature_map))):
            n_filters = self.feature_map[i]
            x = Conv3DTranspose(n_filters, (2, 2, 2), strides=(2, 2, 2),
                                padding='same')(x)
            x = concatenate([x, skips[i]], axis=-1)
            x = double_conv(x, n_filters)

        # Last expanding level mirrors the (1, 2, 2) in-plane pooling.
        n_filters = self.feature_map[0]
        x = Conv3DTranspose(n_filters, (1, 2, 2), strides=(1, 2, 2),
                            padding='same')(x)
        x = concatenate([x, skips[0]], axis=-1)
        x = double_conv(x, n_filters)

        # Per-voxel classification head.
        conv_out = Conv3D(n_labels, (1, 1, 1), activation=self.activation)(x)
        return Model(inputs=[inputs], outputs=[conv_out])
Exemple #14
0
def MultiResUnet3D(height, width, z, n_channels):
    '''
    MultiResUNet3D

    Arguments:
        height {int} -- height of image 
        width {int} -- width of image
        z {int} -- length along z axis 
        n_channels {int} -- number of channels in image
    
    Returns:
        [keras model] -- MultiResUNet3D model
    '''


    inputs = Input((height, width, z, n_channels))

    # Encoder: four MultiResBlock + pooling stages. ResPath (defined
    # elsewhere in this file) post-processes each skip tensor; its second
    # argument decreases with depth (4, 3, 2, 1) -- presumably the number
    # of residual steps; confirm against ResPath's definition.
    mresblock1 = MultiResBlock(32, inputs) 
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(mresblock1)
    mresblock1 = ResPath(32, 4, mresblock1) 

    mresblock2 = MultiResBlock(32*2, pool1)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(mresblock2)
    mresblock2 = ResPath(32*2, 3,mresblock2) 

    mresblock3 = MultiResBlock(32*4, pool2)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2))(mresblock3)
    mresblock3 = ResPath(32*4, 2,mresblock3) 

    mresblock4 = MultiResBlock(32*8, pool3)
    pool4 = MaxPooling3D(pool_size=(2, 2, 2))(mresblock4)
    mresblock4 = ResPath(32*8, 1,mresblock4) 

    # Bottleneck.
    mresblock5 = MultiResBlock(32*16, pool4)

    # Decoder: stride-2 transposed conv, concatenate the ResPath skip
    # tensor along the channel axis (axis=4), then a MultiResBlock.
    up6 = concatenate([Conv3DTranspose(32*8, (2, 2, 2), strides=(2, 2, 2), padding='same')(mresblock5), mresblock4], axis=4)
    mresblock6 = MultiResBlock(32*8,up6)
    
    up7 = concatenate([Conv3DTranspose(32*4, (2, 2, 2), strides=(2, 2, 2), padding='same')(mresblock6), mresblock3], axis=4)
    mresblock7 = MultiResBlock(32*4,up7)

    up8 = concatenate([Conv3DTranspose(32*2, (2, 2, 2), strides=(2, 2, 2), padding='same')(mresblock7), mresblock2], axis=4)
    mresblock8 = MultiResBlock(32*2,up8)

    up9 = concatenate([Conv3DTranspose(32, (2, 2, 2), strides=(2, 2, 2), padding='same')(mresblock8), mresblock1], axis=4)
    mresblock9 = MultiResBlock(32,up9)

    # 1x1x1 conv + sigmoid gives the single-channel output mask.
    conv10 = conv3d_bn(mresblock9 , 1, 1, 1, 1, activation='sigmoid')

    model = Model(inputs=[inputs], outputs=[conv10])

    return model
Exemple #15
0
def Unet3D(inputs, num_classes):
    """3D U-Net with three pooling levels.

    Encoder doubles filters per level (32/64 -> 64/128 -> 128/256), the
    bottleneck uses 256/512 with dropout, and the decoder upsamples with
    stride-2 transposed convolutions concatenated with encoder skips.
    Output is a per-voxel sigmoid over num_classes channels, computed in
    float32 (explicit dtype on the activation layer).
    """
    def conv_relu(tensor, n_filters):
        # Standard 3x3x3 'same' convolution with relu.
        return Conv3D(n_filters, 3, activation='relu', padding='same')(tensor)

    # Encoder
    conv1 = conv_relu(conv_relu(inputs, 32), 64)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)
    conv2 = conv_relu(conv_relu(pool1, 64), 128)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
    conv3 = conv_relu(conv_relu(pool2, 128), 256)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)

    # Bottleneck
    bottleneck = conv_relu(conv_relu(pool3, 256), 512)
    bottleneck = Dropout(0.5)(bottleneck)

    # Decoder (note the original padding pattern: valid, valid, same)
    up6 = Conv3DTranspose(512, 2, activation='relu', strides=(2, 2, 2),
                          padding='valid')(bottleneck)
    x = concatenate([conv3, up6], axis=-1)
    x = conv_relu(conv_relu(x, 256), 256)

    up7 = Conv3DTranspose(256, 2, activation='relu', strides=(2, 2, 2),
                          padding='valid')(x)
    x = concatenate([conv2, up7], axis=-1)
    x = conv_relu(conv_relu(x, 128), 128)

    up8 = Conv3DTranspose(128, 2, activation='relu', strides=(2, 2, 2),
                          padding='same')(x)
    x = concatenate([conv1, up8], axis=-1)
    x = conv_relu(conv_relu(x, 64), 64)

    # Classification head; sigmoid kept in float32.
    logits = Conv3D(num_classes, 1)(x)
    outputs = tf.keras.layers.Activation('sigmoid', dtype='float32')(logits)

    return Model(inputs=inputs, outputs=outputs)
def make_dnn(**kwargs):
    """Multi-view 2D encoder -> dense bottleneck -> 3D volumetric decoder.

    Takes NUM_VIEWS stacked 2D projections of shape PROJ_SHAPE and emits a
    (64, 64, 64) volume with sigmoid voxel values.
    """
    inputs = Input(shape=(PROJ_SHAPE[0], PROJ_SHAPE[1], NUM_VIEWS))

    # Encoder: six conv / pool / dropout stages (first stage uses a 7x7
    # kernel, the rest 3x3).
    x = inputs
    for n_filters, kernel in ((32, 7), (64, 3), (64, 3),
                              (128, 3), (128, 3), (256, 3)):
        x = Conv2D(n_filters, kernel, activation='relu', padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(0.2)(x)
    x = Flatten()(x)

    # Dense bottleneck reshaped into a coarse 6x6x6x20 volume
    # (4320 = 6 * 6 * 6 * 20).
    x = Dense(4320, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Reshape((6, 6, 6, 20))(x)

    # Decoder: upsampling interleaved with 'valid' transposed convolutions.
    x = Dropout(0.2)(x)
    x = UpSampling3D(size=(2, 2, 2))(x)
    x = Dropout(0.2)(x)
    x = Conv3DTranspose(256, 3, activation='relu', padding='valid')(x)
    x = Dropout(0.2)(x)
    x = UpSampling3D(size=(2, 2, 2))(x)
    x = Conv3DTranspose(128, 3, activation='relu', padding='valid')(x)
    x = Dropout(0.2)(x)
    x = UpSampling3D(size=(2, 2, 2))(x)
    x = Conv3DTranspose(64, 3, activation='relu', padding='valid')(x)
    x = Dropout(0.2)(x)
    x = Conv3DTranspose(1, 3, activation='sigmoid', padding='valid')(x)

    # Drop the trailing channel axis.
    outputs = Reshape((64, 64, 64))(x)

    return Model(inputs=inputs, outputs=outputs)
Exemple #17
0
 def __init__(self,
              filters,
              data_format=None,
              kernel_size=(3, 3, 3),
              activation=tf.nn.relu,
              residual_mode='add',
              *args,
              **kwargs):
     """Progressive synthesis transform: three SynthesisBlocks with
     halving filter counts (filters, filters // 2, filters // 4) followed
     by a single-channel, non-strided Conv3DTranspose output layer.

     Args:
         filters: Filter count of the first synthesis block; later blocks
             use filters // 2 and filters // 4.
         data_format: Optional data format; normalized via conv_utils.
         kernel_size: Kernel size shared by the blocks and output layer.
         activation: Activation used throughout.
         residual_mode: Residual combination mode forwarded to each
             SynthesisBlock (e.g. 'add').
         *args: Forwarded to the parent constructor.
         **kwargs: Forwarded to the parent constructor.
     """
     data_format = conv_utils.normalize_data_format(data_format)
     # Settings shared by the three synthesis blocks.
     params = {
         'kernel_size': kernel_size,
         'activation': activation,
         'data_format': data_format,
         'residual_mode': residual_mode
     }
     layers = [
         SynthesisBlock(filters, **params),
         SynthesisBlock(filters // 2, **params),
         SynthesisBlock(filters // 4, **params),
         Conv3DTranspose(1,
                         kernel_size,
                         padding="same",
                         use_bias=True,
                         activation=activation,
                         data_format=data_format)
     ]
     super(SynthesisTransformProgressiveV2,
           self).__init__(layers, *args, **kwargs)
Exemple #18
0
    def upconv3d(inputs: Tensor,
                 skip_input: Tensor,
                 filters: int,
                 loop: int = 2) -> Tensor:
        """Upsample `inputs` 2x with a transposed conv, crop-concatenate
        the encoder skip tensor, then refine with `loop` convolutions.

        Args:
            inputs: Decoder feature map to upsample.
            skip_input: Matching encoder feature map for the skip link.
            filters: Filters for the transposed conv and the conv block.
            loop: Number of convolutions applied after concatenation
                (forwarded to conv3d, defined elsewhere in this file).

        Returns:
            The refined, upsampled feature map.
        """
        def _crop_concat() -> Tensor:
            def crop(concat_layers: List[Tensor]) -> K:
                # Crop the larger tensor (the padded, upsampled decoder
                # path) down to the skip tensor's spatial size, then
                # concatenate along channels.
                # NOTE(review): big[:, :-dh, :-dw, :-dd, :] assumes every
                # spatial diff is > 0; a zero diff would slice the axis
                # empty -- the ZeroPadding3D below presumably guarantees a
                # positive diff. Confirm for all input sizes.
                big, small = concat_layers
                big_shape, small_shape = tf.shape(big), tf.shape(small)
                sh, sw, sd = small_shape[1], small_shape[2], small_shape[3]
                bh, bw, bd = big_shape[1], big_shape[2], big_shape[3]
                dh, dw, dd = bh - sh, bw - sw, bd - sd
                big_crop = big[:, :-dh, :-dw, :-dd, :]
                return K.concatenate([small, big_crop], axis=-1)

            return Lambda(crop)

        # Pad one voxel on the far side of each spatial axis, then 2x
        # upsample with a stride-2 transposed convolution.
        x = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(inputs)
        x = Conv3DTranspose(filters, (2, 2, 2),
                            strides=(2, 2, 2),
                            use_bias=False,
                            padding='same')(x)
        x = InstanceNormalization()(x)
        x = Activation('relu')(x)

        # Skip connection (crop to match) followed by the conv block.
        x = _crop_concat()([x, skip_input])
        x = conv3d(x, filters, downsizing=False, loop=loop)
        return x
Exemple #19
0
 def testOutput(self):
     """Every convolutional layer variant must reject an invalid (zero)
     kernel/filter size with a ValueError.

     Fix over the original: print `cm.exception` (the exception instance
     actually raised) instead of `cm.expected` (which is merely the
     expected exception class, ValueError, regardless of what happened).
     The eight copy-pasted stanzas are collapsed into one data-driven loop.
     """
     # (layer class, positional args); DepthwiseConv2D takes no filters arg.
     cases = [
         (Convolution1D, (64, 0)),
         (Convolution2D, (64, 0)),
         (Convolution3D, (64, 0)),
         (Conv2DTranspose, (64, 0)),
         (Conv3DTranspose, (64, 0)),
         (SeparableConv1D, (64, 0)),
         (SeparableConv2D, (64, 0)),
         (DepthwiseConv2D, (0,)),
     ]
     for layer_cls, args in cases:
         with self.assertRaises(ValueError) as cm:
             self.classifier.add(layer_cls(*args,
                                           padding="same",
                                           input_shape=(32, 32, 1),
                                           activation='relu'))
         print(cm.exception)
Exemple #20
0
def level_block(m, dim, depth, inc_rate, activation, dropout, batchnorm, pool_type,
                upconv, residual):
    """Recursively build one resolution level of a 3D U-Net.

    At each level: conv block -> downsample -> recurse with inc_rate*dim
    filters -> upsample -> concatenate with the skip tensor -> conv block.
    At depth 0 only a (dropout-regularized) conv block is applied.

    Args:
        m: input tensor for this level.
        dim: number of filters at this level.
        depth: remaining recursion depth; 0 is the bottleneck.
        inc_rate: filter-count multiplier for the next deeper level.
        activation, dropout, batchnorm, residual: forwarded to conv_block
            (defined elsewhere in this file).
        pool_type: 0 = max pooling, 1 = average pooling, anything else =
            strided convolution.
        upconv: if True, upsample with UpSampling3D plus symmetry padding
            to realign with the skip tensor; otherwise use a strided
            Conv3DTranspose.

    Returns:
        The output tensor of this level.
    """
    if depth > 0:
        n = conv_block(m, dim, activation, batchnorm, residual)
        # Downsample by a factor of 2.
        if pool_type == 0:
            m = MaxPooling3D(pool_size=(2, 2, 2))(n)
        elif pool_type == 1:
            m = AveragePooling3D(pool_size=(2, 2, 2))(n)
        else:
            # BUG FIX: the original called Conv3D(...)(n) without assigning
            # the result, silently leaving `m` un-downsampled.
            m = Conv3D(dim, 3, strides=2, padding='same')(n)

        m = level_block(m, int(inc_rate*dim), depth-1, inc_rate, activation, dropout, batchnorm,
                        pool_type, upconv, residual)

        if upconv:
            m = UpSampling3D(size=(2, 2, 2))(m)
            # Pad m back up to n's spatial size when the pool/upsample round
            # trip lost a voxel on an odd-sized axis.
            diff_phi = n.shape[1] - m.shape[1]
            diff_r = n.shape[2] - m.shape[2]
            diff_z = n.shape[3] - m.shape[3]
            padding = [[int(diff_phi), 0], [int(diff_r), 0], [int(diff_z), 0]]
            if diff_phi != 0:
                m = SymmetryPadding3d(padding=padding, mode="SYMMETRIC")(m)
            elif (diff_r != 0 or diff_z != 0):
                m = SymmetryPadding3d(padding=padding, mode="CONSTANT")(m)
        else:
            m = Conv3DTranspose(dim, 3, strides=2, activation=activation,
                                padding='same')(m)
        # Skip connection, then the level's output conv block.
        n = concatenate([n, m])
        m = conv_block(n, dim, activation, batchnorm, residual)
    else:
        m = conv_block(m, dim, activation, batchnorm, residual, dropout)
    return m
Exemple #21
0
def trans_conv3d_bn(x, filters, num_row, num_col, num_z, padding='same', strides=(2, 2, 2), name=None):
    '''
    3D Transposed Convolutional layer followed by batch normalization.

    Arguments:
        x {keras layer} -- input layer
        filters {int} -- number of filters
        num_row {int} -- number of rows in filters
        num_col {int} -- number of columns in filters
        num_z {int} -- length along z axis in filters

    Keyword Arguments:
        padding {str} -- mode of padding (default: {'same'})
        strides {tuple} -- stride of convolution operation (default: {(2, 2, 2)})
        name {str} -- name of the layer (default: {None})
            NOTE(review): currently unused -- the layers below are created
            without a name.

    Returns:
        [keras layer] -- [output layer]
    '''


    x = Conv3DTranspose(filters, (num_row, num_col, num_z), strides=strides, padding=padding)(x)
    # Normalize over the channel axis (axis=4 for 5D NDHWC tensors).
    # NOTE(review): scale=False drops the learnable gamma -- presumably
    # because a subsequent linear/conv layer can absorb it; confirm.
    x = BatchNormalization(axis=4, scale=False)(x)

    return x
Exemple #22
0
    def make_generator_model(self):
        """Build a DCGAN-style 3D generator.

        Maps a latent vector of size ``self.latent_dim`` through a dense
        projection to a 2x2x2x256 volume, then a stack of Conv3DTranspose
        stages (the first stride-1, the rest stride-2), ending in a single
        tanh channel.

        Returns:
            A ``tf.keras.Sequential`` model.
        """
        base_channels = 256
        base_size = 2

        model = tf.keras.Sequential()
        # Project the latent code to a flat volume, then shape it to 3D.
        model.add(
            Dense(base_size * base_size * base_size * base_channels,
                  use_bias=False,
                  input_shape=(self.latent_dim, )))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(
            Reshape((base_size, base_size, base_size, base_channels)))

        # First stage keeps the spatial size (stride 1); each later stage
        # halves the channel count and doubles every spatial dimension.
        for n_filters, step in ((base_channels, 1), (128, 2), (64, 2), (32, 2)):
            model.add(
                Conv3DTranspose(n_filters, 4, strides=step, padding='same',
                                use_bias=False))
            model.add(BatchNormalization())
            model.add(LeakyReLU())

        # Output: one channel in [-1, 1].
        model.add(
            Conv3DTranspose(1, 4, strides=2, padding='same', use_bias=False,
                            activation='tanh'))
        return model
    def _getDecoderModel(self, encoded_dim, img_shape):
        """ Build Decoder Model Based on Paper Configuration
        Args:
            encoded_dim (int) : number of latent variables
            img_shape (tuple) : shape of target images
                NOTE(review): not used by the current architecture; the
                output shape is fixed by the hard-coded reshapes below.
        Return:
            A sequential keras model
        """
        decoder = Sequential()
        # Expand the latent code, then reshape so a Conv1D can mix features.
        decoder.add(Dense(128, activation='relu', input_dim=encoded_dim))
        decoder.add(Reshape((128, 1)))
        decoder.add(
            keras.layers.Conv1D(filters=108,
                                kernel_size=3,
                                strides=1,
                                padding="SAME",
                                activation='relu'))
        # 128 * 108 == 3 * 6 * 6 * 128, so this reshape is size-preserving.
        decoder.add(Reshape([3, 6, 6, 128]))
        # Four upsampling stages; each doubles every spatial dimension while
        # shrinking the channel count 64 -> 32 -> 16 -> 1.
        for n_filters in (64, 32, 16, 1):
            decoder.add(
                Conv3DTranspose(filters=n_filters,
                                kernel_size=3,
                                strides=(2, ) * 3,
                                padding="SAME",
                                activation='relu'))

        decoder.summary()
        return decoder
Exemple #24
0
 def _upconv3d(self, inputs, skip_input, filters, se_block=True, se_ratio=16, loop=2):
     """Upsample `inputs` 2x with a transposed conv, fuse with `skip_input`,
     then run the standard conv stack of this model."""
     # Pad one voxel on the far side of each axis so odd input extents line
     # up with the skip tensor after the 2x upsampling.
     out = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(inputs)
     out = Conv3DTranspose(filters, (2, 2, 2), strides=(2, 2, 2), use_bias=False, padding='same')(out)
     out = self._norm(out)
     out = self._activation(out)
     # Crop to the skip's shape and concatenate the two feature maps.
     out = self._crop_concat()([out, skip_input])
     return self._conv3d(out, filters, se_block=se_block, se_ratio=se_ratio, downsizing=False, loop=loop)
Exemple #25
0
    def up_block_3d(L, number_of_filters=64, kernel_size=(12, 12, 12), strides=(8, 8, 8),
                    include_dense_convolution_layer=True):
        """Back-projection up-block for 3D feature maps.

        Projects the low-resolution input up, back down, and uses the
        reconstruction error to correct the first upsampled estimate.

        Arguments:
            L: low-resolution input keras tensor.
            number_of_filters (int): channels used by every conv in the block.
            kernel_size (tuple): kernel of the up/down projection convs.
            strides (tuple): stride (i.e. scale factor) of the projections.
            include_dense_convolution_layer (bool): prepend a 1x1x1 conv to
                fix the channel count before projecting.

        Returns:
            Upsampled keras tensor.
        """
        def _prelu(t):
            # Channel-shared PReLU across the three spatial axes.
            return PReLU(alpha_initializer='zero', shared_axes=[1, 2, 3])(t)

        if include_dense_convolution_layer == True:
            # 1x1x1 bottleneck so the projections see a fixed channel count.
            L = Conv3D(filters = number_of_filters,
                       use_bias=True,
                       kernel_size=(1, 1, 1),
                       strides=(1, 1, 1),
                       padding='same')(L)
            L = _prelu(L)

        # Scale up.
        H0 = _prelu(Conv3DTranspose(filters=number_of_filters,
                                    kernel_size=kernel_size,
                                    strides=strides,
                                    kernel_initializer='glorot_uniform',
                                    padding='same')(L))

        # Scale back down and measure the projection residual.
        L0 = _prelu(Conv3D(filters=number_of_filters,
                           kernel_size=kernel_size,
                           strides=strides,
                           kernel_initializer='glorot_uniform',
                           padding='same')(H0))
        E = Subtract()([L0, L])

        # Scale the residual up and correct the first estimate.
        H1 = _prelu(Conv3DTranspose(filters=number_of_filters,
                                    kernel_size=kernel_size,
                                    strides=strides,
                                    kernel_initializer='glorot_uniform',
                                    padding='same')(E))

        return(Add()([H0, H1]))
Exemple #26
0
    def __init__(
        self,
        z_dim,
        w_dim,
        use_learnable_proj=True,  # TEMP: to replace with perspective projection
        w_shape=(4, 4, 4),
        upconv_flters=[128, 64],
        upconv_ks=[3, 3],  # for upconvolution, ks: kernel_size
        upconv_strides=[2, 2],
    ):
        """3D generator backbone with AdaIN-modulated upconvolutions.

        Args:
            z_dim: size of the latent code driving the AdaIN layers.
            w_dim: channel count of the learned constant input tensor.
            use_learnable_proj: if True, build two extra stride-1 transposed
                convs as a learnable stand-in for a perspective projection.
            w_shape: spatial shape of the learned constant tensor.
            upconv_flters / upconv_ks / upconv_strides: per-stage filters,
                kernel sizes, and strides of the upconvolution stack.
        """
        super().__init__()

        self.z_dim = z_dim
        self.w_dim = w_dim
        self.w_shape = w_shape
        self.use_learnable_proj = use_learnable_proj

        self.adain0 = AdaIN(w_dim, z_dim)
        self.lrelu = LeakyReLU(alpha=0.2)
        # Learned constant input tensor (StyleGAN-style starting volume).
        self.w = tf.Variable(tf.random.normal((*w_shape, w_dim), stddev=0.02),
                             trainable=True)

        def _make_deconv(n_filters, kernel, step):
            # All upconvs share the same init scheme; only shape params vary.
            return Conv3DTranspose(
                filters=n_filters,
                kernel_size=kernel,
                strides=step,
                padding="same",
                kernel_initializer=tf.initializers.RandomNormal(stddev=0.02),
                bias_initializer="zeros",
            )

        self.deconvs = []
        self.adains = []
        for n_filters, kernel, step in zip(upconv_flters, upconv_ks,
                                           upconv_strides):
            self.deconvs.append(_make_deconv(n_filters, kernel, step))
            self.adains.append(AdaIN(n_filters, z_dim))

        if use_learnable_proj:
            # Two stride-1 deconvs acting as a learnable projection head.
            self.proj1 = K.layers.Conv3DTranspose(
                filters=upconv_flters[-1],
                kernel_size=(3, 3, 3),
                strides=(1, 1, 1),
                padding="same",
                kernel_initializer=tf.initializers.RandomNormal(stddev=0.02),
                bias_initializer="zeros",
            )
            self.proj2 = K.layers.Conv3DTranspose(
                filters=upconv_flters[-1],
                kernel_size=(3, 3, 3),
                strides=(1, 1, 1),
                padding="same",
                kernel_initializer=tf.initializers.RandomNormal(stddev=0.02),
                bias_initializer="zeros",
            )
        else:
            print('Using 3D affine transformations for objects')
Exemple #27
0
def get_up_convolution(n_filters, pool_size, kernel_size=(2,2,2), strides=(2, 2, 2), deconvolution=True):
    """Return a 3D upsampling layer.

    Arguments:
        n_filters (int): output channels of the transposed convolution.
        pool_size: scale factor used when plain upsampling is requested.
        kernel_size (tuple): kernel of the transposed convolution.
        strides (tuple): stride of the transposed convolution.
        deconvolution (bool): True -> learned Conv3DTranspose;
            False -> parameter-free UpSampling3D.

    Returns:
        An (unapplied) keras layer instance.
    """
    if not deconvolution:
        return UpSampling3D(size=pool_size)
    return Conv3DTranspose(filters=n_filters,
                           padding = 'same',
                           kernel_size=kernel_size,
                           strides=strides,
                           use_bias=False)
def AutoEncoderModel():
    """Build a 3D convolutional autoencoder for 16x128x128 RGB volumes.

    Encoder: four Conv3D+BN+LeakyReLU stages with max pooling down to a
    2x16x16x64 bottleneck. Decoder: three stride-2 transposed convolutions
    back to full resolution, then a sigmoid RGB reconstruction.

    Returns:
        A keras ``Model`` mapping (16, 128, 128, 3) -> (16, 128, 128, 3).
    """
    X_input = Input((16, 128, 128, 3))

    # --- Encoder ---
    # The final stage pools with stride 1 ('same') so the bottleneck keeps
    # its 2x16x16 spatial extent.
    X = X_input
    for n_filters, pool_strides, pool_padding in (
            (32, (2, 2, 2), 'valid'),   # -> 8x64x64x32
            (48, (2, 2, 2), 'valid'),   # -> 4x32x32x48
            (64, (2, 2, 2), 'valid'),   # -> 2x16x16x64
            (64, (1, 1, 1), 'same')):   # -> 2x16x16x64 (bottleneck)
        X = Conv3D(n_filters, 3, padding='same')(X)
        X = BatchNormalization()(X)
        X = LeakyReLU()(X)
        X = MaxPool3D(pool_size=(2, 2, 2), strides=pool_strides,
                      padding=pool_padding)(X)

    # --- Decoder ---
    # Each transposed conv doubles every spatial dimension.
    for n_filters in (48, 32, 32):
        X = Conv3DTranspose(n_filters, 2, strides=(2, 2, 2), padding='valid')(X)
        X = BatchNormalization()(X)
        X = LeakyReLU()(X)
    # current shape is 16x128x128x32

    X = Conv3D(3, 3, strides=(1, 1, 1), padding='same')(X)
    X = Activation('sigmoid')(X)
    # current shape is 16x128x128x3

    return Model(inputs=X_input, outputs=X, name='AutoEncoderModel')
Exemple #29
0
    def generator_convolution_transpose(self,
                                        x,
                                        nodes,
                                        use_dropout=None,
                                        use_batchnorm=True,
                                        skip_x=None,
                                        use_upsampling=False,
                                        use_sn=False,
                                        drop_out_rate=None,
                                        relu_leak_rate=0.0):
        """Convolution transpose block used for generator.

        Optionally concatenates a skip connection, upsamples either with
        UpSampling3D + stride-1 conv or with a transposed convolution
        (spectral-normalized variants when `use_sn`), then applies optional
        batch norm, optional spatial dropout, and a LeakyReLU.
        """
        # Fall back to the model-level defaults when not given explicitly.
        use_dropout = (self.gen_deconv_use_dropout
                       if use_dropout is None else use_dropout)
        drop_out_rate = (self.gen_deconv_drop_out_rate
                         if drop_out_rate is None else drop_out_rate)

        if skip_x is not None:
            x = concatenate([x, skip_x])

        if use_upsampling:
            # Parameter-free upsample followed by a stride-1 convolution.
            conv_cls = ConvSN3D if use_sn else Conv3D
            x = UpSampling3D()(x)
            x = conv_cls(nodes,
                         kernel_size=self.filter_size,
                         padding='same',
                         use_bias=True)(x)
        else:
            # Learned upsampling via a transposed convolution.
            deconv_cls = ConvSN3DTranspose if use_sn else Conv3DTranspose
            x = deconv_cls(nodes,
                           self.filter_size,
                           strides=self.stride_size,
                           padding="same",
                           use_bias=True)(x)

        if use_batchnorm:
            x = BatchNormalization(momentum=0.99, epsilon=1e-3)(x)
        if use_dropout:
            x = CustomSpatialDropout3D(drop_out_rate)(x)

        # Use LeakyReLU(alpha = 0) instead of ReLU because ReLU is buggy when saved
        return LeakyReLU(alpha=relu_leak_rate)(x)
Exemple #30
0
    def deconv3d(layer_input,
                 skip_input,
                 filters,
                 axis=-1,
                 se_res_block=True,
                 se_ratio=16,
                 atten_gate=False):
        """Decoder block: 2x upsampling of `layer_input` via transposed conv,
        crop-concatenation with `skip_input`, and two 3x3x3 convolutions,
        with optional attention gating of the skip and an optional
        squeeze-and-excitation residual around the conv pair.

        Args:
            layer_input: feature map from the coarser decoder level.
            skip_input: encoder feature map at the target resolution.
            filters (int): number of output channels.
            axis (int): channel axis for InstanceNormalization.
            se_res_block (bool): apply SE recalibration plus a conv shortcut.
            se_ratio (int): channel reduction factor in the SE bottleneck.
            atten_gate (bool): gate `skip_input` with an additive-attention
                map before concatenation.

        Returns:
            Keras tensor with `filters` channels at the skip resolution.
        """
        if atten_gate == True:
            # Gating signal from the coarse input (1x1x1 conv, same size).
            gating = Conv3D(filters, (1, 1, 1), use_bias=False,
                            padding='same')(layer_input)
            gating = InstanceNormalization(axis=axis)(gating)
            # Stride-2 conv brings the skip down to the gating resolution.
            attention = Conv3D(filters, (2, 2, 2),
                               strides=(2, 2, 2),
                               use_bias=False,
                               padding='valid')(skip_input)
            attention = InstanceNormalization(axis=axis)(attention)
            attention = add([gating, attention])
            # Collapse to a single-channel sigmoid attention map.
            attention = Conv3D(1, (1, 1, 1),
                               use_bias=False,
                               padding='same',
                               activation='sigmoid')(attention)
            # attention = Lambda(resize_by_axis, arguments={'dim_1':skip_input.get_shape().as_list()[1],'dim_2':skip_input.get_shape().as_list()[2],'ax':3})(attention) # error when None dimension is feeded.
            # attention = Lambda(resize_by_axis, arguments={'dim_1':skip_input.get_shape().as_list()[1],'dim_2':skip_input.get_shape().as_list()[3],'ax':2})(attention)
            # Pad + nearest upsample back to the skip resolution, crop to
            # align exactly, then tile across channels so the single map
            # gates every feature channel of the skip.
            attention = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(attention)
            attention = UpSampling3D((2, 2, 2))(attention)
            attention = CropToConcat3D(mode='crop')([attention, skip_input])
            attention = Lambda(lambda x: K.tile(x, [1, 1, 1, 1, filters]))(
                attention)
            skip_input = multiply([skip_input, attention])

        # Pad by one voxel per axis so odd extents survive the 2x upsample,
        # then upsample with a stride-2 transposed convolution.
        u1 = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(layer_input)
        u1 = Conv3DTranspose(filters, (2, 2, 2),
                             strides=(2, 2, 2),
                             use_bias=False,
                             padding='same')(u1)
        u1 = InstanceNormalization(axis=axis)(u1)
        u1 = LeakyReLU(alpha=0.3)(u1)
        # Crop to the skip's shape and concatenate the two feature maps.
        u1 = CropToConcat3D()([u1, skip_input])
        u2 = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(u1)
        u2 = InstanceNormalization(axis=axis)(u2)
        u2 = LeakyReLU(alpha=0.3)(u2)
        u2 = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(u2)
        u2 = InstanceNormalization(axis=axis)(u2)
        if se_res_block == True:
            # Squeeze-and-excitation: global pool -> bottleneck -> sigmoid
            # channel weights, applied multiplicatively to u2.
            se = GlobalAveragePooling3D()(u2)
            se = Dense(filters // se_ratio, activation='relu')(se)
            se = Dense(filters, activation='sigmoid')(se)
            se = Reshape([1, 1, 1, filters])(se)
            u2 = Multiply()([u2, se])
            # Residual shortcut taken from the concatenated features.
            shortcut = Conv3D(filters, (3, 3, 3),
                              use_bias=False,
                              padding='same')(u1)
            shortcut = InstanceNormalization(axis=axis)(shortcut)
            u2 = add([u2, shortcut])
        u2 = LeakyReLU(alpha=0.3)(u2)
        return u2