Example #1
def get_model(x):
    x = layers.Conv3D(64, (3, 3, 3),
                      strides=(1, 1, 1),
                      padding='same',
                      activation='relu')(x)
    x = layers.MaxPool3D((2, 2, 1), strides=(2, 2, 1), padding='same')(x)
    #    x = layers.SpatialDropout3D(0.35)(x)
    x = layers.Conv3D(128, (3, 3, 3),
                      strides=(1, 1, 1),
                      padding='same',
                      activation='relu')(x)
    x = layers.MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    #    x = layers.SpatialDropout3D(0.35)(x)
    x = layers.Conv3D(128, (3, 3, 3),
                      strides=(1, 1, 1),
                      padding='same',
                      activation='relu')(x)
    x = layers.MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    #    x = layers.SpatialDropout3D(0.35)(x)
    x = layers.Conv3D(256, (3, 3, 3),
                      strides=(1, 1, 1),
                      padding='same',
                      activation='relu')(x)
    x = layers.MaxPool3D((2, 2, 2), strides=(2, 2, 2))(x)
    #    x = layers.SpatialDropout3D(0.35)(x)
    #    x = layers.Conv3D(256, (3, 3, 3), strides=(1, 1, 1), padding='same',
    #                      activation='relu')(x)
    #  x = layers.SpatialDropout3D(0.5)(x)
    #    x = layers.MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    x = layers.Flatten()(x)
    return x
Example #2
def get_model(width=128, height=128, depth=64):
    """Build a 3D convolutional neural network model."""

    inputs = keras.Input((width, height, depth, 1))

    x = layers.Conv3D(filters=64, kernel_size=3, activation="relu")(inputs)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=64, kernel_size=3, activation="relu")(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=128, kernel_size=3, activation="relu")(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=256, kernel_size=3, activation="relu")(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(units=512, activation="relu")(x)
    x = layers.Dropout(0.3)(x)

    outputs = layers.Dense(units=1, activation="sigmoid")(x)

    # Define the model.
    model = keras.Model(inputs, outputs, name="3dcnn")
    return model
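
The function above only defines the architecture. A minimal usage sketch follows; the imports, optimizer, and loss are assumptions, chosen because the head is a single sigmoid unit:

from tensorflow import keras

# Build and compile the binary classifier; the hyperparameters here are placeholders.
model = get_model(width=128, height=128, depth=64)
model.compile(optimizer=keras.optimizers.Adam(1e-4),
              loss="binary_crossentropy",
              metrics=["accuracy"])
model.summary()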
Example #3
def get_model(width=32, height=32, depth=20):
    """Build a 3D convolutional neural network model."""

    inputs = keras.Input((width, height, depth, 1))

    x = layers.Conv3D(filters=32, kernel_size=3, activation="relu")(inputs)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=32, kernel_size=4, activation="relu")(x)
    x = layers.MaxPool3D(pool_size=(1, 1, 2))(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=32, kernel_size=3, activation="relu")(x)
    x = layers.MaxPool3D(pool_size=(2, 2, 1))(x)
    x = layers.BatchNormalization()(x)
    x = layers.GlobalMaxPool3D()(x)
    x = layers.Dense(units=64, activation="relu")(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Dense(units=128, activation="relu")(x)
    x = layers.Dropout(0.3)(x)
    outputs = layers.Dense(units=1, activation='sigmoid')(x)

    # Define the model.
    model = keras.Model(inputs, outputs, name="3dcnn")
    return model
Example #4
def build_network(input_shapes, output_size, training, name = 'TreatmentRecommder'):
    '''
    Build the network for COVID-19: predict how long it will take for a patient to be cured
    '''
    dtype = tf.float32
    #treatment information
    treatment_info = KL.Input(shape = input_shapes[0], dtype = dtype, name='treatment_info') 
   
    # imaging information: CNN features from CT images
    image_info = KL.Input(shape = input_shapes[1]+[1], dtype = dtype, name='image_info')   
    base_filters = 16    
    x11 = KL.Conv3D(base_filters, (3, 3, 3), activation='relu', padding='same', name = 'x11')(image_info)  
    x12 = KL.Conv3D(base_filters, (3, 3, 3), activation='relu', padding='same', name = 'x12')(x11)  
    x13 = KL.Conv3D(base_filters, (3, 3, 3), activation='relu', padding='same', name = 'x13')(x12) 
    
    d1 = KL.MaxPool3D()(x13)
    
    x21 = KL.Conv3D(base_filters*2, (3, 3, 3), activation='relu', padding='same', name = 'x21')(d1)  
    x22 = KL.Conv3D(base_filters*2, (3, 3, 3), activation='relu', padding='same', name = 'x22')(x21)  
    
    d2 = KL.MaxPool3D()(x22)
    
    x31 = KL.Conv3D(base_filters*4, (3, 3, 3), activation='relu', padding='same', name = 'x31')(d2)  
    x32 = KL.Conv3D(base_filters*4, (3, 3, 3), activation='relu', padding='same', name = 'x32')(x31)  
   
    d3 = KL.MaxPool3D()(x32)
    
    x41 = KL.Conv3D(base_filters*8, (3, 3, 3), activation='relu', padding='same', name = 'x41')(d3)  
    x42 = KL.Conv3D(base_filters*8, (3, 3, 3), activation='relu', padding='same', name = 'x42')(x41)  
  
    d4 = KL.MaxPool3D()(x42)
    
    x51 = KL.Conv3D(base_filters*16, (3, 3, 3), activation='relu', padding='same', name = 'x51')(d4)  
    x52 = KL.Conv3D(base_filters*16, (3, 3, 3), activation='relu', padding='same', name = 'x52')(x51)  
 
    d5 = KL.MaxPool3D()(x52)
    cnn_GAP = KL.GlobalAveragePooling3D(name='CNN_GAP')(d5)
    cnn_cof = KL.Dense(1, activation='relu', name='cnn_cof')(cnn_GAP)
    
    #patient information
    patient_info = KL.Input(shape = input_shapes[2], dtype = dtype, name='patient_info')
    pcnn_info = KL.Concatenate()([patient_info, cnn_cof])    
    
    # cure-probability distribution subnetwork
    w_pcnn_info = SA_Module(pcnn_info, training)
    
    fc1 = KL.Dense(256, activation='relu', name='fc1')(KL.Concatenate()([w_pcnn_info, cnn_GAP, treatment_info])) 
    fc2 = KL.Dense(512, activation='relu', name='fc2')(fc1) 
    fc3 = KL.Dense(512, activation='relu', name='fc3')(fc2) 
   
    fc_cls = KL.Dense(256, activation='relu', name='fc_cls')(fc3) 
    fc_cls = KL.Dropout(0.4)(fc_cls, training = training)
    severity_cls_preds = KL.Dense(output_size[0],activation='softmax', name='severity_cls_preds')(fc_cls)
    
    fc_reg = KL.Dense(256, activation='relu', name='fc_reg')(fc3)
    fc_reg = KL.Dropout(0.4)(fc_reg, training = training)
    risk_reg_preds = KL.Dense(output_size[1],activation='softmax', name='risk_reg_preds')(fc_reg)
    
    model = KM.Model([treatment_info,image_info,patient_info], [severity_cls_preds, risk_reg_preds], name=name)
    return model
Example #5
def build_model(input_shape, target_size, dropout=0):
    """Construct the CosmoFlow 3D CNN model"""

    conv_args = dict(kernel_size=2, padding='valid')

    model = tf.keras.models.Sequential([
        layers.Conv3D(16, input_shape=input_shape, **conv_args),
        layers.LeakyReLU(),
        layers.MaxPool3D(pool_size=2),
        layers.Conv3D(16, **conv_args),
        layers.LeakyReLU(),
        layers.MaxPool3D(pool_size=2),
        layers.Conv3D(16, **conv_args),
        layers.LeakyReLU(),
        layers.MaxPool3D(pool_size=2),
        layers.Conv3D(16, **conv_args),
        layers.LeakyReLU(),
        layers.MaxPool3D(pool_size=2),
        layers.Conv3D(16, **conv_args),
        layers.LeakyReLU(),
        layers.MaxPool3D(pool_size=2),
        layers.Flatten(),
        layers.Dropout(dropout),
        layers.Dense(128),
        layers.LeakyReLU(),
        layers.Dropout(dropout),
        layers.Dense(64),
        layers.LeakyReLU(),
        layers.Dropout(dropout),
        layers.Dense(target_size, activation='tanh'),
        layers.Lambda(scale_1p2)
    ])

    return model
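
scale_1p2 is used in the final Lambda layer but is not defined in this snippet; its name suggests it rescales the tanh output by a factor of 1.2 so that targets slightly outside [-1, 1] stay reachable. A stand-in under that assumption:

# Assumed helper (inferred from the name, not taken from the original source):
# stretch the tanh output from [-1, 1] to [-1.2, 1.2].
def scale_1p2(x):
    return x * 1.2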
Example #6
def create_model(height=240, width=320):
    # shape of input: 1 block has 10 frames x height x width x 3 channels (RGB)
    input = tf.keras.Input((10, height, width, 3))

    # 1st Conv3D block includes Conv3D with 8 filters, MaxPool3D and BatchNormalization
    x = layers.Conv3D(filters=8, kernel_size=(3, 3, 3),
                      activation='relu')(input)
    x = layers.MaxPool3D(pool_size=(2, 2, 2))(x)
    x = layers.BatchNormalization()(x)

    # 2nd Conv3D block includes Conv3D with 16 filters, MaxPool3D and BatchNormalization
    x = layers.Conv3D(filters=16, kernel_size=(3, 3, 3), activation='relu')(x)
    x = layers.MaxPool3D(pool_size=(2, 2, 2))(x)
    x = layers.BatchNormalization()(x)

    # 3rd Conv3D block includes Conv3D with 32 filters, MaxPool3D and BatchNormalization
    x = layers.Conv3D(filters=32, kernel_size=(3, 3, 3),
                      padding='same',  # 'same' keeps the block valid: the temporal dim is already 1 here
                      activation='relu')(x)
    x = layers.MaxPool3D(pool_size=(1, 2, 2))(x)
    x = layers.BatchNormalization()(x)

    # Fully-connected block: GlobalAveragePooling3D, a 512-unit Dense layer and Dropout for regularization
    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(units=512, activation='relu')(x)
    x = layers.Dropout(0.7)(x)

    # output shape (1,) produces value between [0, 1]
    output = layers.Dense(units=1, activation='sigmoid')(x)

    model = tf.keras.Model(input, output, name='3DCNN')
    return model
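
A quick way to sanity-check the wiring above is to push one dummy block of 10 RGB frames through the model; the numpy usage and batch size are assumptions:

import numpy as np

model = create_model(height=240, width=320)
dummy_block = np.random.rand(1, 10, 240, 320, 3).astype("float32")
print(model(dummy_block).shape)  # expected: (1, 1)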
Example #7
    def setup_model(self):
        nl = self.hparams['num_latent_layers']
        autoencoder_layers = [
            tfl.Conv3D(64, (2, 2, 2), padding="same", name='conv_4_conv'),
            tfl.Activation(tf.nn.relu, name='conv_4_activation'),
            tfl.MaxPool3D((2, 2, 2), name='conv_4_maxpool'),
            tfl.Conv3D(128, (2, 2, 2), padding="same", name='conv_3_conv'),
            tfl.Activation(tf.nn.relu, name='conv_3_activation'),
            tfl.MaxPool3D((2, 2, 2), name='conv_3_maxpool'),
            tfl.Conv3D(256, (2, 2, 2), padding="same", name='conv_2_conv'),
            tfl.Activation(tf.nn.relu, name='conv_2_activation'),
            tfl.MaxPool3D((2, 2, 2), name='conv_2_maxpool'),
            tfl.Conv3D(512, (2, 2, 2), padding="same", name='conv_1_conv'),
            tfl.Activation(tf.nn.relu, name='conv_1_activation'),
            tfl.MaxPool3D((2, 2, 2), name='conv_1_maxpool'),
            tfl.Flatten(name='flatten'),
            tfl.Dense(nl, activation='relu', name='latent'),
            tfl.Dense(32768, activation='relu', name='expand'),
            tfl.Reshape((4, 4, 4, 512), name='reshape'),
            tfl.Conv3DTranspose(256, (2, 2, 2), strides=2, name='deconv_1_deconv'),
            tfl.Activation(tf.nn.relu, name='deconv_1_activation'),
            tfl.Conv3DTranspose(128, (2, 2, 2), strides=2, name='deconv_2_deconv'),
            tfl.Activation(tf.nn.relu, name='deconv_2_activation'),
            tfl.Conv3DTranspose(64, (2, 2, 2), strides=2, name='deconv_3_deconv'),
            tfl.Activation(tf.nn.relu, name='deconv_3_activation'),
            tfl.Conv3DTranspose(1, (2, 2, 2), strides=2, name='deconv_4_deconv'),
            # tfl.Activation(tf.nn.relu,                    name='deconv_4_activation'),

            # tfl.Conv3DTranspose(1, (2,2,2,), strides=1,   name='deconv_5_deconv', padding="same"),
        ]

        for l in autoencoder_layers:
            self._add_layer(l)
Example #8
def cnn3d(stand_alone=True):
    inputs = layers.Input(shape=settings.TENSOR_SHAPE, name='rgb')
    # 3D Convolutional Layers
    x = layers.Conv3D(filters=64,
                      kernel_size=3,
                      strides=2,
                      padding='same',
                      activation="relu",
                      kernel_regularizer=regularizers.l2(l2=1e-4),
                      bias_regularizer=regularizers.l2(l2=1e-4))(inputs)
    x = layers.MaxPool3D(pool_size=2)(x)

    x = layers.Conv3D(filters=128,
                      kernel_size=3,
                      padding='same',
                      strides=1,
                      activation="relu",
                      kernel_regularizer=regularizers.l2(1e-4),
                      bias_regularizer=regularizers.l2(l2=1e-4))(x)
    x = layers.MaxPool3D(pool_size=2, padding='same')(x)

    x = layers.Conv3D(filters=256,
                      kernel_size=3,
                      strides=1,
                      padding='same',
                      activation="relu",
                      kernel_regularizer=regularizers.l2(1e-4),
                      bias_regularizer=regularizers.l2(l2=1e-4))(x)
    x = layers.MaxPool3D(pool_size=2, padding='same')(x)

    x = layers.Conv3D(filters=256,
                      kernel_size=3,
                      strides=1,
                      padding='same',
                      activation="relu",
                      kernel_regularizer=regularizers.l2(1e-4),
                      bias_regularizer=regularizers.l2(l2=1e-4))(x)
    x = layers.MaxPool3D(pool_size=2, padding='same')(x)

    x = layers.Flatten()(x)
    if stand_alone:
        x = layers.Dense(units=512, activation="relu")(x)
        x = layers.Dropout(0.5)(x)

        outputs = layers.Dense(units=settings.NUM_CLASSES,
                               activation="softmax")(x)
    else:
        x = layers.Dense(units=2048, activation="relu")(x)
        x = layers.Dropout(0.4)(x)
        outputs = layers.Dense(units=1024, activation="relu")(x)

    # Define the model.
    model = Model(inputs, outputs, name="3dcnn")

    return model
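
settings.TENSOR_SHAPE and settings.NUM_CLASSES come from a project-specific config that is not shown; with placeholder values (pure assumptions), the two modes differ only in the head:

# Hypothetical stand-in for the project's settings module, defined in the same file.
class settings:
    TENSOR_SHAPE = (16, 112, 112, 3)
    NUM_CLASSES = 10

classifier = cnn3d(stand_alone=True)   # softmax head over NUM_CLASSES
backbone = cnn3d(stand_alone=False)    # 1024-unit feature head for a larger model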
Example #9
    def setup_model(self):

        nl = self.params['num_latent_layers']
        
        autoencoder_layers = [
            tfl.Conv3D(64, (2,2,2), padding="same",  name='conv_4_conv'),
            tfl.Activation(tf.nn.relu,               name='conv_4_activation'),
            tfl.MaxPool3D((2,2,2),                   name='conv_4_maxpool'),

            tfl.Conv3D(128, (2,2,2), padding="same", name='conv_3_conv'),
            tfl.Activation(tf.nn.relu,               name='conv_3_activation'),
            tfl.MaxPool3D((2,2,2),                   name='conv_3_maxpool'),

            tfl.Conv3D(256, (2,2,2), padding="same", name='conv_2_conv'),
            tfl.Activation(tf.nn.relu,               name='conv_2_activation'),
            tfl.MaxPool3D((2,2,2),                   name='conv_2_maxpool'),

            tfl.Conv3D(512, (2,2,2), padding="same", name='conv_1_conv'),
            tfl.Activation(tf.nn.relu,               name='conv_1_activation'),
            tfl.MaxPool3D((2,2,2),                   name='conv_1_maxpool'),

            tfl.Flatten(                             name='flatten'),

            tfl.Dense(nl, activation='relu',         name='latent'),
            
            tfl.Dense(32768, activation='relu',      name='expand'),
            tfl.Reshape((4,4,4,512),                 name='reshape'),
            

            tfl.Conv3DTranspose(256, (2,2,2,), strides=2, name='deconv_1_deconv'),
            tfl.Activation(tf.nn.relu,                    name='deconv_1_activation'),
            tfl.Conv3DTranspose(128, (2,2,2,), strides=2, name='deconv_2_deconv'),
            tfl.Activation(tf.nn.relu,                    name='deconv_2_activation'),
            tfl.Conv3DTranspose(64, (2,2,2,), strides=2,  name='deconv_3_deconv'),
            tfl.Activation(tf.nn.relu,                    name='deconv_3_activation'),
            
            tfl.Conv3DTranspose(1, (2,2,2,), strides=2,   name='deconv_4_deconv'),
            # tfl.Activation(tf.nn.relu,                    name='deconv_4_activation'),
            
            # tfl.Conv3DTranspose(1, (2,2,2,), strides=1,   name='deconv_5_deconv', padding="same"),
        ]
        if self.params['is_u_connected'] and self.params['use_final_unet_layer']:
            extra_unet_layers = [
                tfl.Conv3D(2, (1,1,1,), use_bias=False,                  name='unet_combine'),
                # tfl.Activation(tf.nn.relu,                             name='unet_final_activation'),
            ]
            if self.params['final_activation'] == 'sigmoid':
                extra_unet_layers.append(tfl.Activation(tf.math.sigmoid, name='unet_final_activation'))
            if self.params['final_activation'] == 'relu':
                extra_unet_layers.append(tfl.Activation(tf.nn.relu,      name='unet_final_activation'))

            autoencoder_layers = autoencoder_layers + extra_unet_layers

        for l in autoencoder_layers:
            self._add_layer(l)
Example #10
def make_encoder(inp_shape, batch_size, params):
    """Encoder of the autoencoder"""
    inputs = {
        'conditioned_occ': tf.keras.Input(batch_size=batch_size,
                                          shape=inp_shape),
        'known_occ': tf.keras.Input(batch_size=batch_size, shape=inp_shape),
        'known_free': tf.keras.Input(batch_size=batch_size, shape=inp_shape),
    }
    # Autoencoder
    x = tfl.concatenate([inputs['known_occ'], inputs['known_free']], axis=4)

    for n_filter in [64, 128, 256, 512]:
        x = tfl.Conv3D(n_filter, (2, 2, 2), use_bias=True, padding="same")(x)
        x = tfl.Activation(tf.nn.relu)(x)
        x = tfl.MaxPool3D((2, 2, 2))(x)

    x = tfl.Flatten()(x)
    x = tfl.Dense(params['num_latent_layers'], activation='relu')(x)
    x = tfl.Dense(32768, activation='relu')(x)
    x = tfl.Reshape((4, 4, 4, 512))(x)
    auto_encoder_features = x
    return tf.keras.Model(inputs=inputs, outputs=auto_encoder_features)
Example #11
def build_model(columns, rows, depth, output_units, kernel_size):
    # Based on: https://keras.io/examples/vision/3D_image_classification/
    pool_size = 2

    # Handle small dimensions
    if (columns <= 2 or rows <= 2):
        pool_size = 1
        kernel_size = 1

    # Handle odd dimension lengths
    if ((columns % 2 != 0) or (rows % 2 != 0)):
        kernel_size = 1

    inputs = keras.Input((depth, columns, rows, 1))
    x = layers.Conv3D(filters=64, kernel_size=kernel_size,
                      activation="relu")(inputs)
    x = layers.MaxPool3D(pool_size=pool_size)(x)
    x = layers.BatchNormalization()(x)

    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(units=512, activation="relu")(x)
    x = layers.Dropout(0.3)(x)

    outputs = layers.Dense(units=output_units, activation="sigmoid")(x)

    model = keras.Model(inputs, outputs, name="3DCNN")
    return model
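
The dimension checks above exist because small or odd grids would otherwise break the Conv3D/MaxPool3D shapes; a quick sketch exercising both branches (all values are arbitrary):

# Tiny grid: kernel_size and pool_size are forced down to 1.
small = build_model(columns=2, rows=2, depth=8, output_units=3, kernel_size=3)
# Even, larger grid: the requested kernel_size=3 and pool_size=2 are kept.
large = build_model(columns=16, rows=16, depth=8, output_units=3, kernel_size=3)
small.summary()
large.summary()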
Example #12
    def __init__(self, kernel: tuple, stride=2, padding=0, scope='MPOOL'):
        super(MaxPool, self).__init__(scope)
        dim = len(kernel)
        is_global = True if kernel[-1] < 0 else False
        if is_global:
            assert padding == 0

        if dim == 1:
            if is_global:
                self.pool = _MP(1)
            else:
                self.pool = layers.MaxPool1D(kernel, stride)
            pad_fn = layers.ZeroPadding1D
        elif dim == 2:
            if is_global:
                self.pool = _MP(2)
            else:
                self.pool = layers.MaxPool2D(kernel, stride)
            pad_fn = layers.ZeroPadding2D
        elif dim == 3:
            if is_global:
                self.pool = _MP(3)
            else:
                self.pool = layers.MaxPool3D(kernel, stride)
            pad_fn = layers.ZeroPadding3D
        else:
            raise Exception('NEBULAE ERROR ⨷ %d-d pooling is not supported.' % dim)

        if isinstance(padding, int):
            padding = dim * [[padding, padding]]
        elif isinstance(padding, (list, tuple)):
            padding = [(padding[2*d], padding[2*d+1]) for d in range(dim-1, -1, -1)]

        self.pad = pad_fn(padding)
Example #13
    def __init__(self,
                 n_layer,
                 root_filters,
                 kernal_size=3,
                 pool_size=2,
                 use_bn=True,
                 use_res=True,
                 padding='SAME'):
        super().__init__()
        self.dw_layers = dict()
        self.max_pools = dict()
        for layer in range(n_layer):
            filters = 2**layer * root_filters
            dict_key = str(n_layer - layer - 1)
            dw = _DownSampling(filters, kernal_size, 'dw_%d' % layer, use_bn,
                               use_res)
            self.dw_layers[dict_key] = dw
            pool = layers.MaxPool3D(pool_size, padding=padding)
            self.max_pools[dict_key] = pool

        self.flat = layers.Flatten()
        self.f1 = layers.Dense(2048, use_bias=True, name='f1')
        self.f2 = layers.Dense(512, use_bias=True, name='f2')
        self.f3 = layers.Dense(64, use_bias=True, name='f3')
        self.f_out = layers.Dense(1, use_bias=False, name='f_out')
Example #14
    def build_down_sampling_block(
            self, filters: int, kernel_size: int, padding: str,
            strides: int) -> Union[tf.keras.Model, tfkl.Layer]:
        """
        Build a block for down-sampling.

        This block changes the tensor shape (width, height, depth),
        but it does not change the number of channels.

        :param filters: number of channels for output, arg for conv3d
        :param kernel_size: arg for pool3d or conv3d
        :param padding: arg for pool3d or conv3d
        :param strides: arg for pool3d or conv3d
        :return: a block consists of one or multiple layers
        """
        if self._pooling:
            return tfkl.MaxPool3D(pool_size=kernel_size,
                                  strides=strides,
                                  padding=padding)
        else:
            return layer.Conv3dBlock(
                filters=filters,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
            )
Example #15
def vgg_3d_v1(input_shape=(38, 38, 6, 1), n_filters=32, kernel_size=(3, 3, 3)):
    # No normalization, padding first to 40x40x8, same padding, max pooling
    model = Sequential()

    model.add(layers.ZeroPadding3D(padding=(1, 1, 1),
                                   input_shape=input_shape))  # 40x40x8

    model.add(
        layers.Conv3D(filters=n_filters,
                      kernel_size=kernel_size,
                      padding='same',
                      activation='relu'))
    model.add(
        layers.Conv3D(filters=n_filters,
                      kernel_size=kernel_size,
                      padding='same',
                      activation='relu'))
    model.add(layers.MaxPool3D(pool_size=(2, 2, 2)))  # 20x20x4

    model.add(
        layers.Conv3D(filters=2 * n_filters,
                      kernel_size=kernel_size,
                      padding='same',
                      activation='relu'))
    model.add(
        layers.Conv3D(filters=2 * n_filters,
                      kernel_size=kernel_size,
                      padding='same',
                      activation='relu'))
    model.add(
        layers.Conv3D(filters=2 * n_filters,
                      kernel_size=kernel_size,
                      padding='same',
                      activation='relu'))
    model.add(layers.MaxPool3D(pool_size=(2, 2, 2)))  # 10x10x2

    model.add(layers.Flatten())
    model.add(layers.Dense(units=4 * n_filters, activation='relu'))
    model.add(layers.Dropout(0.5))

    model.add(layers.Dense(units=4 * n_filters, activation='relu'))
    model.add(layers.Dropout(0.5))

    model.add(layers.Dense(units=1, activation='sigmoid'))
    return model
Example #16
def make_encoder(inp_shape, params):
    """Basic VAE encoder"""
    n_features = params['num_latent_layers']
    return tf.keras.Sequential([
        tfl.InputLayer(input_shape=inp_shape),
        tfl.Conv3D(64, (2, 2, 2), padding="same"),
        tfl.Activation(tf.nn.relu),
        tfl.MaxPool3D((2, 2, 2)),
        tfl.Conv3D(128, (2, 2, 2), padding="same"),
        tfl.Activation(tf.nn.relu),
        tfl.MaxPool3D((2, 2, 2)),
        tfl.Conv3D(256, (2, 2, 2), padding="same"),
        tfl.Activation(tf.nn.relu),
        tfl.MaxPool3D((2, 2, 2)),
        tfl.Conv3D(512, (2, 2, 2), padding="same"),
        tfl.Activation(tf.nn.relu),
        tfl.MaxPool3D((2, 2, 2)),
        tfl.Flatten(),
        tfl.Dense(n_features * 2)
    ])
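
The final Dense layer emits 2 * num_latent_layers units, which a VAE typically splits into a mean half and a log-variance half. A hedged usage sketch; the input shape, latent size, and split convention are assumptions:

import tensorflow as tf

encoder = make_encoder(inp_shape=(64, 64, 64, 2), params={'num_latent_layers': 200})
features = encoder(tf.zeros((1, 64, 64, 64, 2)))
# Split the 400-unit output into mean and log-variance of the latent Gaussian.
mean, logvar = tf.split(features, num_or_size_splits=2, axis=1)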
Example #17
def get_model(input_shape=(None, None, None, 1)):
    inputs = layers.Input(shape=input_shape)
    x = layers.Conv3D(filters=96, kernel_size=1, activation="relu")(inputs)
    x = layers.MaxPool3D(strides=2)(x)
    x = layers.BatchNormalization()(x)
    x = fire_block(x, 128)
    x = fire_block(x, 128)
    x = fire_block(x, 256)
    x = layers.MaxPool3D(strides=2)(x)
    x = fire_block(x, 256)
    x = fire_block(x, 384)
    x = fire_block(x, 384)
    x = fire_block(x, 512)
    x = layers.MaxPool3D(strides=2)(x)
    x = fire_block(x, 512)
    x = layers.Conv3D(filters=1, kernel_size=1, activation="relu")(x)
    x = layers.BatchNormalization()(x)
    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Softmax()(x)
    return tf.keras.Model(inputs=inputs, outputs=x, name="SqueezeNet")
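
fire_block is not defined in the snippet; a SqueezeNet-style fire module adapted to 3D (a 1x1x1 squeeze convolution followed by parallel 1x1x1 and 3x3x3 expand branches) is a reasonable stand-in. The squeeze ratio below is an assumption:

def fire_block(x, filters):
    # Assumed 3D fire module: squeeze to filters // 4 channels, then expand with
    # parallel 1x1x1 and 3x3x3 convolutions and concatenate along the channel axis.
    squeeze = layers.Conv3D(filters // 4, kernel_size=1, activation="relu")(x)
    expand_1 = layers.Conv3D(filters // 2, kernel_size=1, padding="same",
                             activation="relu")(squeeze)
    expand_3 = layers.Conv3D(filters // 2, kernel_size=3, padding="same",
                             activation="relu")(squeeze)
    return layers.Concatenate()([expand_1, expand_3])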
Example #18
def create_model():
    
    inputs = tf.keras.Input(shape=(image_size, image_size, 3, 1))
    prev_inputs = tf.keras.Input(shape=(image_size//2, image_size//2, 3, 6))
    
    # Layer 1:
    x11 = layers.Conv3D(3,(9,9,3),strides=(2,2,1), padding='SAME', kernel_initializer=tf.random_normal_initializer(0,0.02))(inputs)
    x11 = nonlinearity(x11)
    x12 = layers.Conv3D(3,(6,6,3),strides=(2,2,1), padding='SAME', kernel_initializer=tf.random_normal_initializer(0,0.02))(inputs)
    x12 = nonlinearity(x12)
    x13 = layers.Conv3D(3,(3,3,3),strides=1, padding='SAME', kernel_initializer=tf.random_normal_initializer(0,0.02))(inputs)
    x13 = layers.MaxPool3D(pool_size=(2,2,1))(x13)
    x13 = nonlinearity(x13)
    x1 = layers.Concatenate(axis=4)([x11,x12,x13])
    # x1 = (batch, x//2, y//2, 3, 9) # layer 1 output
    
    # Layer 2:
    x21 = layers.Conv3D(6,(7,7,3),strides=1, padding='SAME', kernel_initializer=tf.random_normal_initializer(0,0.02))(x1)
    x21 = nonlinearity(x21)
    x22 = layers.Conv3D(6,(7,7,3),strides=1, dilation_rate = (2,2,2), padding='SAME', kernel_initializer=tf.random_normal_initializer(0,0.02))(x1)
    rnn_input = nonlinearity(x22)
    x23 = layers.Conv3D(6,(3,3,3),strides=1, padding='SAME', kernel_initializer=tf.random_normal_initializer(0,0.02))(x1)
    x23 = nonlinearity(x23)
    x2 = layers.Concatenate(axis=4)([x21,x23,prev_inputs])
    # x2 = (batch, x//2, y//2, z//2, 18) # layer 2 output
    # rnn_input = (batch, x//2, y//2, z//2, 6)
    
    # RNN layer
    rnn_out = layers.Conv3D(6,(7,7,3),strides=1, dilation_rate = (3,3,3), padding='SAME', kernel_initializer=tf.random_normal_initializer(0,0.02))(rnn_input)
    # rnn_out = (batch, x//2, y//2, z//2, 6)
    
    #Layer 3:
    x3 = layers.Conv3D(1, (5,5,3), strides=1, padding='SAME', kernel_initializer=tf.random_normal_initializer(0,0.02))(x2)

    main_out = layers.Conv3DTranspose(1,(3,3,3),strides=(2,2,1), padding='SAME', kernel_initializer=tf.random_normal_initializer(0,0.02))(x3)
    main_out = layers.MaxPool3D(pool_size=(1,1,3))(main_out)
    main_out = tf.keras.activations.sigmoid(main_out)
    # main_out = (batch, x, y, 1)
    
    model = tf.keras.Model(inputs=[inputs,prev_inputs], outputs=[main_out, rnn_out])
    return model
Example #19
def get_model_8L_(width=128, height=128, depth=64):
    """Build a 3D convolutional neural network model."""

    inputs = keras.Input((width, height, depth, 1))

    x = layers.Conv3D(filters=64, kernel_size=3, activation="relu")(inputs)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization(center=True, scale=True)(x)
    x = layers.Dropout(0.2)(x)

    x = layers.Conv3D(filters=64, kernel_size=3, activation="relu")(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization(center=True, scale=True)(x)
    x = layers.Dropout(0.5)(x)


    ### add 64N layer ##################################################
    x = layers.Conv3D(filters=64, kernel_size=3, activation="relu")(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization(center=True, scale=True)(x)
    x = layers.Dropout(0.3)(x)

    x = layers.Conv3D(filters=128, kernel_size=3, activation="relu")(x)
    x = layers.MaxPool3D(pool_size=1)(x)
    x = layers.BatchNormalization(center=True, scale=True)(x)
    x = layers.Dropout(0.3)(x)

    x = layers.Conv3D(filters=256, kernel_size=3, activation="relu")(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(units=512, activation="relu")(x)
    x = layers.Dropout(0.3)(x)

    outputs = layers.Dense(units=1, activation="sigmoid")(x)
    # outputs = layers.Dense(units=1, activation="tanh", )(x)
    # outputs = layers.Dense(units=1, activation="relu", )(x)

    model = keras.Model(inputs, outputs, name="3dcnn")
    return model
Example #20
def get_model(width=224, height=224, depth=32):
    """Build a 3D convolutional neural network model."""

    inputs = keras.Input((width, height, depth, 1))

    x = layers.Conv3D(filters=64,
                      kernel_size=3,
                      activation="relu",
                      padding='same')(inputs)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=64,
                      kernel_size=3,
                      activation="relu",
                      padding='same')(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=128,
                      kernel_size=3,
                      activation="relu",
                      padding='same')(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=256,
                      kernel_size=3,
                      activation="relu",
                      padding='same')(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(units=512, activation="relu")(x)
    x = layers.Dropout(0.3)(x)

    outputs = layers.Dense(units=51, activation="softmax")(x)

    model = keras.Model(inputs, outputs, name="3dcnn")
    return model
Example #21
def FirstConvolution(inputs):
    x = layers.BatchNormalization()(inputs)
    x = layers.Activation('relu')(x)
    x = layers.Conv3D(filters=bn_size * growth_rate,
                      kernel_size=(3, 7, 7),
                      activation=None,
                      strides=2,
                      padding='same',
                      use_bias=False)(x)

    x = layers.MaxPool3D(pool_size=(2, 2, 2), strides=1, padding='same')(x)
    return x
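
bn_size and growth_rate are module-level hyperparameters of the surrounding DenseNet-style code and are not shown here; the values in the sketch below are placeholders, not taken from the original source, and the usual from tensorflow import keras is assumed:

# Placeholder hyperparameters so the stem can be exercised on its own.
bn_size = 4
growth_rate = 32  # stem width becomes bn_size * growth_rate = 128 filters

inputs = keras.Input((16, 112, 112, 3))  # (frames, height, width, channels) is an assumed input
stem = FirstConvolution(inputs)          # strided (3, 7, 7) conv followed by max pooling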
Example #22
def build_model(input_shape, target_size, dropout=0):
    conv_args = dict(kernel_size=2, padding="valid")

    model = tf.keras.models.Sequential([
        layers.Conv3D(16, input_shape=input_shape, **conv_args),
        layers.LeakyReLU(),
        layers.MaxPool3D(pool_size=2),
        #
        layers.Conv3D(16, **conv_args),
        layers.LeakyReLU(),
        layers.MaxPool3D(pool_size=2),
        #
        layers.Conv3D(16, **conv_args),
        layers.LeakyReLU(),
        layers.MaxPool3D(pool_size=2),
        #
        layers.Conv3D(16, **conv_args),
        layers.LeakyReLU(),
        layers.MaxPool3D(pool_size=2),
        #
        layers.Conv3D(16, **conv_args),
        layers.LeakyReLU(),
        layers.MaxPool3D(pool_size=2),
        #
        layers.Flatten(),
        layers.Dropout(dropout),
        #
        layers.Dense(128),
        layers.LeakyReLU(),
        layers.Dropout(dropout),
        #
        layers.Dense(64),
        layers.LeakyReLU(),
        layers.Dropout(dropout),
        #
        layers.Dense(target_size, activation="tanh"),
        layers.Lambda(scale_1p2),
    ])

    return model
Example #23
    def __init__(self, hparams: Dict, batch_size: int, scenario: Base3DScenario):
        super().__init__(hparams, batch_size)
        self.scenario = scenario

        self.raster_debug_pubs = [
            rospy.Publisher(f'classifier_raster_debug_{i}', OccupancyStamped, queue_size=10, latch=False) for i in
            range(4)]
        self.local_env_bbox_pub = rospy.Publisher('local_env_bbox', BoundingBox, queue_size=10, latch=True)

        self.classifier_dataset_hparams = self.hparams['classifier_dataset_hparams']
        self.dynamics_dataset_hparams = self.classifier_dataset_hparams['fwd_model_hparams']['dynamics_dataset_hparams']
        self.true_state_keys = self.classifier_dataset_hparams['true_state_keys']
        self.pred_state_keys = [add_predicted(k) for k in self.classifier_dataset_hparams['predicted_state_keys']]
        self.pred_state_keys.append(add_predicted('stdev'))
        self.local_env_h_rows = self.hparams['local_env_h_rows']
        self.local_env_w_cols = self.hparams['local_env_w_cols']
        self.local_env_c_channels = self.hparams['local_env_c_channels']
        self.rope_image_k = self.hparams['rope_image_k']

        # TODO: add stdev to states keys?
        self.state_keys = self.hparams['state_keys']
        self.action_keys = self.hparams['action_keys']

        self.conv_layers = []
        self.pool_layers = []
        for n_filters, kernel_size in self.hparams['conv_filters']:
            conv = layers.Conv3D(n_filters,
                                 kernel_size,
                                 activation='relu',
                                 kernel_regularizer=keras.regularizers.l2(self.hparams['kernel_reg']),
                                 bias_regularizer=keras.regularizers.l2(self.hparams['bias_reg']))
            pool = layers.MaxPool3D(self.hparams['pooling'])
            self.conv_layers.append(conv)
            self.pool_layers.append(pool)

        if self.hparams['batch_norm']:
            self.batch_norm = layers.BatchNormalization()

        self.dense_layers = []
        for hidden_size in self.hparams['fc_layer_sizes']:
            dense = layers.Dense(hidden_size,
                                 activation='relu',
                                 kernel_regularizer=keras.regularizers.l2(self.hparams['kernel_reg']),
                                 bias_regularizer=keras.regularizers.l2(self.hparams['bias_reg']))
            self.dense_layers.append(dense)

        # self.local_env_shape = (self.local_env_h_rows, self.local_env_w_cols, self.local_env_c_channels)
        # self.encoder = tf.keras.applications.ResNet50(include_top=False, weights=None, input_shape=self.local_env_shape)

        self.lstm = layers.LSTM(self.hparams['rnn_size'], unroll=True, return_sequences=True)
        self.output_layer = layers.Dense(1, activation=None)
        self.sigmoid = layers.Activation("sigmoid")
Example #24
    def __init__(self):
        super(Sunset3DModel, self).__init__()
        self.conv1 = layers.Conv3D(12, (3, 3, 3),
                                   activation='relu',
                                   padding='same')
        self.conv2 = layers.Conv3D(24, (3, 3, 3),
                                   activation='relu',
                                   padding='same')
        self.maxpooling = layers.MaxPool3D(pool_size=(2, 2, 2))
        self.flatten = layers.Flatten()
        self.dense1 = layers.Dense(1024, activation=tf.nn.relu)
        self.dense2 = layers.Dense(1024, activation=tf.nn.relu)
        self.dense3 = layers.Dense(4, activation=None)
Example #25
def FirstConvolution(inputs):
  x = layers.BatchNormalization()(inputs)
  x = layers.Activation('relu')(x)
  x = layers.Conv3D(filters=16,
                    kernel_size=(3, 7, 7),
                    activation=None,
                    strides=2,
                    padding='same',
                    use_bias=False)(x)
  print('First 3D Conv:', x.shape)
  x = layers.MaxPool3D(pool_size=(3, 3, 3),
                       strides=(1, 2, 2),
                       padding='same')(x)
  return x
Example #26
def create_model_fc(input, output):
    print('Creating model...')

    model = models.Sequential(
        [
            layers.Conv3D(filters=16,
                          kernel_size=3,
                          activation='relu',
                          input_shape=input),
            layers.Dropout(rate=0.4),
            layers.Conv3D(filters=16, kernel_size=3, activation='relu'),
            layers.Dropout(rate=0.4),
            layers.MaxPool3D(pool_size=2),
            layers.Conv3D(filters=32, kernel_size=3, activation='relu'),
            layers.Dropout(rate=0.4),
            layers.Conv3D(filters=32, kernel_size=3, activation='relu'),
            layers.Dropout(rate=0.4),
            layers.MaxPool3D(pool_size=2),
            layers.Conv3D(filters=64, kernel_size=3, activation='relu'),
            layers.Dropout(rate=0.4),
            layers.Conv3D(filters=64, kernel_size=3, activation='relu'),
            layers.Dropout(rate=0.4),
            layers.MaxPool3D(pool_size=2),
            layers.Flatten(),
            # layers.Dense(10, activation='relu'),
            # layers.Dropout(0.2),
            layers.Dense(output, activation='softmax')
        ],
        name='VGG-6_3D')
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    print('Creation successful!')

    return model
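
Because the model is compiled with sparse_categorical_crossentropy inside the function, it can be smoke-tested directly on random data; the input shape, class count, and numpy usage below are assumptions:

import numpy as np

model = create_model_fc(input=(64, 64, 64, 1), output=4)
x_dummy = np.random.rand(2, 64, 64, 64, 1).astype("float32")
y_dummy = np.random.randint(0, 4, size=(2,))
model.fit(x_dummy, y_dummy, epochs=1, batch_size=2)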
Example #27
def make_stack_net_v3(inp_shape, batch_size, params):
    """
    Just an Autoencoder. Used to verify against v4
    """
    filter_size = [2, 2, 2]
    n_filters = [64, 128, 256, 512]

    inputs = {
        'conditioned_occ': tf.keras.Input(batch_size=batch_size,
                                          shape=inp_shape),
        'known_occ': tf.keras.Input(batch_size=batch_size, shape=inp_shape),
        'known_free': tf.keras.Input(batch_size=batch_size, shape=inp_shape),
    }

    # Autoencoder
    x = tfl.concatenate([inputs['known_occ'], inputs['known_free']], axis=4)

    for n_filter in [64, 128, 256, 512]:
        x = tfl.Conv3D(n_filter, (2, 2, 2), use_bias=True, padding="same")(x)
        x = tfl.Activation(tf.nn.relu)(x)
        x = tfl.MaxPool3D((2, 2, 2))(x)

    x = tfl.Flatten()(x)
    x = tfl.Dense(params['num_latent_layers'], activation='relu')(x)
    x = tfl.Dense(32768, activation='relu')(x)
    x = tfl.Reshape((4, 4, 4, 512))(x)

    for n_filter in [256, 128, 64, 12]:
        x = tfl.Conv3DTranspose(n_filter, (2, 2, 2), use_bias=True, strides=2)(x)
        x = tfl.Activation(tf.nn.relu)(x)

    x = tfl.Conv3D(1, (1, 1, 1), use_bias=True)(x)
    x = tfl.Activation(tf.nn.sigmoid)(x)

    output = {"predicted_occ": x, "predicted_free": 1 - x}
    return tf.keras.Model(inputs=inputs, outputs=output)
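
A minimal smoke test for the autoencoder above, assuming tensorflow is imported as tf and a 64-cell cubic grid, which matches the decoder's fixed 4 -> 8 -> 16 -> 32 -> 64 upsampling path (both assumptions):

net = make_stack_net_v3(inp_shape=(64, 64, 64, 1), batch_size=2,
                        params={'num_latent_layers': 200})
batch = {
    'conditioned_occ': tf.zeros((2, 64, 64, 64, 1)),
    'known_occ': tf.zeros((2, 64, 64, 64, 1)),
    'known_free': tf.zeros((2, 64, 64, 64, 1)),
}
out = net(batch)
print(out['predicted_occ'].shape)  # (2, 64, 64, 64, 1)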
Example #28
def build_model(columns, rows, depth, output_units):
    # Based on: https://keras.io/examples/vision/3D_image_classification/

    inputs = keras.Input((depth, columns, rows, 1))
    x = layers.Conv3D(filters=64,
                      kernel_size=settings.KERNEL_SIZE,
                      activation="relu")(inputs)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(units=512, activation="relu")(x)
    x = layers.Dropout(0.3)(x)

    outputs = layers.Dense(units=output_units, activation="sigmoid")(x)

    model = keras.Model(inputs, outputs, name="3DCNN")
    return model
Example #29
    def __init__(self,
                 n_class,
                 n_layer,
                 root_filters,
                 kernal_size=3,
                 pool_size=2,
                 use_bn=False,
                 use_res=True,
                 padding='SAME',
                 concat_or_add='concat'):
        super().__init__()
        self.dw_layers = dict()
        self.up_layers = dict()
        self.max_pools = dict()
        for layer in range(n_layer):
            filters = 2**layer * root_filters
            dict_key = str(n_layer - layer - 1)
            dw = _DownSampling(filters, kernal_size, 'dw_%d' % layer, use_bn,
                               use_res)
            self.dw_layers[dict_key] = dw
            if layer < n_layer - 1:
                pool = layers.MaxPool3D(pool_size, padding=padding)
                self.max_pools[dict_key] = pool

        for layer in range(n_layer - 2, -1, -1):
            filters = 2**(layer + 1) * root_filters
            dict_key = str(n_layer - layer - 1)
            up = _UpSampling(filters, kernal_size, pool_size, concat_or_add)
            self.up_layers[dict_key] = up

        stddev = np.sqrt(2 / (kernal_size**2 * root_filters))
        self.conv_out = layers.Conv3D(
            n_class,
            1,
            padding=padding,
            use_bias=False,
            kernel_initializer=initializers.TruncatedNormal(stddev=stddev),
            name='conv_out')