Example #1
def _down_block(tensor,
                num_filters,
                kernel_size=3,
                padding='same',
                strides=2,
                shortcut=True,
                activation='lrelu',
                dropout_rate=None,
                dropout_wrn=False,
                down_sampling='strided_conv',
                initializer='orthogonal',
                batchnorm=True,
                name=''):

    tensor = Conv3D(kernel_size=1, filters=num_filters)(tensor)
    tensor = _resnet_block(tensor,
                           num_filters,
                           kernel_size,
                           shortcut=shortcut,
                           padding=padding,
                           activation=activation,
                           initializer=initializer,
                           dropout_rate=dropout_rate,
                           dropout_wrn=dropout_wrn,
                           batchnorm=batchnorm,
                           name=name)

    skip_tensor = tensor

    # down-sampling
    if down_sampling == 'strided_conv':
        tensor = Conv3D(filters=num_filters,
                        kernel_size=kernel_size * 2 - 1,
                        strides=strides,
                        padding=padding,
                        kernel_initializer=initializer)(tensor)

    elif down_sampling == 'maxpool':
        tensor = Conv3D(filters=num_filters,
                        kernel_size=kernel_size,
                        padding=padding,
                        kernel_initializer=initializer)(tensor)

        tensor = MaxPool3D(strides)(tensor)

    elif down_sampling == 'avgpool':
        tensor = Conv3D(filters=num_filters,
                        kernel_size=kernel_size,
                        padding=padding,
                        kernel_initializer=initializer)(tensor)

        tensor = AvgPool3D(strides)(tensor)

    else:
        raise ValueError(
            "down_sampling should be one of ['strided_conv', 'maxpool', 'avgpool']")

    return tensor, skip_tensor
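A minimal usage sketch (assumptions: _resnet_block is defined elsewhere in the same module, and Conv3D/MaxPool3D/AvgPool3D come from tensorflow.keras.layers):

# Sketch only: chain two _down_block calls to form the encoder half of a 3D U-Net.
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inputs = Input(shape=(32, 32, 32, 1))
x, skip1 = _down_block(inputs, num_filters=16, name='down1')   # 32^3 -> 16^3
x, skip2 = _down_block(x, num_filters=32, name='down2')        # 16^3 -> 8^3
encoder = Model(inputs, [x, skip1, skip2], name='encoder_sketch')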
Example #2
    def cnn_model_3d_mdn(self,
                         voxel_dim,
                         deviation_channels,
                         num_of_mixtures=5):
        """Build the 3D Model with a Mixture Density Network output the gives parameters of a Gaussian Mixture Model as output, to be used if the system is expected to be collinear (Multi-Stage Assembly Systems) i.e. a single input can have multiple outputs
			Functions for predicting and sampling from a MDN.py need to used when deploying a MDN based model
			refer https://publications.aston.ac.uk/id/eprint/373/1/NCRG_94_004.pdf for more details on the working of a MDN model
			refer https://arxiv.org/pdf/1709.02249.pdf to understand how a MDN model can be leveraged to estimate the epistemic and aleatoric unceratninty present in manufacturing sytems based on the data collected

			:param voxel_dim: The voxel dimension of the input, required to build input to the 3D CNN model
			:type voxel_dim: int (required)

			:param deviation_channels: The number of deviation channels in the input structure, required to build input to the 3D CNN model
			:type deviation_channels: int (required)

			:param num_of_mixtures: The number of mixtures in the Gaussian Mixture Model output, defaults to 5, can be increased if higher collinearity is expected
			:type num_of_mixtures: int
		"""

        assert self.model_type == "regression", "Mixture Density Network Should be a Regression Model"

        from tensorflow.keras.layers import Conv3D, MaxPool3D, Flatten, Dense
        from tensorflow.keras.models import Sequential
        from tensorflow.keras import regularizers
        import mdn

        model = Sequential()
        model.add(
            Conv3D(32,
                   kernel_size=(5, 5, 5),
                   strides=(2, 2, 2),
                   activation='relu',
                   input_shape=(voxel_dim, voxel_dim, voxel_dim,
                                deviation_channels)))
        model.add(
            Conv3D(32,
                   kernel_size=(4, 4, 4),
                   strides=(2, 2, 2),
                   activation='relu'))
        model.add(
            Conv3D(32,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu'))
        model.add(MaxPool3D(pool_size=(2, 2, 2)))
        model.add(Flatten())
        model.add(
            Dense(128,
                  kernel_regularizer=regularizers.l2(0.02),
                  activation='relu'))
        #model.add(Dropout(0.3))
        model.add(Dense(self.output_dimension, activation='linear'))  # regression output (model_type is asserted above)
        model.add(mdn.MDN(self.output_dimension, num_of_mixtures))
        model.compile(loss=mdn.get_mixture_loss_func(self.output_dimension,
                                                     num_of_mixtures),
                      optimizer='adam')

        print("3D CNN Mixture Density Network model successfully compiled")
        return model
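The docstring above notes that prediction and sampling helpers are needed at deployment time; here is a minimal sampling sketch, assuming the imported mdn module is the keras-mdn-layer package (whose get_mixture_loss_func is used above) and that model is the network returned by this method:

# Sketch only: draw one sample from the predicted Gaussian mixture.
import numpy as np
import mdn

output_dimension, num_of_mixtures = 6, 5      # must match the values used to build the model
x = np.random.rand(1, 64, 64, 64, 3)          # dummy voxel input (voxel_dim=64, 3 deviation channels)
params = model.predict(x)                     # concatenated mixture weights, means and scales
y_sample = mdn.sample_from_output(params[0], output_dimension, num_of_mixtures)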
Example #3
def Fast_body(x, layers, block):
    fast_inplanes = 8
    lateral = []
    x = Conv_BN_ReLU(8, kernel_size=(5, 7, 7), strides=(1, 2, 2))(x)
    x = MaxPool3D(pool_size=(1, 3, 3), strides=(1, 2, 2), padding='same')(x)
    lateral_p1 = Conv3D(8 * 2,
                        kernel_size=(5, 1, 1),
                        strides=(8, 1, 1),
                        padding='same',
                        use_bias=False)(x)
    lateral.append(lateral_p1)
    x, fast_inplanes = make_layer_fast(x,
                                       block,
                                       8,
                                       layers[0],
                                       head_conv=3,
                                       fast_inplanes=fast_inplanes)
    lateral_res2 = Conv3D(32 * 2,
                          kernel_size=(5, 1, 1),
                          strides=(8, 1, 1),
                          padding='same',
                          use_bias=False)(x)
    lateral.append(lateral_res2)
    x, fast_inplanes = make_layer_fast(x,
                                       block,
                                       16,
                                       layers[1],
                                       stride=2,
                                       head_conv=3,
                                       fast_inplanes=fast_inplanes)
    lateral_res3 = Conv3D(64 * 2,
                          kernel_size=(5, 1, 1),
                          strides=(8, 1, 1),
                          padding='same',
                          use_bias=False)(x)
    lateral.append(lateral_res3)
    x, fast_inplanes = make_layer_fast(x,
                                       block,
                                       32,
                                       layers[2],
                                       stride=2,
                                       head_conv=3,
                                       fast_inplanes=fast_inplanes)
    lateral_res4 = Conv3D(128 * 2,
                          kernel_size=(5, 1, 1),
                          strides=(8, 1, 1),
                          padding='same',
                          use_bias=False)(x)
    lateral.append(lateral_res4)
    x, fast_inplanes = make_layer_fast(x,
                                       block,
                                       64,
                                       layers[3],
                                       stride=2,
                                       head_conv=3,
                                       fast_inplanes=fast_inplanes)
    x = GlobalAveragePooling3D()(x)
    return x, lateral
Example #4
    def cnn_model_3d_aleatoric(self, voxel_dim, deviation_channels):
        """Build the 3D Model with a heteroeskedastic aleatoric loss, this enables different standard deviation of each predicted value, to be used when the expected sensor noise is heteroskedastic

			:param voxel_dim: The voxel dimension of the input, required to build input to the 3D CNN model
			:type voxel_dim: int (required)

			:param deviation_channels: The number of deviation channels in the input structure, required to build input to the 3D CNN model
			:type deviation_channels: int (required)
		"""
        if (self.model_type == "regression"):
            final_layer_avt = 'linear'

        if (self.model_type == "classification"):
            final_layer_avt = 'softmax'

        def myloss(y_true, y_pred):
            prediction = y_pred[:, 0:self.output_dimension]
            log_variance = y_pred[:,
                                  self.output_dimension:self.output_dimension +
                                  1]
            loss = tf.reduce_mean(0.5 * tf.exp(-1 * log_variance) *
                                  tf.square(tf.abs(y_true - prediction)) +
                                  0.5 * log_variance)
            return loss

        import tensorflow as tf
        from tensorflow.keras.layers import Conv3D, MaxPool3D, Flatten, Dense
        from tensorflow.keras.models import Sequential
        from tensorflow.keras import regularizers

        model = Sequential()
        model.add(
            Conv3D(32,
                   kernel_size=(5, 5, 5),
                   strides=(2, 2, 2),
                   activation='relu',
                   input_shape=(voxel_dim, voxel_dim, voxel_dim,
                                deviation_channels)))
        model.add(
            Conv3D(32,
                   kernel_size=(4, 4, 4),
                   strides=(2, 2, 2),
                   activation='relu'))
        model.add(
            Conv3D(32,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu'))
        model.add(MaxPool3D(pool_size=(2, 2, 2)))
        model.add(Flatten())
        model.add(
            Dense(128,
                  kernel_regularizer=regularizers.l2(0.02),
                  activation='relu'))
        #model.add(Dropout(0.3))
        model.add(Dense(self.output_dimension + 1, activation=final_layer_avt))  # predictions plus one log-variance unit, as expected by myloss
        model.compile(loss=myloss, optimizer='adam', metrics=['mae'])

        print("3D CNN model Aleatoric successfully compiled")
        return model
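At inference time the output can be split the same way myloss splits it; a minimal sketch, assuming model is the compiled network above and output_dimension matches self.output_dimension:

# Sketch only: recover the prediction and the aleatoric variance per sample.
import numpy as np

output_dimension = 6
x = np.random.rand(1, 64, 64, 64, 3)
y_pred = model.predict(x)
mean = y_pred[:, :output_dimension]
aleatoric_variance = np.exp(y_pred[:, output_dimension:output_dimension + 1])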
Example #5
def AutoEncoderModel():
    # encoder
    X_input = Input((16, 128, 128, 3))

    X = Conv3D(32, 3, padding='same')(X_input)
    X = BatchNormalization()(X)
    X = LeakyReLU()(X)
    X = MaxPool3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid')(X)
    # current shape is 8x64x64x32
    X = Conv3D(48, 3, padding='same')(X)
    X = BatchNormalization()(X)
    X = LeakyReLU()(X)
    X = MaxPool3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid')(X)
    # current shape is 4x32x32x48
    X = Conv3D(64, 3, padding='same')(X)
    X = BatchNormalization()(X)
    X = LeakyReLU()(X)
    X = MaxPool3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid')(X)
    # current shape is 2x16x16x64
    X = Conv3D(64, 3, padding='same')(X)
    X = BatchNormalization()(X)
    X = LeakyReLU()(X)
    X = MaxPool3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(X)
    # current shape is 2x16x16x64
    # decoder

    X = Conv3DTranspose(48, 2, strides=(2, 2, 2), padding='valid')(X)
    X = BatchNormalization()(X)
    X = LeakyReLU()(X)
    # current shape is 4x32x32x48
    X = Conv3DTranspose(32, 2, strides=(2, 2, 2), padding='valid')(X)
    X = BatchNormalization()(X)
    X = LeakyReLU()(X)
    # current shape is 8x64x64x32
    X = Conv3DTranspose(32, 2, strides=(2, 2, 2), padding='valid')(X)
    X = BatchNormalization()(X)
    X = LeakyReLU()(X)
    # current shape is 16x128x128x32
    X = Conv3D(3, 3, strides=(1, 1, 1), padding='same')(X)
    X = Activation('sigmoid')(X)
    # current shape is 16x128x128x3

    model = Model(inputs=X_input, outputs=X, name='AutoEncoderModel')
    return model
Example #6
def video_tower(
        shape=(5, 100, 100, 3), drop_ratio=0.5, kreg=1e-4, weights=None):
    inputs = Input(shape=shape, name='v_in')  # Input
    x = Conv3D(96, (5, 7, 7),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(kreg),
               data_format='channels_last',
               name='s_av_v_conv1')(inputs)  # conv1
    x = MaxPool3D((1, 3, 3), padding='same', name='s_av_v_pool1')(x)  # pool1
    x = Conv3D(256, (1, 5, 5),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(kreg),
               data_format='channels_last',
               name='s_av_v_conv2')(x)  # conv2
    x = MaxPool3D((1, 3, 3), padding='same', name='s_av_v_pool2')(x)  # pool2
    x = Conv3D(256, (1, 3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(kreg),
               data_format='channels_last',
               name='s_av_v_conv3')(x)  # conv3
    x = Conv3D(256, (1, 3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(kreg),
               data_format='channels_last',
               name='s_av_v_conv4')(x)  # conv4
    x = Conv3D(256, (1, 3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(kreg),
               data_format='channels_last',
               name='s_av_v_conv5')(x)  # conv5
    x = MaxPool3D((1, 3, 3), padding='same', name='s_av_v_pool5')(x)  # pool5
    x = Conv3D(512, (1, 6, 6),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(kreg),
               data_format='channels_last',
               name='s_av_v_conv6')(x)  # conv6
    x = Flatten()(x)
    return inputs, x
Example #7
File: model.py  Project: jhcha08/YasuoNet
def build_sequence_model(input_shape_dict):
    video_input_shape = [None] + input_shape_dict['video']
    audio_input_shape = [None] + input_shape_dict['audio']
    weight_decay = 0.005

    # Video 3D Conv layers
    video_input = Input(video_input_shape)
    x = video_input
    x = TimeDistributed(
        Conv3D(8, (3, 3, 3),
               strides=(1, 1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='he_uniform',
               kernel_regularizer=l2(weight_decay)))(x)
    x = TimeDistributed(MaxPool3D((2, 2, 2), strides=(2, 2, 2),
                                  padding='same'))(x)
    video_output = TimeDistributed(Flatten())(x)
    print(x.shape, video_output.shape)

    # Audio 2D Conv layers
    audio_input = Input(audio_input_shape)
    x = Reshape([-1] + input_shape_dict['audio'] + [1])(audio_input)  # add channel dim (-1 stands in for the variable time axis)
    x = TimeDistributed(
        Conv2D(4, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='he_uniform',
               kernel_regularizer=l2(weight_decay)))(x)
    x = TimeDistributed(MaxPool2D((2, 2), strides=(2, 2), padding='same'))(x)
    audio_output = TimeDistributed(Flatten())(x)

    # LSTM layers
    x = concatenate([video_output, audio_output])
    print(x.shape)
    x = LSTM(16,
             activation='relu',
             kernel_initializer='he_uniform',
             kernel_regularizer=l2(weight_decay))(x)
    print(x.shape)

    # Fully-connected layers
    x = Dense(16,
              activation='relu',
              kernel_initializer='he_uniform',
              kernel_regularizer=l2(weight_decay))(x)
    #     x = Dropout(0.2)(x)
    fc_output = Dense(1,
                      activation='sigmoid',
                      kernel_initializer='he_uniform',
                      kernel_regularizer=l2(weight_decay))(x)

    model = Model(inputs=[video_input, audio_input], outputs=fc_output)

    return model
Example #8
    def local_network_function(input_img):

        # encoder
        conv1 = Conv3D(256, (2, 2, 2), activation="relu",
                       padding="same")(input_img)
        pool1 = MaxPool3D(pool_size=(2, 2, 2))(conv1)
        conv2 = Conv3D(128, (2, 2, 2), activation="relu",
                       padding="same")(pool1)
        pool2 = MaxPool3D(pool_size=(2, 2, 2))(conv2)
        dens1 = Dense(64, activation="relu")(pool2)
        dens2 = Dense(32, activation="relu")(dens1)
        dens3 = Dense(64, activation="relu")(dens2)
        dens4 = Dense(64, activation="relu")(dens3)
        dens5 = Dense(64, activation="relu")(dens4)
        dens6 = Dense(64, activation="relu")(dens5)

        dense_out = Dense(1, activation=None)(dens6)

        return dense_out
Example #9
    def cnn_model_3d(self, voxel_dim, deviation_channels):
        """Build the 3D Model using the specified loss function, the inputs are parsed from the assemblyconfig_<case_study_name>.py file

			:param voxel_dim: The voxel dimension of the input, required to build input to the 3D CNN model
			:type voxel_dim: int (required)

			:param deviation_channels: The number of deviation channels in the input structure, required to build input to the 3D CNN model
			:type deviation_channels: int (required)
		"""
        from tensorflow.keras.layers import Conv3D, MaxPool3D, Flatten, Dense, Dropout
        from tensorflow.keras.models import Sequential
        from tensorflow.keras import regularizers

        if (self.output_type == "regression"):
            final_layer_avt = 'linear'

        if (self.output_type == "classification"):
            final_layer_avt = 'softmax'

        model = Sequential()
        model.add(
            Conv3D(32,
                   kernel_size=(5, 5, 5),
                   strides=(2, 2, 2),
                   activation='relu',
                   input_shape=(voxel_dim, voxel_dim, voxel_dim,
                                deviation_channels)))
        model.add(
            Conv3D(32,
                   kernel_size=(4, 4, 4),
                   strides=(2, 2, 2),
                   activation='relu'))
        model.add(
            Conv3D(32,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu'))
        model.add(MaxPool3D(pool_size=(2, 2, 2)))
        model.add(Flatten())
        model.add(
            Dense(64,
                  kernel_regularizer=regularizers.l2(self.regularizer_coeff),
                  activation='relu'))
        #model.add(Dropout(0.2))
        model.add(
            Dense(64,
                  kernel_regularizer=regularizers.l2(self.regularizer_coeff),
                  activation='relu'))
        model.add(Dense(self.output_dimension, activation=final_layer_avt))
        model.compile(loss=self.loss_function,
                      optimizer=self.optimizer,
                      metrics=['mae'])

        print("3D CNN model successfully compiled")
        return model
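A minimal training sketch for the compiled model, assuming voxel_dim, deviation_channels and self.output_dimension match the values below:

# Sketch only: fit on random voxel data to check that shapes line up.
import numpy as np

voxel_dim, deviation_channels, output_dimension = 64, 3, 6
x_train = np.random.rand(8, voxel_dim, voxel_dim, voxel_dim, deviation_channels)
y_train = np.random.rand(8, output_dimension)
model.fit(x_train, y_train, batch_size=4, epochs=1)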
Example #10
File: main.py  Project: rboyes/nifti
def create_unet(nx, ny, nz):

    inputs = Input((nx, ny, nz, 1))
    conv1 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPool3D(pool_size=(2, 2, 2))(conv1)

    conv2 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPool3D(pool_size=(2, 2, 2))(conv2)

    conv3 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(pool2)
    conv3 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPool3D(pool_size=(2, 2, 2))(conv3)

    conv4 = Conv3D(256, (3, 3, 3), activation='relu', padding='same')(pool3)
    conv4 = Conv3D(256, (3, 3, 3), activation='relu', padding='same')(conv4)

    up5 = UpSampling3D(size=(2, 2, 2))(conv4)
    merge5 = concatenate([up5, conv3])
    conv5 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(merge5)
    conv5 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(conv5)

    up6 = UpSampling3D(size=(2, 2, 2))(conv5)
    merge6 = concatenate([up6, conv2])
    conv6 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(merge6)
    conv6 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv6)

    up7 = UpSampling3D(size=(2, 2, 2))(conv6)
    merge7 = concatenate([up7, conv1])
    conv7 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(merge7)
    conv7 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv7)

    conv8 = Conv3D(1, (1, 1, 1), activation='sigmoid')(conv7)

    model = Model(inputs=inputs, outputs=conv8)

    model.compile(optimizer=Adam(lr=1.0E-5), loss=overlap_loss)

    return model
Example #11
 def __init__(self):
     super().__init__()
     leaky_relu = LeakyReLU(alpha=0.01)
     l2_regularizer = l2(0.001)
     self.add(Conv3D(16, [3, 3, 1], activation=leaky_relu, kernel_regularizer=l2_regularizer))
     self.add(Conv3D(16, [1, 1, 4], activation=leaky_relu, kernel_regularizer=l2_regularizer))
     self.add(BatchNormalization())
     self.add(MaxPool3D([1, 1, 2]))
     self.add(Conv3D(32, [3, 3, 1], activation=leaky_relu, kernel_regularizer=l2_regularizer))
     self.add(Conv3D(32, [1, 1, 4], activation=leaky_relu, kernel_regularizer=l2_regularizer))
     self.add(BatchNormalization())
     self.add(MaxPool3D([1, 1, 2]))
     self.add(Conv3D(64, [3, 3, 1], activation=leaky_relu, kernel_regularizer=l2_regularizer))
     self.add(Conv3D(64, [1, 1, 4], activation=leaky_relu, kernel_regularizer=l2_regularizer))
     self.add(BatchNormalization())
     self.add(MaxPool3D([1, 1, 2]))
     self.add(Conv3D(128, [4, 4, 1], activation=leaky_relu, kernel_regularizer=l2_regularizer))
     self.add(Conv3D(128, [1, 1, 4], activation=leaky_relu, kernel_regularizer=l2_regularizer))
     self.add(BatchNormalization())
     self.add(MaxPool3D([1, 1, 2]))
     self.add(Conv3D(128, [1, 1, 4], activation=leaky_relu, kernel_regularizer=l2_regularizer))
     self.add(BatchNormalization())
     self.add(MaxPool3D([1, 1, 2]))
     self.add(Conv3D(128, [1, 1, 4], activation=leaky_relu, kernel_regularizer=l2_regularizer))
     self.add(BatchNormalization())
     self.add(MaxPool3D([1, 1, 2]))
     self.add(Conv3D(32, [1, 1, 9], activation=leaky_relu, kernel_regularizer=l2_regularizer))
     self.add(Conv3D(16, [1, 1, 1], activation=leaky_relu, kernel_regularizer=l2_regularizer))
     self.add(Conv3D(1, [1, 1, 1], activation=sigmoid))
     self.add(Reshape([1]))
Example #12
def build_classification_network(batch_size: int, nb_points: int,
                                 subdivisions: Tuple[int, int,
                                                     int], variance: float):
    """
    Build a classification network, using modified 3D Fisher Vectors as point cloud featurizer at the input.
    """
    points = Input(batch_shape=(batch_size, nb_points, 3), name="points")

    fv = Modified3DFisherVectors(subdivisions, variance, flatten=False)(points)

    fv = Reshape((-1, ) + subdivisions, name="reshape_3dmFV")(fv)

    x = Permute((2, 3, 4, 1), name="permute_3dmFV")(fv)

    # convolve
    x = inception_block(x, nb_filters=64, name="block1")
    x = inception_block(x, nb_filters=128, name="block2")
    x = inception_block(x, nb_filters=256, name="block3")
    x = MaxPool3D(name="block3_maxpool")(x)

    x = inception_block(x, nb_filters=256, name="block4")
    x = inception_block(x, nb_filters=512, name="block5")
    x = MaxPool3D(name="block5_maxpool")(x)

    x = Flatten(name="flatten")(x)
    x = Dense(1024, name="fc1", activation="linear", use_bias=False)(x)
    x = BatchNormalization(name="fc1_bn", fused=True)(x)
    x = Activation("relu", name="fc1_relu")(x)
    x = Dropout(0.3, name="dp1")(x)
    x = Dense(256, name="fc2", activation="linear", use_bias=False)(x)
    x = BatchNormalization(name="fc2_bn", fused=True)(x)
    x = Activation("relu", name="fc2_relu")(x)
    x = Dropout(0.3, name="dp2")(x)
    x = Dense(128, name="fc3", activation="linear", use_bias=False)(x)
    x = BatchNormalization(name="fc3_bn", fused=True)(x)
    x = Activation("relu", name="fc3_relu")(x)
    x = Dropout(0.3, name="dp3")(x)
    x = Dense(40, name="output", activation="softmax")(x)

    return tf.keras.models.Model(points, x)
Example #13
def get_model(width=128, height=128, depth=64):
    inputs = Input((width, height, depth, 1))
    layer = Conv3D(filters=64, kernel_size=3, activation="relu")(inputs)
    layer = MaxPool3D(pool_size=2)(layer)
    layer = BatchNormalization()(layer)

    layer = Conv3D(filters=64, kernel_size=3, activation="relu")(layer)
    layer = MaxPool3D(pool_size=2)(layer)
    layer = BatchNormalization()(layer)

    layer = Conv3D(filters=128, kernel_size=3, activation="relu")(layer)
    layer = MaxPool3D(pool_size=2)(layer)
    layer = BatchNormalization()(layer)

    layer = Conv3D(filters=256, kernel_size=3, activation="relu")(layer)
    layer = MaxPool3D(pool_size=2)(layer)
    layer = BatchNormalization()(layer)

    layer = GlobalAveragePooling3D()(layer)
    layer = Dense(units=512, activation="relu")(layer)
    layer = Dropout(0.3)(layer)

    outputs = Dense(units=1, activation="sigmoid")(layer)

    # Define the model.
    model = Model(inputs, outputs, name="3Dcnn")
    initial_learning_rate = 0.0001
    lr_schedule = ExponentialDecay(initial_learning_rate,
                                   decay_steps=100000,
                                   decay_rate=0.96,
                                   staircase=True)

    model.compile(
        loss="binary_crossentropy",
        optimizer=Adam(learning_rate=lr_schedule),
        metrics=["acc"],
    )

    return model
Example #14
def static_video(shape=(5, 100, 100, 3), weights=None):
    """
    A static video model based on the description of arXiv:1906.10555v1 video tower, i.e., variation of VGGM.
    shape = (window size, height, width, chans)
    """
    inputs = Input(shape=shape, name='v_in')  # Input

    x = Conv3D(96, (5, 7, 7), padding='same', activation='relu',
               data_format='channels_last', name='s_v_conv1')(inputs)  # conv1
    x = MaxPool3D((1, 3, 3), padding='same', name='s_v_pool1')(x)  # pool1

    x = Conv3D(256, (1, 5, 5), padding='same', activation='relu',
               data_format='channels_last', name='s_v_conv2')(x)  # conv2
    x = MaxPool3D((1, 3, 3), padding='same', name='s_v_pool2')(x)  # pool2

    x = Conv3D(256, (1, 3, 3), padding='same', activation='relu',
               data_format='channels_last', name='s_v_conv3')(x)  # conv3

    x = Conv3D(256, (1, 3, 3), padding='same', activation='relu',
               data_format='channels_last', name='s_v_conv4')(x)  # conv4

    x = Conv3D(256, (1, 3, 3), padding='same', activation='relu',
               data_format='channels_last', name='s_v_conv5')(x)  # conv5
    x = MaxPool3D((1, 3, 3), padding='same', name='s_v_pool5')(x)  # pool5

    x = Conv3D(512, (1, 6, 6), padding='same', activation='relu',
               data_format='channels_last', name='s_v_conv6')(x)  # conv6

    x = Flatten()(x)
    x = Dense(256, activation='relu', name='s_v_fc7')(x)  # fc7
    outputs = Dense(2, activation='softmax', name='main_out')(x)  # fc7

    model = Model(inputs=inputs, outputs=outputs)

    if weights is not None:
        model.load_weights(weights, by_name=True)

    return model
Example #15
 def build(self, input_shape):
     if len(input_shape) == 2:
         self.conv_1 = Dense(100, activation='relu', input_shape=input_shape)
         self.max_pool_1 = Dropout(0.2)
         self.conv_2 = Dense(50, activation='relu')
         self.max_pool_2 = Dropout(0.2)
     elif len(input_shape) == 3:
         self.conv_1 = Conv1D(20, kernel_size=5, activation='relu', input_shape=input_shape)
         self.max_pool_1 = MaxPool1D()
         self.conv_2 = Conv1D(50, kernel_size=5, activation='relu')
         self.max_pool_2 = MaxPool1D()
     elif len(input_shape) == 4:
         self.conv_1 = Conv2D(20, kernel_size=5, activation='relu', input_shape=input_shape)
         self.max_pool_1 = MaxPool2D()
         self.conv_2 = Conv2D(50, kernel_size=5, activation='relu')
         self.max_pool_2 = MaxPool2D()
     elif len(input_shape) == 5:
         self.conv_1 = Conv3D(20, kernel_size=5, activation='relu', input_shape=input_shape)
         self.max_pool_1 = MaxPool3D()
         self.conv_2 = Conv3D(50, kernel_size=5, activation='relu')
         self.max_pool_2 = MaxPool3D()
     else:
         raise DimensionError("Input shape not covered by this model")
Example #16
    def conv3d(
        inputs: Tensor, 
        filters: int, 
        downsizing: bool = True, 
        loop: int = 2) -> Tensor:

        if downsizing:
            inputs = MaxPool3D(pool_size=(2, 2, 2))(inputs)
        x = inputs
        for _ in range(loop):
            x = Conv3D(filters, (3, 3, 3), strides=(1, 1, 1), use_bias=False, padding='same')(x)
            x = InstanceNormalization()(x)
            x = Activation('relu')(x)
        return x
Example #17
def Slow_body(x, lateral, layers, block):
    slow_inplanes = 64 + 64//8*2
    x = Conv_BN_ReLU(64, kernel_size=(1, 7, 7), strides=(1, 2, 2))(x)
    x = MaxPool3D(pool_size=(1, 3, 3), strides=(1, 2, 2), padding='same')(x)
    x = Concatenate()([x, lateral[0]])
    x, slow_inplanes = make_layer_slow(x, block, 64, layers[0], head_conv=1, slow_inplanes=slow_inplanes)
    x = Concatenate()([x, lateral[1]])
    x, slow_inplanes = make_layer_slow(x, block, 128, layers[1], stride=2, head_conv=1, slow_inplanes=slow_inplanes)
    x = Concatenate()([x, lateral[2]])
    x, slow_inplanes = make_layer_slow(x, block, 256, layers[2], stride=2, head_conv=1, slow_inplanes=slow_inplanes)
    x = Concatenate()([x, lateral[3]])
    x, slow_inplanes = make_layer_slow(x, block, 512, layers[3], stride=2, head_conv=1, slow_inplanes=slow_inplanes)
    x = GlobalAveragePooling3D()(x)
    return x
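A minimal sketch of how the two pathways might be combined into a full SlowFast classifier, assuming a suitable bottleneck block, the make_layer_* helpers used above, and ResNet-50 style layer counts:

# Sketch only: fuse the Fast and Slow pathways and add a classification head.
from tensorflow.keras.layers import Input, Concatenate, Dropout, Dense
from tensorflow.keras.models import Model

def SlowFast(clip_shape=(64, 224, 224, 3), num_classes=400, layers=(3, 4, 6, 3), block=None):
    inputs = Input(shape=clip_shape)
    fast_out, lateral = Fast_body(inputs, layers, block)
    slow_out = Slow_body(inputs[:, ::8, :, :, :], lateral, layers, block)  # slow pathway sees every 8th frame
    x = Concatenate()([slow_out, fast_out])
    x = Dropout(0.5)(x)
    outputs = Dense(num_classes, activation='softmax')(x)
    return Model(inputs, outputs)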
Example #18
    def __init__(self, filters_list):
        super(MixedBlock, self).__init__()

        self.cbl_0 = CBL(filters_list[0], kernel=[1, 1, 1])

        self.cbl_1_1 = CBL(filters_list[1], kernel=[1, 1, 1])
        self.cbl_1_2 = CBL(filters_list[2], kernel=[3, 3, 3])

        self.cbl_2_1 = CBL(filters_list[3], kernel=[1, 1, 1])
        self.cbl_2_2 = CBL(filters_list[4], kernel=[3, 3, 3])

        self.maxpool3d_3 = MaxPool3D(pool_size=[3, 3, 3],
                                     strides=[1, 1, 1],
                                     padding='same')
        self.cbl_3 = CBL(filters_list[5], kernel=[1, 1, 1])
Example #19
def build_model():
    model = Sequential()
    model.add(Conv(8, (3, 3, 3), input_shape=(91, 109, 91, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPool3D(pool_size=(2, 2, 2)))
    model.add(Dropout(0.25))
    #model.add(Conv(32, (3,3,3)))
    #model.add(MaxPool3D())
    #model.add(Dropout(0.25))
    model.add(Flatten())
    #model.add(Dense(4096, activation='relu'))
    #model.add(Dense(1024, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    return model
Example #20
def build_SimpleNet_noOutput(input_shape, output_shape, outputsize_firstLayer, outputsize_secondLayer, outputsize_dense):

    input_layer = Input(shape=input_shape, dtype='float32')

    # first conv block: conv -> pool -> batch norm -> dropout
    conv_layer = Conv3D(outputsize_firstLayer, kernel_size=3, padding='same', activation='relu')(input_layer)
    max_pool = MaxPool3D(pool_size=(1, 3, 3), strides=2)(conv_layer)
    norm = BatchNormalization()(max_pool)
    dropout = Dropout(0.5)(norm)

    # second conv block
    conv_layer = Conv3D(outputsize_secondLayer, kernel_size=3, padding='same', activation='relu')(dropout)
    max_pool = MaxPool3D(pool_size=(1, 3, 3), strides=2)(conv_layer)
    norm = BatchNormalization()(max_pool)
    dropout_base = Dropout(0.5)(norm)

    flatten = Flatten()(dropout_base)
    dense = Dense(outputsize_dense, activation='relu')(flatten)
    # output = Dense(output_shape, activation='softmax')(dense)  # unused: this "noOutput" variant returns the dense features
    model = Model(inputs=[input_layer], outputs=[dense])
    # model.compile(loss='sparse_categorical_crossentropy',
    #               optimizer='Adam',  # (learning_rate=0.00005)
    #               metrics=['accuracy'])

    return model
Example #21
    def FCN(self, n=None):
        """
        fully convolutional neural network (doesn't work as intended when scaling up the input dimension)
        """
        input_layer = (64, 64, n, 3)
        kernel_conv1 = (5, 5, 3)
        kernel_conv2 = (3, 3, 3)
        kernel_conv3 = (6, 6, 1)
        kernel_softmax = (1, 1, 4)

        model = Sequential()
        model.add(InputLayer(input_layer))
        model.add(
            Conv3D(kernel_size=kernel_conv1,
                   filters=16,
                   strides=(2, 2, 1),
                   padding="valid",
                   activation='relu'))
        model.add(
            Conv3D(kernel_size=kernel_conv2,
                   filters=24,
                   strides=(2, 2, 1),
                   padding="valid",
                   activation='relu'))
        model.add(
            Conv3D(kernel_size=kernel_conv2,
                   filters=32,
                   strides=(2, 2, 1),
                   padding='valid',
                   activation='relu'))
        model.add(
            Conv3D(kernel_size=kernel_conv3,
                   filters=12,
                   strides=(2, 2, 1),
                   padding='valid',
                   activation='relu'))
        model.add(MaxPool3D((1, 1, 4), strides=(1, 1, 1)))
        model.add(
            Conv3D(kernel_size=(1, 1, 1),
                   filters=2,
                   strides=(1, 1, 1),
                   padding='valid',
                   activation='softmax'))
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=[categorical_crossentropy, categorical_accuracy])
        model.summary()
        return (model)
Example #22
    def cnn_model_3d_tl(self, voxel_dim, deviation_channels):
        """Build the 3D Model with GlobalMAxPooling3D instead of flatten, this enables input for different voxel dimensions, to be used when the model needs to be leveraged for transfer learning with different size input

			:param voxel_dim: The voxel dimension of the input, required to build input to the 3D CNN model
			:type voxel_dim: int (required)

			:param deviation_channels: The number of deviation channels in the input structure, required to build input to the 3D CNN model
			:type deviation_channels: int (required)
		"""
        from tensorflow.keras.layers import Conv3D, MaxPool3D, Flatten, Dense, Dropout, Input, GlobalMaxPooling3D
        from tensorflow.keras.models import Model
        from tensorflow.keras import regularizers

        # final-layer activation chosen as in the other builders (assumes the same model_type attribute)
        if (self.model_type == "regression"):
            final_layer_avt = 'linear'

        if (self.model_type == "classification"):
            final_layer_avt = 'softmax'

        inputs = Input(shape=(
            None,
            None,
            None,
            deviation_channels,
        ))
        cnn3d_1 = Conv3D(32,
                         kernel_size=(5, 5, 5),
                         strides=(2, 2, 2),
                         activation='relu')(inputs)
        cnn3d_2 = Conv3D(32,
                         kernel_size=(4, 4, 4),
                         strides=(2, 2, 2),
                         activation='relu')(cnn3d_1)
        cnn3d_3 = Conv3D(32,
                         kernel_size=(3, 3, 3),
                         strides=(1, 1, 1),
                         activation='relu')(cnn3d_2)
        max_pool3d = MaxPool3D(pool_size=(2, 2, 2))(cnn3d_3)
        pooled_layer = GlobalMaxPooling3D()(max_pool3d)
        dense_1 = Dense(128,
                        kernel_regularizer=regularizers.l2(
                            self.regularizer_coeff),
                        activation='relu')(pooled_layer)
        predictions = Dense(self.output_dimension,
                            activation=final_layer_avt)(dense_1)
        model = Model(inputs=inputs, outputs=predictions)

        model.compile(loss=self.loss_function,
                      optimizer=self.optimizer,
                      metrics=['mae'])

        return model
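Because the network ends in GlobalMaxPooling3D, the same weights accept voxel grids of different sizes; a minimal sketch, assuming model is the network returned above and deviation_channels is 3:

# Sketch only: both calls return predictions of shape (1, output_dimension).
import numpy as np

small = np.random.rand(1, 32, 32, 32, 3)
large = np.random.rand(1, 64, 64, 64, 3)
print(model.predict(small).shape, model.predict(large).shape)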
Example #23
def build_model(input_shape_dict):
    video_input_shape = input_shape_dict['video']
    audio_input_shape = input_shape_dict['audio']
    weight_decay = 0.005

    # Video 3D Conv layers
    video_input = Input(video_input_shape)
    x = Conv3D(8, (3, 3, 3),
               strides=(1, 1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='he_uniform',
               kernel_regularizer=l2(weight_decay))(video_input)
    x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    video_output = Flatten()(x)

    # Audio 2D Conv layers
    audio_input = Input(audio_input_shape)
    x = expand_dims(audio_input)  # add channel dim
    x = Conv2D(4, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='he_uniform',
               kernel_regularizer=l2(weight_decay))(x)
    x = MaxPool2D((2, 2), strides=(2, 2), padding='same')(x)
    audio_output = Flatten()(x)

    # Fully-connected layers
    fc_input = concatenate([video_output, audio_output])
    x = Dense(16,
              activation='relu',
              kernel_initializer='he_uniform',
              kernel_regularizer=l2(weight_decay))(fc_input)
    # x = Dropout(0.2)(x)
    fc_output = Dense(1,
                      activation='sigmoid',
                      kernel_initializer='he_uniform',
                      kernel_regularizer=l2(weight_decay))(x)

    model = Model(inputs=[video_input, audio_input], outputs=fc_output)

    return model
Example #24
    def model(self, lr=0.1):
        input_layer = (64, 64, 10, 3)
        kernel_conv1 = (5, 5, 3)
        kernel_conv2 = (3, 3, 3)
        kernel_conv3 = (6, 6, 1)
        kernel_softmax = (1, 1, 4)

        model = Sequential()
        model.add(InputLayer(input_layer))
        model.add(
            Conv3D(kernel_size=kernel_conv1,
                   filters=16,
                   strides=(2, 2, 1),
                   padding='valid',
                   activation='relu'))
        model.add(
            Conv3D(kernel_size=kernel_conv2,
                   filters=24,
                   strides=(2, 2, 1),
                   padding='valid',
                   activation='relu'))
        model.add(
            Conv3D(kernel_size=kernel_conv2,
                   filters=32,
                   strides=(2, 2, 1),
                   padding='valid',
                   activation='relu'))
        model.add(
            Conv3D(kernel_size=kernel_conv3,
                   filters=12,
                   strides=(2, 2, 1),
                   padding='valid',
                   activation='relu'))
        model.add(MaxPool3D(kernel_softmax))
        model.add(Flatten())
        model.add(Dense(2, activation='softmax'))
        opt = SGD(learning_rate=lr)
        model.compile(optimizer=opt,
                      loss='categorical_crossentropy',
                      metrics=[categorical_crossentropy, categorical_accuracy])

        return (model)
Example #25
def down_sampling(Input,
                  target_size,
                  kernel_size=3,
                  padding="same",
                  activation="relu",
                  stride=1):
    conv = Conv3D(target_size,
                  kernel_size,
                  padding=padding,
                  strides=stride,
                  activation=activation,
                  kernel_initializer='he_normal')(Input)
    conv = Conv3D(target_size,
                  kernel_size,
                  padding=padding,
                  strides=stride,
                  activation=activation,
                  kernel_initializer='he_normal')(conv)
    pool = MaxPool3D(pool_size=(2, 2, 2))(conv)
    return conv, pool
Example #26
    def __init__(self, target_time_offsets):

        # deltatime to try: -30 min, -1h, -1h30, -2h, -3h, -4h, -5h
        super(cnn3d, self).__init__()
        self.conv1 = Conv3D(filters=64,
                            kernel_size=(3, 3, 3),
                            padding='same',
                            activation='relu',
                            input_shape=(7, 64, 64, 5))
        self.conv2 = Conv3D(filters=64,
                            kernel_size=(4, 4, 4),
                            padding='same',
                            activation='relu')
        self.conv3 = Conv3D(filters=32,
                            kernel_size=(2, 2, 2),
                            padding='same',
                            activation='relu')
        self.maxpool1 = MaxPool3D(pool_size=(2, 2, 2), padding='same')
        self.flatten = Flatten()
        self.d1 = Dense(264, activation='relu')
        self.d2 = Dense(len(target_time_offsets), activation="linear")
Example #27
    def get_appearance_encoder(self):
        app_shape = tuple([None] + list(self.appearance_shape)[2:])
        inputs = Input(shape=app_shape, name='encoder_app_input')

        x = inputs
        x = TimeDistributed(
            ImageNormalization2D(norm_method='whole_image',
                                 name='imgnrm_ae'))(x)

        for i in range(int(math.log(app_shape[1], 2))):
            x = Conv3D(self.n_filters, (1, 3, 3),
                       strides=1,
                       padding='same',
                       use_bias=False,
                       name='conv3d_ae{}'.format(i))(x)
            x = BatchNormalization(axis=-1, name='bn_ae{}'.format(i))(x)
            x = Activation('relu', name='relu_ae{}'.format(i))(x)
            x = MaxPool3D(pool_size=(1, 2, 2))(x)
        x = Lambda(lambda t: tf.squeeze(t, axis=(2, 3)))(x)
        x = Dense(self.encoder_dim, name='dense_aeout')(x)
        x = BatchNormalization(axis=-1, name='bn_aeout')(x)
        x = Activation('relu', name='appearance_embedding')(x)
        return Model(inputs=inputs, outputs=x)
Example #28
def bn_feature_net_3D(receptive_field=61,
                      n_frames=5,
                      input_shape=(5, 256, 256, 1),
                      n_features=3,
                      n_channels=1,
                      reg=1e-5,
                      n_conv_filters=64,
                      n_dense_filters=200,
                      VGG_mode=False,
                      init='he_normal',
                      norm_method='std',
                      location=False,
                      dilated=False,
                      padding=False,
                      padding_mode='reflect',
                      multires=False,
                      include_top=True,
                      temporal=None,
                      residual=False,
                      temporal_kernel_size=3):
    """Creates a 3D featurenet.

    Args:
        receptive_field (int): the receptive field of the neural network.
        n_frames (int): Number of frames.
        input_shape (tuple): If no input tensor, create one with this shape.
        n_features (int): Number of output features
        n_channels (int): number of input channels
        reg (float): regularization value
        n_conv_filters (int): number of convolutional filters
        n_dense_filters (int): number of dense filters
        VGG_mode (bool): If ``multires``, uses ``VGG_mode``
            for multiresolution
        init (str): Method for initializing weights.
        norm_method (str): Normalization method to use with the
            :mod:`deepcell.layers.normalization.ImageNormalization3D` layer.
        location (bool): Whether to include a
            :mod:`deepcell.layers.location.Location3D` layer.
        dilated (bool): Whether to use dilated pooling.
        padding (bool): Whether to use padding.
        padding_mode (str): Type of padding, one of 'reflect' or 'zero'
        multires (bool): Enables multi-resolution mode
        include_top (bool): Whether to include the final layer of the model
        temporal (str): Type of temporal operation
        residual (bool): Whether to use temporal information as a residual
        temporal_kernel_size (int): size of 2D kernel used in temporal convolutions

    Returns:
        tensorflow.keras.Model: 3D FeatureNet
    """
    # Create layers list (x) to store all of the layers.
    # We need to use the functional API to enable the multiresolution mode
    x = []

    win = (receptive_field - 1) // 2
    win_z = (n_frames - 1) // 2

    if dilated:
        padding = True

    if K.image_data_format() == 'channels_first':
        channel_axis = 1
        time_axis = 2
        row_axis = 3
        col_axis = 4
        if not dilated:
            input_shape = (n_channels, n_frames, receptive_field,
                           receptive_field)
    else:
        channel_axis = -1
        time_axis = 1
        row_axis = 2
        col_axis = 3
        if not dilated:
            input_shape = (n_frames, receptive_field, receptive_field,
                           n_channels)

    x.append(Input(shape=input_shape))
    x.append(
        ImageNormalization3D(norm_method=norm_method,
                             filter_size=receptive_field)(x[-1]))

    if padding:
        if padding_mode == 'reflect':
            x.append(ReflectionPadding3D(padding=(win_z, win, win))(x[-1]))
        elif padding_mode == 'zero':
            x.append(ZeroPadding3D(padding=(win_z, win, win))(x[-1]))

    if location:
        x.append(Location3D()(x[-1]))
        x.append(Concatenate(axis=channel_axis)([x[-2], x[-1]]))

    layers_to_concat = []

    rf_counter = receptive_field
    block_counter = 0
    d = 1

    while rf_counter > 4:
        filter_size = 3 if rf_counter % 2 == 0 else 4
        x.append(
            Conv3D(n_conv_filters, (1, filter_size, filter_size),
                   dilation_rate=(1, d, d),
                   kernel_initializer=init,
                   padding='valid',
                   kernel_regularizer=l2(reg))(x[-1]))
        x.append(BatchNormalization(axis=channel_axis)(x[-1]))
        x.append(Activation('relu')(x[-1]))

        block_counter += 1
        rf_counter -= filter_size - 1

        if block_counter % 2 == 0:
            if dilated:
                x.append(
                    DilatedMaxPool3D(dilation_rate=(1, d, d),
                                     pool_size=(1, 2, 2))(x[-1]))
                d *= 2
            else:
                x.append(MaxPool3D(pool_size=(1, 2, 2))(x[-1]))

            if VGG_mode:
                n_conv_filters *= 2

            rf_counter = rf_counter // 2

            if multires:
                layers_to_concat.append(len(x) - 1)

    if multires:
        c = []
        for l in layers_to_concat:
            output_shape = x[l].get_shape().as_list()
            target_shape = x[-1].get_shape().as_list()
            time_crop = (0, 0)

            row_crop = int(output_shape[row_axis] - target_shape[row_axis])

            if row_crop % 2 == 0:
                row_crop = (row_crop // 2, row_crop // 2)
            else:
                row_crop = (row_crop // 2, row_crop // 2 + 1)

            col_crop = int(output_shape[col_axis] - target_shape[col_axis])

            if col_crop % 2 == 0:
                col_crop = (col_crop // 2, col_crop // 2)
            else:
                col_crop = (col_crop // 2, col_crop // 2 + 1)

            cropping = (time_crop, row_crop, col_crop)

            c.append(Cropping3D(cropping=cropping)(x[l]))
        x.append(Concatenate(axis=channel_axis)(c))

    x.append(
        Conv3D(n_dense_filters, (1, rf_counter, rf_counter),
               dilation_rate=(1, d, d),
               kernel_initializer=init,
               padding='valid',
               kernel_regularizer=l2(reg))(x[-1]))
    x.append(BatchNormalization(axis=channel_axis)(x[-1]))
    x.append(Activation('relu')(x[-1]))

    x.append(
        Conv3D(n_dense_filters, (n_frames, 1, 1),
               dilation_rate=(1, d, d),
               kernel_initializer=init,
               padding='valid',
               kernel_regularizer=l2(reg))(x[-1]))
    x.append(BatchNormalization(axis=channel_axis)(x[-1]))
    feature = Activation('relu')(x[-1])

    def __merge_temporal_features(feature,
                                  mode='conv',
                                  residual=False,
                                  n_filters=256,
                                  n_frames=3,
                                  padding=True,
                                  temporal_kernel_size=3):
        if mode is None:
            return feature

        mode = str(mode).lower()
        if mode == 'conv':
            x = Conv3D(n_filters,
                       (n_frames, temporal_kernel_size, temporal_kernel_size),
                       kernel_initializer=init,
                       padding='same',
                       activation='relu',
                       kernel_regularizer=l2(reg))(feature)
        elif mode == 'lstm':
            x = ConvLSTM2D(filters=n_filters,
                           kernel_size=temporal_kernel_size,
                           padding='same',
                           kernel_initializer=init,
                           activation='relu',
                           kernel_regularizer=l2(reg),
                           return_sequences=True)(feature)
        elif mode == 'gru':
            x = ConvGRU2D(filters=n_filters,
                          kernel_size=temporal_kernel_size,
                          padding='same',
                          kernel_initializer=init,
                          activation='relu',
                          kernel_regularizer=l2(reg),
                          return_sequences=True)(feature)
        else:
            raise ValueError(
                '`temporal` must be one of "conv", "lstm", "gru" or None')

        if residual is True:
            temporal_feature = Add()([feature, x])
        else:
            temporal_feature = x
        temporal_feature_normed = BatchNormalization(
            axis=channel_axis)(temporal_feature)
        return temporal_feature_normed

    temporal_feature = __merge_temporal_features(
        feature,
        mode=temporal,
        residual=residual,
        n_filters=n_dense_filters,
        n_frames=n_frames,
        padding=padding,
        temporal_kernel_size=temporal_kernel_size)
    x.append(temporal_feature)

    x.append(
        TensorProduct(n_dense_filters,
                      kernel_initializer=init,
                      kernel_regularizer=l2(reg))(x[-1]))
    x.append(BatchNormalization(axis=channel_axis)(x[-1]))
    x.append(Activation('relu')(x[-1]))

    x.append(
        TensorProduct(n_features,
                      kernel_initializer=init,
                      kernel_regularizer=l2(reg))(x[-1]))

    if not dilated:
        x.append(Flatten()(x[-1]))

    if include_top:
        x.append(Softmax(axis=channel_axis, dtype=K.floatx())(x[-1]))

    model = Model(inputs=x[0], outputs=x[-1])

    return model
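A minimal instantiation sketch with the default pixelwise (non-dilated) settings, assuming the deepcell custom layers imported by the surrounding module (ImageNormalization3D, TensorProduct, ...) are available:

# Sketch only: build a small 3D featurenet and inspect its layers.
model = bn_feature_net_3D(receptive_field=61, n_frames=5, n_features=3, n_channels=1)
model.summary()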
Example #29
def d3UNet(img_depth, img_height, img_width, img_channels):

    inputs = Input((img_depth, img_height, img_width, img_channels))
    s = Lambda(lambda x: x / 255) (inputs)

    c1 = Conv3D(filters=16, kernel_size=(3, 3, 3), activation='elu', kernel_initializer='he_normal',
                padding='same', data_format = dataformat, name='Conv1_1') (s)
    c1 = Dropout(0.2) (c1)
    c1 = Conv3D(16, kernel_size=(3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv1_2') (c1)
    p1 = MaxPool3D(pool_size=(2, 2, 2), name='Pool1') (c1)

    #Conv2
    c2 = Conv3D(32, (3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv2_1') (p1)
    c2 = Dropout(0.1) (c2)
    c2 = Conv3D(32, (3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv2_2') (c2)
    p2 = MaxPool3D(pool_size=(2, 2, 2), name='Pool2') (c2)

    # Conv3
    c3 = Conv3D(64, (3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv3_1') (p2)
    c3 = Dropout(0.3) (c3)
    c3 = Conv3D(64, (3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv3_2') (c3)
    p3 = MaxPool3D((2, 2, 2), name='Pool3') (c3)

    # Conv4
    c4 = Conv3D(128, (3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv4_1') (p3)
    c4 = Dropout(0.3) (c4)
    c4 = Conv3D(128, (3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv4_2') (c4)
    p4 = MaxPool3D(pool_size=(2, 2, 2), name='Pool4') (c4)

    # Conv5
    c5 = Conv3D(256, (3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv5_1') (p4)
    c5 = Dropout(0.4) (c5)
    c5 = Conv3D(256, (3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv5_2') (c5)

    # Up6
    u6 = Conv3DTranspose(128, (2, 2, 2), strides=(2, 2, 2), padding='same', name='ConvT1', data_format=dataformat) (c5)
    u6 = concatenate([u6, c4], axis=4)
    c6 = Conv3D(128, (3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv6_1') (u6)
    c6 = Dropout(0.3) (c6)
    c6 = Conv3D(128, (3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv6_2') (c6)

    # Up7
    u7 = Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2), padding='same', name='ConvT2', data_format=dataformat) (c6)
    u7 = concatenate([u7, c3])
    c7 = Conv3D(64, (3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv7_1') (u7)
    c7 = Dropout(0.3) (c7)
    c7 = Conv3D(64, (3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv7_2') (c7)

    # Up8
    u8 = Conv3DTranspose(32, (2, 2, 2), strides=(2, 2, 2), padding='same', name='ConvT3', data_format=dataformat) (c7)
    u8 = concatenate([u8, c2])
    c8 = Conv3D(32, (3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv8_1') (u8)
    c8 = Dropout(0.2) (c8)
    c8 = Conv3D(32, (3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv8_2') (c8)

    # Up9
    u9 = Conv3DTranspose(filters=16, kernel_size=(2, 2, 2), strides=(2, 2, 2), padding='same', name='ConvT4', data_format=dataformat) (c8)
    u9 = concatenate([u9, c1], axis=4)
    c9 = Conv3D(16, (3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv9_1') (u9)
    c9 = Dropout(0.2) (c9)
    c9 = Conv3D(16, (3, 3, 3), activation='elu', kernel_initializer='he_normal', padding='same',
                data_format = dataformat, name = 'Conv9_2') (c9)

    outputs = Conv3D(1, (1, 1, 1), activation='sigmoid', name='Output') (c9)

    model = Model(inputs=[inputs], outputs=[outputs])
    return model
Example #30
def cnn3d(channels=3, pixels_x=96, pixels_y=96, output_size=7):
    model = Sequential(name='conv3d')
    model.add(
        Input(shape=(None, pixels_x, pixels_y, channels), name='input_conv3d'))
    model.add(
        Conv3D(filters=16,
               kernel_size=(3, 3, 3),
               strides=(2, 2, 2),
               padding='same',
               kernel_initializer='glorot_normal',
               name="Conv_1"))
    model.add(TimeDistributed(BatchNormalization(), name="bn_1"))
    model.add(TimeDistributed(LeakyReLU(alpha=0.3), name="lr_1"))

    model.add(
        Conv3D(filters=64,
               kernel_size=(3, 3, 3),
               strides=(2, 2, 2),
               padding='same',
               kernel_initializer='glorot_normal',
               name="Conv_2"))
    model.add(TimeDistributed(BatchNormalization(), name="bn_2"))
    model.add(TimeDistributed(LeakyReLU(alpha=0.3), name="lr_2"))

    model.add(
        Conv3D(filters=128,
               kernel_size=(3, 3, 3),
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='glorot_normal',
               name="Conv_3"))
    model.add(TimeDistributed(BatchNormalization(), name="bn_3"))
    model.add(MaxPool3D(pool_size=(2, 2, 2), padding='valid', name="mp_3"))
    model.add(TimeDistributed(LeakyReLU(alpha=0.3), name="lr_3"))

    model.add(
        Conv3D(filters=256,
               kernel_size=(3, 3, 3),
               strides=(1, 2, 2),
               padding='same',
               kernel_initializer='glorot_normal',
               name="Conv_4"))
    model.add(TimeDistributed(BatchNormalization(), name="bn_4"))
    model.add(TimeDistributed(LeakyReLU(alpha=0.3), name="lr_4"))
    model.add(TimeDistributed(Dropout(0.25), name="dropout_4"))

    model.add(
        Conv3D(filters=512,
               kernel_size=(3, 3, 3),
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='glorot_normal',
               name="Conv_5"))
    model.add(TimeDistributed(BatchNormalization(), name="bn_5"))
    model.add(TimeDistributed(LeakyReLU(alpha=0.3), name="lr_5"))
    model.add(TimeDistributed(Dropout(0.3), name="dropout_5"))

    model.add(GlobalAveragePooling3D())
    model.add(BatchNormalization(name="bn_6"))
    model.add(LeakyReLU(alpha=0.3, name="lr_6"))
    model.add(Dropout(0.5, name="dropout_6"))
    model.add(Dense(units=128, kernel_regularizer=regularizers.l2(0.005)))
    model.add(BatchNormalization(name="bn_7"))
    model.add(LeakyReLU(alpha=0.3, name="lr_7"))
    model.add(Dropout(0.5, name="dropout_7"))
    model.add(Dense(output_size, name="output"))
    model.add(Softmax())

    return model