def LSTMUnet():
    '''
    Input: 8 frames per sample, a numpy array of shape (8, 256, 256, 1)
    '''
    row = 256
    col = 256
    input_image = KL.Input(shape=[8, row,col,1], name="input_image")
    L4 = input_image
    for i in range(3):
        L4 = getBiConvLSTM2d(L4,filters=20, kernel_size=(3, 3),name='top'+str(i))

    L5 = KL.Conv3D(filters=8, kernel_size=(3, 3, 8),
                activation='relu',
                padding='same', data_format='channels_last')(L4)
    # L5 = KL.BatchNormalization(name="batchNormL_sel5")(L5)

    L6 = KL.Conv3D(filters=1, kernel_size=(3, 3, 4),
                activation='relu',
                padding='same', data_format='channels_last')(L5)
    L6 = KL.BatchNormalization(name="batchNormL_sel5")(L6)

    L7 = KL.Conv3D(filters=1, kernel_size=(3, 3, 8),
                activation='relu',
                padding='same', data_format='channels_first')(L6)
    L7 = KL.Reshape((row, col, 1))(L7)
    seg = unet(L7)
    model = KM.Model(input_image,seg)
    model.compile(loss='mean_squared_error', optimizer='adadelta')
    return model
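
A minimal usage sketch (not part of the original example), assuming getBiConvLSTM2d and unet are defined elsewhere in the same module and that the segmentation output has the same 256x256x1 spatial shape as the input frames:

import numpy as np

model = LSTMUnet()
model.summary()
# two dummy clips of 8 grayscale 256x256 frames each
clips = np.zeros((2, 8, 256, 256, 1), dtype='float32')
masks = np.zeros((2, 256, 256, 1), dtype='float32')
model.fit(clips, masks, batch_size=1, epochs=1)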
Example #2
    def grouped_convolution(y, nb_channels, _strides):
        # when `cardinality` == 1 this is just a standard convolution
        if cardinality == 1:
            return layers.Conv3D(
                nb_channels,
                kernel_size=(3, 3, 3),
                strides=_strides,
                padding='same',
                kernel_regularizer=regularizers.l2(L2_regularizer))(y)

        assert not nb_channels % cardinality
        _d = nb_channels // cardinality

        # in a grouped convolution layer, input and output channels are divided into `cardinality` groups,
        # and convolutions are separately performed within each group
        groups = []
        for j in range(cardinality):
            # bind j (and _d) as default arguments so each Lambda slices its own channel group
            group = layers.Lambda(
                lambda z, j=j, _d=_d: z[:, :, :, :, j * _d:j * _d + _d])(y)
            groups.append(
                layers.Conv3D(
                    _d,
                    kernel_size=(3, 3, 3),
                    strides=_strides,
                    padding='same',
                    kernel_regularizer=regularizers.l2(L2_regularizer))(group))

        # the grouped convolutional layer concatenates them as the outputs of the layer
        y = layers.concatenate(groups)

        return y
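
A hypothetical standalone check of the grouping logic, assuming the helper is lifted to module level and that cardinality and L2_regularizer are module-level names (both are assumed by the snippet above):

from tensorflow.keras import Input, Model

cardinality = 4
L2_regularizer = 1e-4

inp = Input(shape=(16, 16, 16, 32))                 # 32 channels, divisible by cardinality
out = grouped_convolution(inp, nb_channels=32, _strides=(1, 1, 1))
Model(inp, out).summary()                           # four parallel Conv3D branches of 8 filters each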
Example #3
def create_3DCNN_model(input_shape):
    """Build architecture of the model"""
    model = Sequential()
    model.add(layers.Conv3D(32, (3, 3, 3), input_shape=input_shape,
                            activation="relu", padding="same"))
    model.add(layers.Conv3D(64, (3, 3, 3), activation="selu", padding="same"))
    model.add(layers.MaxPooling3D(pool_size=(3, 3, 3)))
    model.add(layers.Conv3D(64, (3, 3, 3), activation="selu", padding="same"))
    model.add(layers.Conv3D(64, (3, 3, 3), activation="selu", padding="same"))
    model.add(layers.MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(layers.Conv3D(128, (3, 3, 3), activation="selu", padding="same"))
    model.add(layers.MaxPooling3D(pool_size=(2, 2, 2), padding="same"))
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation="selu",
                           kernel_regularizer=regularizers.l2(0.001)))
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(32, activation="selu"))
    model.add(layers.Dense(10, activation="softmax"))

    # Create model
    model.compile(optimizer=tf.train.AdamOptimizer(),
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])

    return model
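
A possible call (illustrative only); under TF 1.x, where tf.train.AdamOptimizer exists, any input whose spatial dimensions survive the 3x3x3 and two 2x2x2 poolings works, e.g. a 24x24x24 single-channel volume:

model = create_3DCNN_model(input_shape=(24, 24, 24, 1))
model.summary()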
Example #4
def CNN_Classification():
    image_channels = 1

    model = models.Sequential()

    model.add(
        layers.Conv3D(32, (3, 3, 3),
                      activation='relu',
                      input_shape=(32, 32, 32, image_channels)))
    model.add(layers.MaxPooling3D((2, 2, 2)))

    model.add(layers.Conv3D(64, (3, 3, 3), activation='relu'))
    model.add(layers.MaxPooling3D((2, 2, 2)))
    model.add(layers.Dropout(0.2))

    model.add(layers.Flatten())

    model.add(layers.Dense(216, activation='relu'))
    model.add(layers.Dropout(0.5))

    model.add(layers.Dense(108, activation='relu'))
    model.add(layers.Dropout(0.5))

    model.add(
        layers.Dense(1, kernel_initializer='normal',
                     activation='sigmoid'))  # output layer

    #model.summary()

    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.Adam(lr=1e-3))

    return model
Example #5
def net27():
    model = Sequential()

    # Inputs are (27,27,103)
    # Conv

    #model.add(L.Lambda(data_to_img, input_shape=(8), output_shape=(103,27,27,1)))
    model.add(
        L.Conv3D(32, (32, 4, 4),
                 activation='relu',
                 input_shape=(103, 27, 27, 1)))
    model.add(L.MaxPooling3D(pool_size=(1, 2, 2)))

    model.add(L.Conv3D(64, (32, 5, 5), activation='relu'))
    model.add(L.MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(L.Dropout(.5))

    model.add(L.Conv3D(128, (32, 4, 4), activation='relu'))
    model.add(L.Dropout(.5))

    # Fully Connected
    model.add(L.Flatten())
    model.add(L.Dense(128, activation='relu'))
    #model.add(L.Conv2D(1,128,1 activation='relu'))
    model.add(L.Dropout(.5))
    model.add(L.Dense(9, activation='softmax'))
    #model.add(L.Conv2D(1,128,1 activation='softmax'))

    # Loss
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    plot_model(model, show_shapes=True, to_file='model.png')
    return model
Example #6
def build_model(model_type='regression', conv_layer_sizes=(16, 16, 16), dense_layer_size=16, dropout_rate=0.5):
    """
    """
    # make sure requested model type is valid
    if model_type not in ['regression', 'classification']:
        print('Requested model type {0} is invalid'.format(model_type))
        sys.exit(1)
        
    # instantiate a 3D convnet
    model = models.Sequential()
    model.add(layers.Conv3D(filters=conv_layer_sizes[0], kernel_size=(3, 3, 3), input_shape=(16, 16, 16, 14)))
    model.add(layers.Activation(activation='relu'))
    for c in conv_layer_sizes[1:]:
        model.add(layers.Conv3D(filters=c, kernel_size=(3, 3, 3)))
        model.add(layers.Activation(activation='relu'))
    model.add(layers.MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(rate=dropout_rate))
    model.add(layers.Dense(units=dense_layer_size, activation='relu'))
    model.add(layers.Dropout(rate=dropout_rate))
    
    # the last layer and the loss depend on the model type
    if model_type == 'regression':
        model.add(layers.Dense(units=1))
        # mse/mae is an assumed default for the regression head
        model.compile(loss='mse', optimizer=optimizers.RMSprop(lr=0.0001),
                      metrics=['mae'])
    else:
        model.add(layers.Dense(units=3, activation='softmax'))
        model.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=0.0001),
                      metrics=['accuracy'])
    
    return model
Example #7
def load_model(model_choice):

    model = km.Sequential()
    model.add(
        kl.Conv3D(64,
                  kernel_size=(6, 5, 5),
                  input_shape=(6, 8, 8, 1),
                  activation='tanh'))
    model.add(
        kl.Conv3D(64,
                  kernel_size=(1, 3, 3),
                  input_shape=(6, 8, 8, 1),
                  activation='tanh'))
    model.add(
        kl.Conv3D(64,
                  kernel_size=(1, 1, 1),
                  input_shape=(6, 8, 8, 1),
                  activation='tanh'))
    model.add(kl.Flatten())
    model.add(kl.Dense(name='output', units=64))
    model.compile(optimizer='rmsprop',
                  metrics=['accuracy', 'categorical_accuracy'],
                  loss='mean_squared_error')

    if model_choice == "move from":
        model.load_weights('Kasparov_moveFrom_complex_400.hdf5')
    else:
        model.load_weights('Kasparov_moveTo_complex_400.hdf5')

    return (model)
Example #8
def get_model():
    '''
    :return: returns the network architecture for YOLO
    '''
    input_image = layers.Input(shape=(IMAGE_H // striding, IMAGE_W // striding, IMAGE_D // striding, 1))
    x = layers.Conv3D(filters=16, kernel_size=(10, 10, 10), strides=(1, 1, 1),
                      padding='same', name='conv_0_1')(input_image)
    x = layers.BatchNormalization(name='norm_0_1')(x)
    x = layers.advanced_activations.LeakyReLU(alpha=ALPHA)(x)
    x = layers.MaxPool3D(pool_size=(2 ,2 ,1))(x)

    x = layers.Conv3D(filters=16, kernel_size=(10, 10, 10), strides=(1, 1, 1),
                      padding='same', name='conv_1_1')(x)
    x = layers.BatchNormalization(name='norm_1_1')(x)
    x = layers.advanced_activations.LeakyReLU(alpha=ALPHA)(x)
    x = layers.MaxPool3D(pool_size=(4 ,4 ,4))(x)

    x = layers.Conv3D(filters=16, kernel_size=(10, 10, 10), strides=(1, 1, 1), padding='same')(x)
    x = layers.BatchNormalization(name='norm_5_1')(x)
    x = layers.advanced_activations.LeakyReLU(alpha=ALPHA)(x)
    x = layers.MaxPool3D(pool_size=(4 ,2 ,2))(x)

    x = layers.Conv3D(filters=(4 + 1 + num_classes) * num_boxes, kernel_size=(1, 1, 1),
                      strides=(1, 1, 1), padding='same', name='yolo')(x)
    output = layers.Reshape((GRID_H, GRID_W, GRID_D, num_boxes, 4 + 1 + num_classes))(x)
    model = models.Model(input_image, output)
    return model
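
The example relies on several module-level constants that are not shown. One internally consistent (purely illustrative) choice, matching the 2x2x1, 4x4x4 and 4x2x2 pooling stages that downsample the input by 32, 16 and 8 along H, W and D:

IMAGE_H, IMAGE_W, IMAGE_D = 256, 256, 128
striding = 2
ALPHA = 0.1
num_classes = 1
num_boxes = 1
# grid sizes must equal (IMAGE // striding) divided by the total pooling factor per axis
GRID_H, GRID_W, GRID_D = (256 // 2) // 32, (256 // 2) // 16, (128 // 2) // 8   # 4, 8, 8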
Example #9
def conv_block(input_tensor,
               kernel_size,
               filters,
               stage,
               block,
               strides=(2, 2, 2),
               use_bias=True):
    """conv_block is the block that has a conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
    And the shortcut should have subsample=(2,2) as well
    """
    nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'

    x = KL.Conv3D(nb_filter2, (kernel_size, kernel_size, kernel_size),
                  padding='same',
                  strides=strides,
                  name=conv_name_base + '2b',
                  use_bias=use_bias)(input_tensor)
    x = KL.Activation('relu')(x)

    x = KL.Conv3D(nb_filter3, (kernel_size, kernel_size, kernel_size),
                  padding='same',
                  name=conv_name_base + '2c',
                  use_bias=use_bias)(x)

    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
Example #10
def create_voxnet_model_homepage(input_shape, output_size):
    """
    Creates a small VoxNet.

    See: http://dimatura.net/publications/3dcnn_lz_maturana_scherer_icra15.pdf

    Note: This is the latest model that the VoxNet-authors used.

    Args:
        input_shape (shape): Input-shape.
        output_size (int): Output-size.

    Returns:
        Model: A model.
    """

    # Trainable params: 916,834
    model = models.Sequential(name="VoxNetHomepage")
    model.add(layers.Reshape(target_shape=input_shape + (1,), input_shape=input_shape))
    model.add(layers.Conv3D(32, (5, 5, 5), strides=(2, 2, 2), activation="relu"))
    model.add(layers.Conv3D(32, (3, 3, 3), strides=(1, 1, 1), activation="relu"))
    model.add(layers.MaxPooling3D((2, 2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(128, activation="relu"))
    model.add(layers.Dense(output_size))

    return model
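
A possible way to compile and inspect it (illustrative); since the final Dense layer has no activation, the loss is applied to logits here:

import tensorflow as tf

model = create_voxnet_model_homepage(input_shape=(32, 32, 32), output_size=10)
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()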
Example #11
def residual_block(y,
                   nb_channels_in,
                   nb_channels_out,
                   strides=(1, 1, 1),
                   project_shortcut=False):
    shortcut = y
    y = layers.Conv3D(nb_channels_in,
                      kernel_size=(1, 1, 1),
                      strides=(1, 1, 1),
                      padding='same')(y)
    y = add_common_layers(y)
    y = grouped_convolution(y, nb_channels_in, strides=strides)
    y = add_common_layers(y)
    y = layers.Conv3D(nb_channels_out,
                      kernel_size=(1, 1, 1),
                      strides=(1, 1, 1),
                      padding='same')(y)
    y = layers.BatchNormalization()(y)
    if project_shortcut or strides != (1, 1, 1):
        shortcut = layers.Conv3D(nb_channels_out,
                                 kernel_size=(1, 1, 1),
                                 strides=strides,
                                 padding='same')(shortcut)
        shortcut = layers.BatchNormalization()(shortcut)

    y = layers.add([shortcut, y])
    y = layers.LeakyReLU()(y)
    return y
Example #12
def conv_block3D(x, growth_rate, name):
    """A building block for a dense block.

    # Arguments
        x: input tensor.
        growth_rate: float, growth rate at dense layers.
        name: string, block label.

    # Returns
        Output tensor for the block.
    """
    bn_axis = 4
    x1 = layers.BatchNormalization(axis=bn_axis,
                                   epsilon=1.001e-5,
                                   name=name + '_0_bn')(x)
    x1 = layers.Activation('relu', name=name + '_0_relu')(x1)
    x1 = layers.Conv3D(4 * growth_rate,
                       1,
                       use_bias=False,
                       name=name + '_1_conv')(x1)
    x1 = layers.BatchNormalization(axis=bn_axis,
                                   epsilon=1.001e-5,
                                   name=name + '_1_bn')(x1)
    x1 = layers.Activation('relu', name=name + '_1_relu')(x1)
    x1 = layers.Conv3D(growth_rate,
                       3,
                       padding='same',
                       use_bias=False,
                       name=name + '_2_conv')(x1)
    x = layers.Concatenate(axis=bn_axis, name=name + '_concat')([x, x1])
    return x
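
A dense block is just this building block applied repeatedly, each call concatenating growth_rate new channels onto x; a sketch mirroring the 2D Keras DenseNet layout:

def dense_block3D(x, blocks, name):
    """Stack `blocks` conv blocks, densely connected along the channel axis."""
    for i in range(blocks):
        x = conv_block3D(x, growth_rate=32, name=name + '_block' + str(i + 1))
    return x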
Example #13
    def __init__(self, shape):
        self.re_rate = 0.9
        self.model = models.Sequential()
        self.model.add(
            layers.Conv3D(16, (3, 3, 3),
                          kernel_regularizer=regularizers.l2(self.re_rate),
                          input_shape=shape))
        self.model.add(layers.ReLU())
        self.model.add(
            layers.Conv3D(16, (3, 3, 3),
                          kernel_regularizer=regularizers.l2(self.re_rate)))
        self.model.add(layers.ReLU())
        self.model.add(layers.MaxPooling3D((2, 2, 2)))
        self.model.add(layers.Dropout(rate=0.25))

        self.model.add(
            layers.Conv3D(32, (3, 3, 3),
                          kernel_regularizer=regularizers.l2(self.re_rate)))
        self.model.add(layers.ReLU())
        self.model.add(
            layers.Conv3D(32, (3, 3, 3),
                          kernel_regularizer=regularizers.l2(self.re_rate)))
        self.model.add(layers.ReLU())
        self.model.add(layers.MaxPooling3D((2, 2, 2)))
        self.model.add(layers.Dropout(rate=0.25))

        self.model.add(layers.Flatten())
        self.model.add(layers.Dense(16))
        self.model.add(layers.Dense(4, activation='softmax'))
Example #14
def create_voxnet_model_small(input_shape, output_size):
    """
    Creates a small VoxNet.

    See: http://dimatura.net/publications/3dcnn_lz_maturana_scherer_icra15.pdf

    Args:
        input_shape (shape): Input-shape.
        output_size (int): Output-size.

    Returns:
        Model: A model.
    """

    #Trainable params: 301,378
    model = models.Sequential(name="C7-F32-P2-C5-F64-P2-D512")
    model.add(layers.Reshape(target_shape=input_shape + (1,), input_shape=input_shape))
    model.add(layers.Conv3D(32, (7, 7, 7), activation="relu"))
    model.add(layers.MaxPooling3D((4, 4, 4)))
    model.add(layers.Conv3D(64, (5, 5, 5), activation="relu"))
    model.add(layers.MaxPooling3D((2, 2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation="relu"))
    model.add(layers.Dense(output_size))

    return model
Example #15
def get_model(width=64, height=64, depth=16):
    """Build a 3D convolutional neural network model."""

    inputs = keras.Input((depth, width, height, 3))

    x = layers.Conv3D(filters=64, kernel_size=3, activation="relu", padding="same")(inputs)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=64, kernel_size=3, activation="relu", padding="same")(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=128, kernel_size=3, activation="relu", padding="same")(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=256, kernel_size=3, activation="relu", padding="same")(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(units=512, activation="relu")(x)
    x = layers.Dropout(0.3)(x)

    outputs = layers.Dense(units=27, activation="softmax")(x)

    # Define the model.
    model = keras.Model(inputs, outputs, name="3dcnn")
    return model
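
The function returns an uncompiled model; one illustrative way to compile it for the 27-class softmax output:

model = get_model(width=64, height=64, depth=16)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()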
Example #16
def D3GenerateModel_old(n_filter=64, number_of_class=1, input_shape=(16,144,144,1),activation_last='sigmoid', metrics=['mse', 'acc', auc],loss='mse', optimizer='adam',dropout=0.5, init='glorot_uniform'):
    filter_size =16
    model = Sequential()
    #1 layer
    model.add(layers.Conv3D(filters=filter_size, input_shape=input_shape,  kernel_size=(2,2,2), strides=(1,1, 1), 
                                padding='same', activation='relu'))
    model.add(layers.Conv3D(filters=filter_size, input_shape=input_shape,  kernel_size=(2,2,2), strides=(1,1, 1), 
                                padding='same', activation='relu'))
    model.add(layers.MaxPooling3D((1, 2,2), strides=(1,2,2), padding='valid'))
    #2 layer
    for i in range(1,5):
        model.add(layers.Conv3D(filters=filter_size, kernel_size=(2,2,2), strides=(1,1,1), 
                                    padding='same', activation='relu'))
        model.add(layers.Conv3D(filters=filter_size*i, kernel_size=(2,2,2), strides=(1,1,1), 
                                    padding='same', activation='relu'))
        model.add(layers.MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='valid'))
    model.add(layers.Flatten())
    model.add(layers.Dense(2048, activation='relu'))
    model.add(layers.Dropout(.5))
    model.add(layers.Dense(2048, activation='relu'))
    model.add(layers.Dropout(.5))
    model.add(layers.Dense(1, activation='linear', kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01)))
    model.summary()
    model.compile(optimizer=keras.optimizers.SGD(lr=1e-4, nesterov=True), loss='hinge', metrics=metrics)
    return model
Example #17
    def shared_decoder(mask_layer):
        recon_remove_dim = layers.Reshape(
            (H.value, W.value, D.value, A.value))(mask_layer)

        recon_1 = layers.Conv3D(filters=64,
                                kernel_size=1,
                                padding='same',
                                kernel_initializer='he_normal',
                                activation='relu',
                                name='recon_1')(recon_remove_dim)

        recon_2 = layers.Conv3D(filters=128,
                                kernel_size=1,
                                padding='same',
                                kernel_initializer='he_normal',
                                activation='relu',
                                name='recon_2')(recon_1)

        out_recon = layers.Conv3D(filters=1,
                                  kernel_size=1,
                                  padding='same',
                                  kernel_initializer='he_normal',
                                  activation='sigmoid',
                                  name='out_recon')(recon_2)

        return out_recon
Example #18
    def __init__(self, shape):
        self.re_rate = 0.9
        self.model = models.Sequential()
        self.model.add(layers.Conv3D(16, (3, 3, 3), activation='relu',
                                     kernel_regularizer=regularizers.l2(self.re_rate), input_shape=shape))
        self.model.add(layers.BatchNormalization())
        self.model.add(layers.MaxPooling3D((2, 2, 2)))

        self.model.add(layers.Conv3D(32, (3, 3, 3), activation='relu', 
                                     kernel_regularizer=regularizers.l2(self.re_rate)))
        self.model.add(layers.BatchNormalization())
        self.model.add(layers.MaxPooling3D((2, 2, 2)))

        self.model.add(layers.Conv3D(64, (3, 3, 3), activation='relu',
                                     kernel_regularizer=regularizers.l2(self.re_rate)))
        self.model.add(layers.BatchNormalization())
        self.model.add(layers.MaxPooling3D((2, 2, 2)))

        self.model.add(layers.Conv3D(128, (3, 3, 3), activation='relu',
                                     kernel_regularizer=regularizers.l2(self.re_rate)))
        self.model.add(layers.BatchNormalization())
        self.model.add(layers.MaxPooling3D((2, 2, 2)))
        
        
        self.model.add(layers.Flatten())
        
        # third change: dropout rates from 0.8 to 0.6 and the first dense layer from 64 to 128 units,
        # while the dropout rate between the dense layers changed from 0.2 to 0.5;
        # with this the model can't overfit all of the training data, and validation accuracy
        # drops when training accuracy gets too high
        
        # fourth change: the first dropout rate from 0.6 to 0.7
        # and the second Dense layer's kernel number from 32 to 64;
        # overfits the training set, but only up to about 92%, not 100%
        # fifth change: add a conv layer and BN, and change the first dropout rate from 0.6 to 0.7;
        # training accuracy is above 98%, while validation accuracy is only 63%
        # sixth: double these convs with kernel size (1, 1, 1), and change the second dropout rate from 0.5 to 0.6;
        # validation accuracy is 64.4%
        # seventh: change the kernel size from (1, 1, 1) to (3, 3, 3) and add padding='same';
        # after this change the model overfits even the training set,
        # so revert the network to the sixth variant, then double the conv kernel number and change the first
        # dropout rate from 0.7 to 0.8 and the second from 0.6 to 0.7; found it can't overfit;
        # change the second dropout rate from 0.7 to 0.6: still can't overfit;
        # next, change the first dropout rate from 0.8 to 0.7 with the second dropout rate at 0.6
        
        self.model.add(layers.Dropout(rate=0.7))

        self.model.add(layers.Dense(128, activation='relu'))
        # first change: add dropout with rate = 0.3; can't overfit
        # second change: rate from 0.3 to 0.2; still can't overfit, but the training set's
        # accuracy is close to the validation set's
        self.model.add(layers.Dropout(rate=0.6))
        # end
        self.model.add(layers.Dense(64, activation='relu'))
        # self.model.add(layers.Dense(8, activation='relu'))
        # self.model.add(layers.Dense(3, activation='softmax'))
        self.model.add(layers.Dense(1, activation='sigmoid'))
Example #19
def weighted_sampling_layers(x):
    x = layers.Conv3D(128, kernel_size=(5, 3, 3), padding='same')(x)
    x = layers.Conv3D(5, kernel_size=(5, 3, 3), padding='same')(x)
    x = layers.Conv3D(1,
                      kernel_size=(1, 3, 3),
                      padding='same',
                      name='optical_flow_output')(x)
    #x = layers.Lambda(repeat_target, )(x)
    return (x)
Example #20
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1

    in_channels = backend.int_shape(inputs)[channel_axis]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'block_{}_'.format(block_id)

    if block_id:
        # Expand
        x = layers.Conv3D(expansion * in_channels,
                          kernel_size=1,
                          padding='same',
                          use_bias=False,
                          activation=None,
                          name=prefix + 'expand')(x)
        x = layers.BatchNormalization(axis=channel_axis,
                                      epsilon=1e-3,
                                      momentum=0.999,
                                      name=prefix + 'expand_BN')(x)
        x = layers.ReLU(6., name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise
    if stride == 2:
        x = layers.ZeroPadding3D(padding=correct_pad(backend, x, 3),
                                 name=prefix + 'pad')(x)
    x = DepthwiseConv3D(kernel_size=3,
                        strides=stride,
                        activation=None,
                        use_bias=False,
                        padding='same' if stride == 1 else 'valid',
                        name=prefix + 'depthwise')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  epsilon=1e-3,
                                  momentum=0.999,
                                  name=prefix + 'depthwise_BN')(x)

    x = layers.ReLU(6., name=prefix + 'depthwise_relu')(x)

    # Project
    x = layers.Conv3D(pointwise_filters,
                      kernel_size=1,
                      padding='same',
                      use_bias=False,
                      activation=None,
                      name=prefix + 'project')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  epsilon=1e-3,
                                  momentum=0.999,
                                  name=prefix + 'project_BN')(x)

    if in_channels == pointwise_filters and stride == 1:
        return layers.Add(name=prefix + 'add')([inputs, x])
    return x
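
_make_divisible, correct_pad and DepthwiseConv3D are external helpers not shown in this snippet; the first follows the standard MobileNet rounding recipe, sketched below for reference (the other two are assumed to come from a 3D depthwise-convolution add-on):

def _make_divisible(v, divisor, min_value=None):
    # round the channel count to the nearest multiple of `divisor`,
    # never rounding down by more than 10%
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v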
Example #21
def reduced_irn2_stem(input_tensor):
    x = layers.Conv3D(32, (3,3,3), padding='valid', strides=1, activation='relu')(input_tensor)
    x = layers.Conv3D(32, (3,3,3), padding='valid', activation='relu')(x)
    x = layers.Conv3D(64, (3,3,3), padding='same', activation='relu')(x)

    mp_1 = layers.MaxPooling3D((3,3,3), strides=2)(x)
    x = layers.Conv3D(96, (3,3,3), padding='valid', strides=2, activation='relu')(x)
    out = layers.concatenate([mp_1, x], axis=-1)
   
    return out
Example #22
def ConvBlock(x, channels):

    out = layers.Conv3D(channels, (2, 2, 2),
                        padding='valid', strides=2)(x)
    out = layers.LeakyReLU()(out)
    out = layers.Conv3D(channels, (3, 3, 3),
                        padding='same', strides=1)(out)
    out = layers.LeakyReLU()(out)

    return out
Example #23
    def residual_block(y,
                       nb_channels_in,
                       nb_channels_out,
                       _strides=(1, 1, 1),
                       _project_shortcut=False):
        """
        Our network consists of a stack of residual blocks. These blocks have the same topology,
        and are subject to two simple rules:

        - If producing spatial maps of the same size, the blocks share the same hyper-parameters (width and filter sizes).
        - Each time the spatial map is down-sampled by a factor of 2, the width of the blocks is multiplied by a factor of 2.
        """
        shortcut = y

        # we modify the residual building block as a bottleneck design to make the network more economical
        y = layers.Conv3D(
            nb_channels_in,
            kernel_size=(1, 1, 1),
            strides=(1, 1, 1),
            padding='same',
            kernel_regularizer=regularizers.l2(L2_regularizer))(y)
        y = add_common_layers(y)

        # ResNeXt (identical to ResNet when `cardinality` == 1)
        y = grouped_convolution(y, nb_channels_in, _strides=_strides)
        y = add_common_layers(y)

        y = layers.Conv3D(
            nb_channels_out,
            kernel_size=(1, 1, 1),
            strides=(1, 1, 1),
            padding='same',
            kernel_regularizer=regularizers.l2(L2_regularizer))(y)
        # batch normalization is employed after aggregating the transformations and before adding to the shortcut
        y = layers.BatchNormalization()(y)

        # identity shortcuts used directly when the input and output are of the same dimensions
        if _project_shortcut or _strides != (1, 1, 1):
            # when the dimensions increase projection shortcut is used to match dimensions (done by 1×1 convolutions)
            # when the shortcuts go across feature maps of two sizes, they are performed with a stride of 2
            shortcut = layers.Conv3D(
                nb_channels_out,
                kernel_size=(1, 1, 1),
                strides=_strides,
                padding='same',
                kernel_regularizer=regularizers.l2(L2_regularizer))(shortcut)
            shortcut = layers.BatchNormalization()(shortcut)

        y = layers.Add()([shortcut, y])

        # relu is performed right after each batch normalization,
        # except for the output of the block, where relu is performed after adding to the shortcut
        y = layers.Activation('relu')(y)

        return y
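
add_common_layers is not included in the snippet; a minimal sketch consistent with how it is used above (batch normalization followed by an activation after each convolution):

def add_common_layers(y):
    y = layers.BatchNormalization()(y)
    y = layers.LeakyReLU()(y)
    return y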
Example #24
def conv_block(x, *ofs):
    for l in [
        layers.Conv3D(ofs[0], (3, 3, 3), padding='same'),
        layers.BatchNormalization(),
        layers.ReLU(),
        layers.Conv3D(ofs[1], (3, 3, 3), padding='same'),
        layers.BatchNormalization(),
        layers.ReLU()
    ]:
        x = l(x)
    return x
Example #25
def vanilla_base_elu(input_tensor):
    x = layers.Conv3D(32, (3,3,3), activation='elu')(input_tensor)
    x = layers.MaxPooling3D((2,2,2))(x)
    x = layers.Conv3D(64, (3,3,3), activation='elu')(x)
    x = layers.MaxPooling3D((2,2,2))(x)
    x = layers.Conv3D(128, (3,3,3), activation='elu')(x)
    x = layers.MaxPooling3D((2,2,2))(x)
    x = layers.Conv3D(256, (3,3,3), activation='elu')(x)
    x = layers.MaxPooling3D((2,2,2))(x)
    x = layers.Flatten()(x)

    return x
Example #26
def conv_block(input_tensor,
               kernel_size,
               filters,
               stage,
               block,
               strides=(2, 2, 2),
               use_bias=True,
               train_bn=True):
    """conv_block is the block that has a conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
    And the shortcut should have subsample=(2,2) as well
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv3D(nb_filter1, (1, 1, 1),
                  strides=strides,
                  name=conv_name_base + '2a',
                  use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv3D(nb_filter2, (kernel_size, kernel_size, kernel_size),
                  padding='same',
                  name=conv_name_base + '2b',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv3D(nb_filter3, (1, 1, 1),
                  name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)

    shortcut = KL.Conv3D(nb_filter3, (1, 1, 1),
                         strides=strides,
                         name=conv_name_base + '1',
                         use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)

    x = KL.Add()([x, shortcut])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
Example #27
def res_layer(input_tensor, kernel_size, filter, d, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of
            middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        d: amount of dilation
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        weights: initializing weights
    # Returns
        Output tensor for the block.
    """
    bn_axis = 4

    conv_name_base = 'res_' + str(stage) + "_" + str(block) + 'conv'
    bn_name_base = 'res_' + str(stage) + "_" + str(block) + '_bn'

    x = layers.BatchNormalization(axis=bn_axis,
                                  name=bn_name_base + '_0')(input_tensor)
    x = layers.Activation('relu')(x)
    x = layers.Conv3D(filter,
                      kernel_size,
                      kernel_initializer='he_normal',
                      padding='same',
                      strides=(1, 1, 1),
                      dilation_rate=d,
                      use_bias=False,
                      name=conv_name_base + '_0')(x)

    x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '_1')(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv3D(filter,
                      kernel_size,
                      padding='same',
                      strides=(1, 1, 1),
                      dilation_rate=d,
                      use_bias=False,
                      kernel_initializer='he_normal',
                      name=conv_name_base + '_1')(x)

    # Sum the layers, zero-padding the input tensor's channels so they match x
    n_param = x.shape[-1]
    input_tensor_pad = layers.Lambda(padding,
                                     arguments={"n_param":
                                                n_param})(input_tensor)
    x = layers.Add()([x, input_tensor_pad])

    return x
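
The `padding` helper passed to the Lambda is not shown; a hypothetical version consistent with how it is used above, zero-padding the shortcut's channel axis up to n_param channels so the Add() shapes match:

import tensorflow as tf

def padding(tensor, n_param):
    extra = int(n_param) - int(tensor.shape[-1])
    return tf.pad(tensor, [[0, 0], [0, 0], [0, 0], [0, 0], [0, extra]])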
Example #28
def naive_inception(input_shape=(64,64,64,2)):
    input_tensor = tensor_input(input_shape)
    i1 = naive_inception_block(input_tensor, 16,
                                              8, 16,
                                              8, 16,
                                             16)
    r1 = basic_reduction_block(i1, 16, 32,
                                   16, 24, 32)

    x = layers.Conv3D(128, (3,3,3), activation='relu')(r1)
    x = layers.MaxPooling3D((2,2,2))(x)


    i2 = naive_inception_block(x            , 32,
                                              16, 32,
                                              16, 32,
                                             32)
    r2 = basic_reduction_block(i2, 32, 64,
                                   32, 50, 64)

    n = layers.Conv3D(512, (3,3,3), activation='relu')(r2)
    n = layers.MaxPooling3D((2,2,2))(n)
    n = layers.Conv3D(512, (2,2,2), activation='relu')(n)
    n = layers.Flatten()(n)
    n = layers.Dropout(0.2)(n)

    n = layers.Dense(1024, activation='relu')(n)
    n = layers.Dense(512, activation='relu')(n)
    n = layers.Dense(128, activation='relu')(n)
    n = layers.Dense(64, activation='relu')(n)

    density = head(n, 'density')
    detvel = head(n, 'detvel')
    detpres = head(n, 'detpres')
    dipole = head(n, 'dipole')
    energy = head(n, 'energy')
    hof = head(n, 'hof')
    temp = head(n, 'temp')
    gap = head(n, 'gap')

    out_list = [density, detvel, detpres,
                dipole, energy, hof,
                temp, gap]

    model = models.Model(input_tensor, out_list)
    model.compile(optimizer='adam',
                  loss=['mse']*len(out_list),
                  loss_weights=loss_weights,
                  metrics=['mae'])

    return model
Example #29
def irn2_ra(input_tensor):
    x = layers.Activation('relu')(input_tensor)

    b_1 = layers.MaxPooling3D((3,3,3), padding='valid', strides=2)(x)

    b_2 = layers.Conv3D(192, (3,3,3), padding='valid', strides=2, activation='relu')(x)

    b_3 = layers.Conv3D(128, (1,1,1), padding='same', activation='relu')(x)
    b_3 = layers.Conv3D(128, (3,3,3), padding='same', activation='relu')(b_3)
    b_3 = layers.Conv3D(192, (3,3,3), padding='valid', strides=2, activation='relu')(b_3)

    cat = layers.concatenate([b_1, b_2, b_3])

    return cat
Example #30
def D3GenerateModel(n_filter=16, number_of_class=2, input_shape=(16,144,144,1),activation_last='sigmoid', metrics=['mse', 'acc'],loss='mse', optimizer='adam',dropout=0.5, init='glorot_uniform'):
    filter_size =n_filter
    model = Sequential()
    model.add(layers.Conv3D(filters=filter_size, input_shape=input_shape,  kernel_size=(3,3,3), strides=(1,1, 1), 
                                padding='valid', activation='selu'))
    model.add(layers.Conv3D(filters=filter_size*2, kernel_size=(3,3,3), strides=(1, 2,2), 
                                padding='valid', activation='selu'))
    model.add(layers.MaxPooling3D((1, 2,2), padding='valid'))
    model.add(layers.Conv3D(filters=filter_size*2, kernel_size=(3,3,3), strides=(1,1,1), 
                                padding='valid', activation='selu'))
    model.add(layers.Conv3D(filters=filter_size*4, kernel_size=(3,3,3), strides=(1, 2,2), 
                                padding='valid', activation='selu'))
    model.add(layers.MaxPooling3D((1, 2,2), padding='valid'))
    model.add(layers.Conv3D(filters=filter_size*4, kernel_size=(3,3,3), strides=(1,1, 1), 
                                padding='valid', activation='selu'))
    model.add(layers.Conv3D(filters=filter_size*8, kernel_size=(3,3,3), strides=(1, 2,2), 
                                padding='valid', activation='selu'))
    model.add(layers.MaxPooling3D((1,2, 2), padding='same'))
    model.add(layers.Conv3D(filters=filter_size*16, kernel_size=(3,3,3), strides=(1,1, 1), 
                                padding='same', activation='selu'))
    model.add(layers.Conv3D(filters=filter_size*32, kernel_size=(3,3,3), strides=(2,2, 2), 
                                padding='same', activation='selu'))
    
    #model.add(layers.MaxPooling2D((2, 2), padding='valid'))
    model.add(layers.GlobalMaxPooling3D())
    #Encoder
    model.add(layers.Dense(512, activation='selu'))
    model.add(keras.layers.Dropout(0.5))
    model.add(layers.Dense(256, activation='selu'))
    model.add(layers.Dense(2, activation='softmax'))#, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01)))
    model.summary()
    model.compile(optimizer=keras.optimizers.Adam(lr=2e-6), loss='categorical_crossentropy', metrics=metrics)
    return model