예제 #1
0
def get_model():
    """Build a small 3D CNN classifier for 30x30x30 single-channel volumes.

    Three strided Conv3D stages progressively downsample the volume, then
    a flattened dense softmax head predicts one of 10 classes.
    """
    net = models.Sequential([
        layers.Reshape((30, 30, 30, 1), input_shape=(30, 30, 30, 1)),
        layers.Conv3D(16, 6, strides=2, activation='relu', padding='same'),
        layers.Conv3D(64, 5, strides=2, activation='relu', padding='same'),
        layers.Conv3D(64, 5, strides=2, activation='relu'),
        layers.Flatten(),
        layers.Dense(10, activation='softmax'),
    ])
    return net
예제 #2
0
def decoder_block(input_tensor, concat_tensor, num_filters):
    """Decoder stage of a 3D U-Net-style network.

    Upsamples ``input_tensor`` 2x with a transposed convolution, concatenates
    the skip connection ``concat_tensor``, then applies three identical
    Conv3D + BatchNorm + ReLU refinement stages.

    Args:
        input_tensor: Tensor from the previous (coarser) decoder level.
        concat_tensor: Skip tensor from the matching encoder level; must have
            the same spatial shape as the upsampled input.
        num_filters: Number of filters for the transposed conv and each conv.

    Returns:
        The refined decoder tensor.
    """
    decoder = layers.Conv3DTranspose(num_filters, (2, 2, 2), strides=(2, 2, 2),
                                     padding='same')(input_tensor)
    decoder = layers.concatenate([concat_tensor, decoder], axis=-1)
    decoder = layers.BatchNormalization()(decoder)
    decoder = layers.Activation('relu')(decoder)
    # The original repeated this stage three times verbatim; a loop keeps the
    # exact same layer sequence without the copy-paste.
    for _ in range(3):
        decoder = layers.Conv3D(num_filters, kernel_size=[3, 3, 3],
                                padding='same')(decoder)
        decoder = layers.BatchNormalization()(decoder)
        decoder = layers.Activation('relu')(decoder)
    return decoder
def Construct3DUnetModel(input_images, nclasses, use_bn = True, use_dropout = True):
    """Assemble a 3D U-Net: three contracting levels, a bottleneck with
    optional dropout, three expanding levels with skip connections, and a
    per-voxel softmax segmentation head.

    Args:
        input_images: Input tensor of the network.
        nclasses: Number of output classes for the segmentation layer.
        use_bn: Whether the conv blocks use batch normalization.
        use_dropout: Whether to apply Dropout(0.5) after the bottleneck.

    Returns:
        The softmax segmentation output tensor.
    """
    x = input_images
    skips = {}
    # Contracting path: each level doubles the channel counts.
    for level, chans in ((1, (32, 64)), (2, (64, 128)), (3, (128, 256))):
        scope = 'contract%d' % level
        with name_scope(scope):
            x, skips[level] = CreateConv3DBlock(x, chans, n=2,
                                                use_bn=use_bn, name=scope)

    # Bottleneck: no pooling, skip output discarded.
    with name_scope("contract4"):
        x, _ = CreateConv3DBlock(x, (256, 512), n=2, use_bn=use_bn,
                                 apply_pooling=False, name='contract4')

    with name_scope("dropout"):
        if use_dropout:
            x = klayers.Dropout(0.5, name='dropout')(x)

    # Expanding path: consume the skips in reverse order.
    for level, chans in ((3, (256, 256)), (2, (128, 128)), (1, (64, 64))):
        scope = 'expand%d' % level
        with name_scope(scope):
            x = CreateUpConv3DBlock(x, [skips[level]], chans, n=2,
                                    use_bn=use_bn, name=scope)

    with name_scope("segmentation"):
        layername = 'segmentation_{}classes'.format(nclasses)
        x = klayers.Conv3D(nclasses, (1,1,1), activation='softmax',
                           padding='same', name=layername)(x)

    return x
예제 #4
0
 def _build(self, params) -> None:
     """Build the convolutional network and store it in ``self._model``.

     A single conv/pool trunk feeds a Dense(80) embedding; that embedding is
     concatenated with itself before the final dense layers so the weight
     layout matches the two-branch siamese model it is loaded from.

     NOTE(review): the original docstring cited the dlology batch-norm
     article for ``use_bias=False``, but no layer here actually passes
     ``use_bias`` — confirm which was intended.
     https://www.dlology.com/blog/one-simple-trick-to-train-keras-model-faster-with-batch-normalization/
     """
     inp = layers.Input(shape=((params.image_size, params.image_size,
                                params.in_channels, 1)))
     # (filters, kernel_size, layer name, pool afterwards?) per conv stage.
     stages = [
         (32, (13, 13, 3), None, True),
         (32, (7, 7, 3), None, True),
         (48, (3, 3, 2), None, False),
         (48, (3, 3, 2), "retrieve", True),
     ]
     x = inp
     for filters, kernel, layer_name, pool_after in stages:
         x = layers.Conv3D(filters=filters,
                           kernel_size=kernel,
                           padding='same',
                           activation='selu',
                           name=layer_name)(x)
         if pool_after:
             x = layers.MaxPool3D(pool_size=(3, 3, 2),
                                  strides=(2, 2, 1),
                                  padding='valid')(x)
     x = layers.Flatten()(x)
     x = layers.Dense(80)(x)
     # End of the siamese trunk; duplicate the embedding so weights load
     # in the same layout as the two-branch network.
     x = layers.Dense(units=160, name='dense_last',
                      activation='selu')(tf.concat([x, x], axis=1))
     out = layers.Dense(units=2, name='classifier', activation='sigmoid')(x)
     self._model = tf.keras.Model(inputs=inp, outputs=out)
def CreateConv3DBlock(x, filters, n = 2, use_bn = True, apply_pooling = True, name = 'convblock'):
    """Contracting-path block: ``n`` unpadded 3x3x3 convolutions (each
    optionally batch-normalized) with ReLU, then optional 2x max pooling.

    Args:
        x: Input tensor.
        filters: Sequence of at least ``n`` filter counts, one per conv.
        n: Number of conv stages.
        use_bn: Insert BatchNormalization after each conv when True.
        apply_pooling: Apply MaxPool3D(2,2,2) to the block output when True.
        name: Prefix for all layer names in this block.

    Returns:
        Tuple ``(pooled_output, pre_pool_output)``; identical tensors when
        ``apply_pooling`` is False.
    """
    for stage in range(1, n + 1):
        suffix = str(stage)
        x = klayers.Conv3D(filters[stage - 1], (3,3,3), padding='valid',
                           name=name+'_conv'+suffix)(x)
        if use_bn:
            x = klayers.BatchNormalization(name=name+'_BN'+suffix)(x)
        x = klayers.Activation('relu', name=name+'_relu'+suffix)(x)

    convresult = x
    if apply_pooling:
        x = klayers.MaxPool3D(pool_size=(2,2,2), name=name+'_pooling')(x)
    return x, convresult
예제 #6
0
    def discriminator_model(self):
        """Build the 3D video discriminator as a Sequential model.

        Four Conv3D -> layer-norm -> LeakyReLU blocks (filters
        crop_size * {1, 2, 4, 8}, stride 2) downsample the clip, a fifth
        un-normalized stride-2 conv collapses to one feature map, and a
        final Dense(1) produces the real/fake score.

        Returns:
            The uncompiled tf.keras.Sequential discriminator.
        """
        model = tf.keras.Sequential()

        # NOTE(review): tf.contrib was removed in TF 2.x; when migrating,
        # replace this Lambda with tf.keras.layers.LayerNormalization.
        def add_layer_norm():
            model.add(kl.Lambda(lambda x: tf.contrib.layers.layer_norm(x)))

        # Convolution blocks 1-4: originally four copy-pasted blocks that
        # differ only in filter count (and the first block's input_shape).
        for i, mult in enumerate((1, 2, 4, 8)):
            conv_kwargs = dict(filters=self.crop_size * mult, kernel_size=4,
                               strides=2, padding='same',
                               kernel_initializer=self.conv_init)
            if i == 0:
                # Only the first layer declares the input shape.
                conv_kwargs['input_shape'] = (self.num_frames, self.crop_size,
                                              self.crop_size, 3)
            model.add(kl.Conv3D(**conv_kwargs))
            add_layer_norm()
            model.add(kl.LeakyReLU(.2))

        # Convolution block 5: single feature map, no layer norm.
        model.add(kl.Conv3D(filters=1, kernel_size=4, strides=2, padding='same',
                            kernel_initializer=self.conv_init))
        model.add(kl.LeakyReLU(.2))

        # Linear block: flatten and score.
        model.add(kl.Flatten())
        model.add(kl.Dense(
            1, kernel_initializer=tf.keras.initializers.random_normal(stddev=0.01)))

        return model
예제 #7
0
def _conv3d_bn(x,
               filters,
               num_step,
               num_row,
               num_col,
               padding='same',
               strides=(1, 1, 1),
               name=None):
    """Apply a bias-free Conv3D, BatchNormalization, and ReLU in sequence.

    Arguments:
        x: The input tensor.
        filters: The filters in `Conv3D`.
        num_step: The depth of the convolution kernel.
        num_row: The height of the convolution kernel.
        num_col: The width of the convolution kernel.
        padding: The padding mode in `Conv3D`.
        strides: The strides in `Conv3D`.
        name: Base name for the ops: the conv becomes `name + '_conv'`, the
              batch norm `name + '_bn'`, and the activation `name` itself.
              When None, all layers are auto-named.

    Returns:
        The output tensor after `Conv3D`, `BatchNormalization`, and ReLU.
    """
    conv_name = None if name is None else name + '_conv'
    bn_name = None if name is None else name + '_bn'

    out = layers.Conv3D(filters, (num_step, num_row, num_col),
                        strides=strides,
                        padding=padding,
                        use_bias=False,  # bias is redundant before batch norm
                        name=conv_name)(x)
    out = layers.BatchNormalization(axis=_CHANNEL_AXIS,
                                    scale=False,
                                    name=bn_name)(out)
    return layers.Activation('relu', name=name)(out)
예제 #8
0
def Convolutional_LSTM(n_frames, width, height, channels):
    """Build and compile a stacked ConvLSTM model that maps an input movie
    of shape (n_frames, width, height, channels) to a movie with the same
    frame count and one sigmoid-activated channel per pixel.

    Args:
        n_frames: Number of frames per input movie.
        width: Frame width.
        height: Frame height.
        channels: Channels per frame.

    Returns:
        The compiled tf.keras Model (adadelta optimizer, binary
        cross-entropy loss).
    """
    # Renamed from `input` to avoid shadowing the builtin.
    inputs = layers.Input(shape=(n_frames, width, height, channels))

    x = inputs
    # Four identical ConvLSTM2D + BatchNorm stages, all returning sequences.
    for _ in range(4):
        x = layers.ConvLSTM2D(filters=40, kernel_size=(3, 3), padding='same',
                              return_sequences=True)(x)
        x = layers.BatchNormalization()(x)
    x = layers.Conv3D(filters=1, kernel_size=(3, 3, 3), padding='same',
                      data_format='channels_last')(x)
    x = layers.Activation('sigmoid')(x)

    # BUG FIX: keras Model takes `inputs`/`outputs` (plural); the original
    # `input=`/`output=` keywords raise a TypeError in TF 2.x.
    conv_lstm = tf.keras.models.Model(inputs=inputs, outputs=x)
    conv_lstm.compile(optimizer='adadelta', loss='binary_crossentropy')

    # return a movie of identical shape
    return conv_lstm
예제 #9
0
def CreateUpConv3DBlock(x,
                        contractpart,
                        filters,
                        n=2,
                        use_bn=True,
                        name='upconvblock'):
    """Expanding-path block of a 3D U-Net: 2x transposed-conv upsampling,
    center-cropped skip concatenation, then ``n`` unpadded 3x3x3 convs.

    Args:
        x: Input tensor from the previous (coarser) level.
        contractpart: List of one or two skip tensors from the contracting
            path; assumed at least as large spatially as the upsampled ``x``
            (valid convolutions shrink the contracting-path outputs less).
        filters: Sequence of at least ``n`` filter counts, one per conv.
        n: Number of Conv3D(+BN)+ReLU stages after the concatenation.
        use_bn: Insert BatchNormalization after each conv when True.
        name: Prefix for all layer names in this block.

    Returns:
        The output tensor of the block.
    """
    # upconv x
    x = klayers.Conv3DTranspose((int)(x.shape[-1]), (2, 2, 2),
                                strides=(2, 2, 2),
                                padding='same',
                                use_bias=False,
                                name=name + '_upconv')(x)
    # concatenate contractpart and x
    # c[k] is half the per-axis size difference between the skip tensor and
    # the upsampled x; cropping by c on both sides center-crops the skip to
    # x's spatial shape so the concatenation below is shape-compatible.
    c = [(i - j) // 2 for (
        i,
        j) in zip(contractpart[0].shape[1:4].as_list(), x.shape[1:4].as_list())
         ]
    contract_crop = klayers.Cropping3D(cropping=((c[0], c[0]), (c[1], c[1]),
                                                 (c[2],
                                                  c[2])))(contractpart[0])
    if len(contractpart) > 1:
        # Second skip tensor (when given) is cropped by the same margins —
        # assumes it has the same spatial shape as contractpart[0].
        crop1 = klayers.Cropping3D(cropping=((c[0], c[0]), (c[1], c[1]),
                                             (c[2], c[2])))(contractpart[1])
        #crop2 = klayers.Cropping3D(cropping=((c[0],c[0]),(c[1],c[1]),(c[2],c[2])))(contractpart[2])
        #x = klayers.concatenate([contract_crop, crop1, crop2, x])
        x = klayers.concatenate([contract_crop, crop1, x])
    else:
        x = klayers.concatenate([contract_crop, x])

    # conv x 2 times
    for i in range(n):
        x = klayers.Conv3D(filters[i], (3, 3, 3),
                           padding='valid',
                           name=name + '_conv' + str(i + 1))(x)
        if use_bn:
            x = klayers.BatchNormalization(name=name + '_BN' + str(i + 1))(x)
        x = klayers.Activation('relu', name=name + '_relu' + str(i + 1))(x)

    return x
예제 #10
0
def convolution():
    """Build parallel 3D-convolution branches — one per entry in the
    module-level ``filter_sizes`` — each max-pooled and concatenated into a
    single output.  Prints the model summary before returning the model.
    """
    inn = layers.Input(shape=(sequence_length, alpha_len, embedding_dimension,
                              1))
    branches = []
    for size in filter_sizes:
        feature = layers.Conv3D(
            filters=2,
            kernel_size=([size, alpha_len, embedding_dimension]),
            strides=[size, 1, 1],
            padding='valid',
            activation='relu')(inn)
        # Pool so each branch is reduced to roughly 100 steps.
        window = int(feature.shape[1] / 100)
        pooled = layers.MaxPool3D(pool_size=([window, 1, 1]),
                                  padding='valid')(feature)
        branches.append(pooled)
    outt = layers.concatenate(branches)

    model = keras.Model(inputs=inn, outputs=outt, name='cnns')
    model.summary()
    return model
예제 #11
0
def conv(input_tensor, num_filters):
    """Apply a 3x3x3 'same' convolution followed by batch norm and ReLU."""
    out = layers.Conv3D(num_filters, kernel_size=[3, 3, 3],
                        padding='same')(input_tensor)
    out = layers.BatchNormalization()(out)
    return layers.Activation('relu')(out)
예제 #12
0
def output(decoder0):
    """Project the final decoder tensor to one sigmoid-activated channel."""
    with tf.name_scope('outputs'):
        return layers.Conv3D(1, (1,1,1), activation='sigmoid',
                             name='outputs')(decoder0)
예제 #13
0
    def __init__(self, params, **kwargs):
        """Create the layers of the siamese convolutional trunk and the
        dense classification head.

        The trunk is conv0/pool0 -> conv1/pool1 -> conv2 -> conv3/pool3 ->
        flatten -> Dense(80); the head is Dense(160, relu) ->
        Dense(num_labels, sigmoid).  Batch-norm and dropout layers from an
        earlier revision are intentionally not constructed.
        """
        super(SiameseConv, self).__init__(**kwargs)
        self.img_size = params.image_size
        in_channels = params.in_channels
        out_channels = params.out_channels  # read for parity; unused here
        num_labels = params.num_labels
        bn_momentum = params.bn_momentum  # read for parity; unused here

        def make_pool():
            # Shared pooling config: 3x3x2 window, stride (2, 2, 1), valid.
            return layers.MaxPool3D(pool_size=(3, 3, 2),
                                    strides=(2, 2, 1),
                                    padding='valid')

        # Feature extractor: wide kernels first, narrowing with depth.
        self.conv0 = layers.Conv3D(filters=32,
                                   kernel_size=(13, 13, 3),
                                   padding='same',
                                   activation='relu',
                                   input_shape=(self.img_size, self.img_size,
                                                in_channels, 1),
                                   name="pippo")
        self.maxpool0 = make_pool()

        self.conv1 = layers.Conv3D(filters=32,
                                   kernel_size=(7, 7, 3),
                                   padding='same',
                                   activation='relu')
        self.maxpool1 = make_pool()

        # conv2 has no pooling stage after it.
        self.conv2 = layers.Conv3D(filters=48,
                                   kernel_size=(3, 3, 2),
                                   padding='same',
                                   activation='relu')

        self.conv3 = layers.Conv3D(filters=48,
                                   kernel_size=(3, 3, 2),
                                   padding='same',
                                   activation='relu')
        self.maxpool3 = make_pool()

        self.flatten = layers.Flatten()
        self.fc_siamese = layers.Dense(80)

        # Classification head: two output classes via sigmoid activation.
        self.fc0 = layers.Dense(units=160,
                                name='dense_last',
                                activation='relu')
        self.classifier = layers.Dense(units=num_labels,
                                       name='classifier',
                                       activation='sigmoid')