Example #1
def build_3d_cnn(w, h, d, s, num_outputs):
    from keras.layers import Input, Dense
    from keras.models import Sequential
    from keras.layers import Conv3D, MaxPooling3D, Reshape, BatchNormalization
    from keras.layers import Activation, Dropout, Flatten, Cropping3D

    #Credit: https://github.com/jessecha/DNRacing/blob/master/3D_CNN_Model/model.py
    '''
        w : width
        h : height
        d : depth
        s : n_stacked
    '''
    input_shape=(s, h, w, d)

    model = Sequential()
    # First layer: crop 50 rows off the top and 10 off the bottom of each frame
    model.add(Cropping3D(cropping=((0, 0), (50, 10), (0, 0)), input_shape=input_shape))
    
    # Second layer
    model.add(Conv3D(
        filters=16, kernel_size=(3,3,3), strides=(1,3,3),
        data_format='channels_last', padding='same')
    )
    model.add(Activation('relu'))
    model.add(MaxPooling3D(
        pool_size=(1,2,2), strides=(1,2,2), padding='valid', data_format=None)
    )
    # Third layer
    model.add(Conv3D(
        filters=32, kernel_size=(3,3,3), strides=(1,1,1),
        data_format='channels_last', padding='same')
    )
    model.add(Activation('relu'))
    model.add(MaxPooling3D(
        pool_size=(1, 2, 2), strides=(1,2,2), padding='valid', data_format=None)
    )
    # Fourth layer
    model.add(Conv3D(
        filters=64, kernel_size=(3,3,3), strides=(1,1,1),
        data_format='channels_last', padding='same')
    )
    model.add(Activation('relu'))
    model.add(MaxPooling3D(
        pool_size=(1,2,2), strides=(1,2,2), padding='valid', data_format=None)
    )
    # Fifth layer
    model.add(Conv3D(
        filters=128, kernel_size=(3,3,3), strides=(1,1,1),
        data_format='channels_last', padding='same')
    )
    model.add(Activation('relu'))
    model.add(MaxPooling3D(
        pool_size=(1,2,2), strides=(1,2,2), padding='valid', data_format=None)
    )
    # Fully connected layer
    model.add(Flatten())

    model.add(Dense(256))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(256))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(num_outputs))
    #model.add(Activation('tanh'))

    return model
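A minimal usage sketch (not part of the original source): the sizes below are illustrative assumptions, chosen so that the frame height survives the 50 + 10 pixel crop; d is the number of colour channels, s the number of stacked frames, and the optimizer/loss are placeholders.

model = build_3d_cnn(w=160, h=120, d=3, s=20, num_outputs=2)
model.compile(optimizer='adam', loss='mse')  # placeholder training setup
model.summary()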
Example #2
def get_net(input_shape=(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1),
            load_weight_path=None) -> Model:
    """Load the pre-trained 3D ConvNet that should be used to predict a nodule and its malignancy.

    Args:
        input_shape: shape of the input layer. Defaults to (CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1).
        load_weight_path: path of the trained model weights.

    Returns:
        keras.models.Model
    """
    inputs = Input(shape=input_shape, name="input_1")
    x = inputs
    x = AveragePooling3D(pool_size=(2, 1, 1),
                         strides=(2, 1, 1),
                         padding="same")(x)
    x = Convolution3D(64, (3, 3, 3),
                      activation='relu',
                      padding='same',
                      name='conv1',
                      strides=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(1, 2, 2),
                     strides=(1, 2, 2),
                     padding='valid',
                     name='pool1')(x)

    # 2nd layer group
    x = Convolution3D(128, (3, 3, 3),
                      activation='relu',
                      padding='same',
                      name='conv2',
                      strides=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2),
                     strides=(2, 2, 2),
                     padding='valid',
                     name='pool2')(x)

    # 3rd layer group
    x = Convolution3D(256, (3, 3, 3),
                      activation='relu',
                      padding='same',
                      name='conv3a',
                      strides=(1, 1, 1))(x)
    x = Convolution3D(256, (3, 3, 3),
                      activation='relu',
                      padding='same',
                      name='conv3b',
                      strides=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2),
                     strides=(2, 2, 2),
                     padding='valid',
                     name='pool3')(x)

    # 4th layer group
    x = Convolution3D(512, (3, 3, 3),
                      activation='relu',
                      padding='same',
                      name='conv4a',
                      strides=(1, 1, 1))(x)
    x = Convolution3D(
        512,
        (3, 3, 3),
        activation='relu',
        padding='same',
        name='conv4b',
        strides=(1, 1, 1),
    )(x)
    x = MaxPooling3D(pool_size=(2, 2, 2),
                     strides=(2, 2, 2),
                     padding='valid',
                     name='pool4')(x)

    last64 = Convolution3D(64, (2, 2, 2), activation="relu", name="last_64")(x)
    out_class = Convolution3D(1, (1, 1, 1),
                              activation="sigmoid",
                              name="out_class_last")(last64)
    out_class = Flatten(name="out_class")(out_class)

    out_malignancy = Convolution3D(1, (1, 1, 1),
                                   activation=None,
                                   name="out_malignancy_last")(last64)
    out_malignancy = Flatten(name="out_malignancy")(out_malignancy)

    model = Model(inputs=inputs, outputs=[out_class, out_malignancy])
    if load_weight_path is not None:
        model.load_weights(load_weight_path)

    model.compile(optimizer=SGD(lr=LEARN_RATE, momentum=0.9, nesterov=True),
                  loss={
                      "out_class": "binary_crossentropy",
                      "out_malignancy": mean_absolute_error
                  },
                  metrics={
                      "out_class": [binary_accuracy, binary_crossentropy],
                      "out_malignancy": mean_absolute_error
                  })

    return model
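A hypothetical usage sketch (the weight path and CUBE_SIZE value are assumptions, not values from the source): the model returns a sigmoid nodule probability and an unbounded malignancy score for each input cube.

import numpy as np

CUBE_SIZE = 32  # assumed; the real constant comes from the surrounding module
model = get_net(load_weight_path="weights/nodule_detector.hd5")  # hypothetical path
cubes = np.zeros((8, CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1), dtype="float32")
nodule_prob, malignancy = model.predict(cubes, batch_size=8)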
Example #3
# Hyperparameters
batch_size = 32
num_classes = 2
epochs = 50

model = Sequential()

# 1st layer group (the opening of this snippet is truncated in the source;
# the filter count of conv1_1 is assumed to match conv1_2)
model.add(
    Conv3D(32,
           kernel_size=(5, 5, 5),
           activation='relu',
           padding='same',
           input_shape=input_shape,
           name='conv1_1'))
model.add(Dropout(0.25))

# 2nd layer group
model.add(
    Conv3D(32,
           kernel_size=(5, 5, 5),
           activation='relu',
           padding='same',
           name='conv1_2'))
model.add(MaxPooling3D(pool_size=(2, 2, 2), padding='valid', name='pool1_2'))
model.add(BatchNormalization())
model.add(Dropout(0.25))

# FC layers
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))

# Output layer
model.add(BatchNormalization())
model.add(Dense(num_classes, activation='softmax'))
Example #4
def unet_model_3d(input_shape,
                  n_labels,
                  batch_normalization=False,
                  initial_learning_rate=0.00001,
                  metrics=m.dice_coef):
    """
    input_shape:without batch_size,(img_height,img_width,img_depth)
    metrics:
    """

    inputs = Input(input_shape)

    down_layer = []

    layer = inputs

    # down_layer_1
    layer = res_block_v2_3d(layer, 32, batch_normalization=batch_normalization)
    down_layer.append(layer)
    layer = MaxPooling3D(pool_size=[2, 2, 2],
                         strides=[2, 2, 2],
                         padding='same')(layer)

    print(str(layer.get_shape()))

    # down_layer_2
    layer = res_block_v2_3d(layer, 64, batch_normalization=batch_normalization)
    down_layer.append(layer)
    layer = MaxPooling3D(pool_size=[2, 2, 2],
                         strides=[2, 2, 2],
                         padding='same')(layer)

    print(str(layer.get_shape()))

    # down_layer_3
    layer = res_block_v2_3d(layer,
                            128,
                            batch_normalization=batch_normalization)
    down_layer.append(layer)
    layer = MaxPooling3D(pool_size=[2, 2, 2],
                         strides=[2, 2, 2],
                         padding='same')(layer)

    print(str(layer.get_shape()))

    # down_layer_4
    layer = res_block_v2_3d(layer,
                            256,
                            batch_normalization=batch_normalization)
    down_layer.append(layer)
    layer = MaxPooling3D(pool_size=[2, 2, 2],
                         strides=[2, 2, 2],
                         padding='same')(layer)

    print(str(layer.get_shape()))

    # bottle_layer
    layer = res_block_v2_3d(layer,
                            512,
                            batch_normalization=batch_normalization)
    print(str(layer.get_shape()))

    # up_layer_4
    layer = up_and_concate_3d(layer, down_layer[3])
    layer = res_block_v2_3d(layer,
                            256,
                            batch_normalization=batch_normalization)
    print(str(layer.get_shape()))

    # up_layer_3
    layer = up_and_concate_3d(layer, down_layer[2])
    layer = res_block_v2_3d(layer,
                            128,
                            batch_normalization=batch_normalization)
    print(str(layer.get_shape()))

    # up_layer_2
    layer = up_and_concate_3d(layer, down_layer[1])
    layer = res_block_v2_3d(layer, 64, batch_normalization=batch_normalization)
    print(str(layer.get_shape()))

    # up_layer_1
    layer = up_and_concate_3d(layer, down_layer[0])
    layer = res_block_v2_3d(layer, 32, batch_normalization=batch_normalization)
    print(str(layer.get_shape()))

    # score_layer
    layer = Conv3D(n_labels, [1, 1, 1], strides=[1, 1, 1])(layer)
    print(str(layer.get_shape()))

    # softmax
    layer = Activation('softmax')(layer)
    print(str(layer.get_shape()))

    outputs = layer

    model = Model(inputs=inputs, outputs=outputs)

    if not isinstance(metrics, list):
        metrics = [metrics]

    model = multi_gpu_model(model, gpus=2)
    model.summary()
    model.compile(optimizer=Adam(lr=initial_learning_rate),
                  loss='categorical_crossentropy',
                  metrics=metrics)

    return model
Example #5
def main():
    parser = argparse.ArgumentParser(
        description='simple 3D convolution for action recognition')
    parser.add_argument('--batch', type=int, default=128)
    parser.add_argument('--epoch', type=int, default=100)
    parser.add_argument('--videos', type=str, default='UCF101',
                        help='directory where videos are stored')
    parser.add_argument('--nclass', type=int, default=101)
    parser.add_argument('--output', type=str, required=True)
    # NOTE: argparse's type=bool turns any non-empty string into True, so
    # "--color False" still yields True (see the str2bool sketch after this example).
    parser.add_argument('--color', type=bool, default=False)
    parser.add_argument('--skip', type=bool, default=True)
    parser.add_argument('--depth', type=int, default=10)
    args = parser.parse_args()

    img_rows, img_cols, frames = 32, 32, args.depth
    channel = 3 if args.color else 1
    fname_npz = 'dataset_{}_{}_{}.npz'.format(
        args.nclass, args.depth, args.skip)

    vid3d = videoto3d.Videoto3D(img_rows, img_cols, frames)
    nb_classes = args.nclass
    if os.path.exists(fname_npz):
        loadeddata = np.load(fname_npz)
        X, Y = loadeddata["X"], loadeddata["Y"]
    else:
        x, y = loaddata(args.videos, vid3d, args.nclass,
                        args.output, args.color, args.skip)
        X = x.reshape((x.shape[0], img_rows, img_cols, frames, channel))
        Y = np_utils.to_categorical(y, nb_classes)

        X = X.astype('float32')
        np.savez(fname_npz, X=X, Y=Y)
        print('Saved dataset to dataset.npz.')
    print('X_shape:{}\nY_shape:{}'.format(X.shape, Y.shape))

    # Define model
    model = Sequential()
    model.add(Conv3D(32, kernel_size=(3, 3, 3), input_shape=(
        X.shape[1:]), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv3D(32, kernel_size=(3, 3, 3), padding='same'))
    model.add(Activation('softmax'))
    model.add(MaxPooling3D(pool_size=(3, 3, 3), padding='same'))
    model.add(Dropout(0.25))

    model.add(Conv3D(64, kernel_size=(3, 3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv3D(64, kernel_size=(3, 3, 3), padding='same'))
    model.add(Activation('softmax'))
    model.add(MaxPooling3D(pool_size=(3, 3, 3), padding='same'))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(512, activation='sigmoid'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))

    model.compile(loss=categorical_crossentropy,
                  optimizer=Adam(), metrics=['accuracy'])
    model.summary()
    if not os.path.isdir(args.output):
        os.makedirs(args.output)
    plot_model(model, show_shapes=True,
               to_file=os.path.join(args.output, 'model.png'))

    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=0.2, random_state=43)

    history = model.fit(X_train, Y_train, validation_data=(X_test, Y_test), batch_size=args.batch,
                        epochs=args.epoch, verbose=1, shuffle=True)
    model.evaluate(X_test, Y_test, verbose=0)
    model_json = model.to_json()
    if not os.path.isdir(args.output):
        os.makedirs(args.output)
    with open(os.path.join(args.output, 'ucf101_3dcnnmodel.json'), 'w') as json_file:
        json_file.write(model_json)
    model.save_weights(os.path.join(args.output, 'ucf101_3dcnnmodel.hd5'))

    loss, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test loss:', loss)
    print('Test accuracy:', acc)
    print(history.history.keys())
    plot_history(history, args.output)
    save_history(history, args.output)
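One caveat in the argument parsing above: argparse's type=bool converts any non-empty string to True, so --color False still enables colour. A sketch of the usual workaround, shown as an illustration rather than a change to the original script:

import argparse

def str2bool(value):
    # Map common textual spellings onto booleans; reject anything else.
    if isinstance(value, bool):
        return value
    if value.lower() in ('yes', 'true', 't', '1'):
        return True
    if value.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean value')

# e.g. parser.add_argument('--color', type=str2bool, default=False)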
Example #6
def unet_model_3d(input_shape,
                  pool_size=(2, 2, 2),
                  n_labels=1,
                  initial_learning_rate=0.00001,
                  deconvolution=False,
                  depth=4,
                  n_base_filters=32,
                  include_label_wise_dice_coefficients=False,
                  metrics=dice_coefficient,
                  batch_normalization=False,
                  activation_name="sigmoid"):
    """
    Builds the 3D UNet Keras model.
    :param metrics: List metrics to be calculated during model training (default is dice coefficient).
    :param include_label_wise_dice_coefficients: If True and n_labels is greater than 1, model will report the dice
    coefficient for each label as metric.
    :param n_base_filters: The number of filters that the first layer in the convolution network will have. Following
    layers will contain a multiple of this number. Lowering this number will likely reduce the amount of memory required
    to train the model.
    :param depth: indicates the depth of the U-shape for the model. The greater the depth, the more max pooling
    layers will be added to the model. Lowering the depth may reduce the amount of memory required for training.
    :param input_shape: Shape of the input data (n_channels, x_size, y_size, z_size). The x, y, and z sizes must be
    divisible by the pool size to the power of the depth of the UNet, that is pool_size^depth.
    :param pool_size: Pool size for the max pooling operations.
    :param n_labels: Number of binary labels that the model is learning.
    :param initial_learning_rate: Initial learning rate for the model. This will be decayed during training.
    :param deconvolution: If set to True, will use transpose convolution (deconvolution) instead of up-sampling. This
    increases the amount of memory required during training.
    :return: Untrained 3D UNet Model
    """
    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    # add levels with max pooling
    for layer_depth in range(depth):
        layer1 = create_convolution_block(
            input_layer=current_layer,
            n_filters=n_base_filters * (2**layer_depth),
            batch_normalization=batch_normalization)
        layer2 = create_convolution_block(
            input_layer=layer1,
            n_filters=n_base_filters * (2**layer_depth) * 2,
            batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    # add levels with up-convolution or up-sampling
    for layer_depth in range(depth - 2, -1, -1):
        up_convolution = get_up_convolution(
            pool_size=pool_size,
            deconvolution=deconvolution,
            n_filters=current_layer._keras_shape[1])(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)
        current_layer = create_convolution_block(
            n_filters=levels[layer_depth][1]._keras_shape[1],
            input_layer=concat,
            batch_normalization=batch_normalization)
        current_layer = create_convolution_block(
            n_filters=levels[layer_depth][1]._keras_shape[1],
            input_layer=current_layer,
            batch_normalization=batch_normalization)

    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation(activation_name)(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    if include_label_wise_dice_coefficients and n_labels > 1:
        label_wise_dice_metrics = [
            get_label_dice_coefficient_function(index)
            for index in range(n_labels)
        ]
        if metrics:
            metrics = metrics + label_wise_dice_metrics
        else:
            metrics = label_wise_dice_metrics

    model.compile(optimizer=Adam(lr=initial_learning_rate),
                  loss=dice_coefficient_loss,
                  metrics=metrics)
    return model
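The docstring notes that the x, y, and z sizes must be divisible by the pool size raised to the power of the depth. A minimal sketch of that pre-flight check, assuming the (n_channels, x_size, y_size, z_size) convention from the docstring; the helper name is ours:

def check_unet_input_shape(input_shape, pool_size=(2, 2, 2), depth=4):
    # input_shape is (n_channels, x_size, y_size, z_size)
    for size, pool in zip(input_shape[1:], pool_size):
        factor = pool ** depth
        if size % factor != 0:
            raise ValueError(
                "spatial size %d is not divisible by %d**%d = %d"
                % (size, pool, depth, factor))

check_unet_input_shape((4, 144, 144, 144))  # a 4-channel 144^3 volume passes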
Example #7
def create_simple_fully_convolutional_network_model_3d(
        input_image_size,
        number_of_filters_per_layer=(32, 64, 128, 256, 256, 64),
        number_of_bins=40,
        dropout_rate=0.5):
    """
    Implementation of the "SCFN" architecture for Brain/Gender prediction

    Creates a keras model implementation of the Simple Fully Convolutional
    Network model from the FMRIB group:

       https://github.com/ha-ha-ha-han/UKBiobank_deep_pretrain


    Arguments
    ---------
    input_image_size : tuple of length 4
        Used for specifying the input tensor shape.  The shape (or dimension) of
        that tensor is the image dimensions followed by the number of channels
        (e.g., red, green, and blue).
    number_of_filters_per_layer : array 
        number of filters for the convolutional layers.
    number_of_bins : integer
        number of bins for final softmax output.
    dropout_rate : float between 0 and 1
        Optional dropout rate before final convolution layer. 

    Returns
    -------
    Keras model
        A 3-D keras model.

    Example
    -------
    >>> model = create_simple_fully_convolutional_network_model_3d((None, None, None, 1))
    >>> model.summary()
    """

    number_of_layers = len(number_of_filters_per_layer)

    inputs = Input(shape=input_image_size)

    outputs = inputs
    for i in range(number_of_layers):
        if i < number_of_layers - 1:
            outputs = Conv3D(filters=number_of_filters_per_layer[i],
                             kernel_size=(3, 3, 3),
                             padding='valid')(outputs)
            outputs = ZeroPadding3D(padding=(1, 1, 1))(outputs)
            outputs = BatchNormalization(momentum=0.1, epsilon=1e-5)(outputs)
            outputs = MaxPooling3D(pool_size=(2, 2, 2),
                                   strides=(2, 2, 2))(outputs)
        else:
            outputs = Conv3D(filters=number_of_filters_per_layer[i],
                             kernel_size=(1, 1, 1),
                             padding='valid')(outputs)
            outputs = BatchNormalization(momentum=0.1, epsilon=1e-5)(outputs)
        outputs = ReLU()(outputs)

    outputs = AveragePooling3D(pool_size=(5, 6, 5), strides=(5, 6, 5))(outputs)

    if dropout_rate > 0.0:
        outputs = Dropout(rate=dropout_rate)(outputs)

    outputs = Conv3D(filters=number_of_bins,
                     kernel_size=(1, 1, 1),
                     padding='valid')(outputs)
    outputs = LogSoftmax()(outputs)

    model = Model(inputs=inputs, outputs=outputs)

    return model
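Because the final layer is a log-softmax over number_of_bins, a common way to read the output as a scalar (e.g. a brain-age estimate) is a probability-weighted average over bin centres. A sketch under that assumption; the input size and bin centres below are illustrative, not values from the source:

import numpy as np

model = create_simple_fully_convolutional_network_model_3d((None, None, None, 1))
volume = np.zeros((1, 160, 192, 160, 1), dtype="float32")       # illustrative size
log_probs = np.squeeze(model.predict(volume))                   # (number_of_bins,)
bin_centers = np.linspace(42.5, 82.5, num=log_probs.shape[0])   # assumed age bins
prediction = float(np.sum(np.exp(log_probs) * bin_centers))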
Example #8
def do_run(i,
           x_train=None,
           y_train=None,
           res_dict=None,
           datagen_settings=None):
    import keras
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Flatten
    from keras.layers import Conv3D, MaxPooling3D
    from keras.callbacks import EarlyStopping
    from keras.callbacks import ReduceLROnPlateau

    UTC_local = getUTC()  # timestamp used to tag this run's results

    x_train, y_train, x_test, y_test = split_train_test(x_train, y_train)

    # Lookup tables from which the hyperparameters are randomly sampled
    modelIndex_dict = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5}

    filters_dict = {0: 8, 1: 16, 2: 24}

    filterSize_dict = {0: 4, 1: 8}

    poolSize_dict = {0: 4, 1: 2, 2: 3}

    denseSize_dict = {0: 256, 1: 512, 2: 1024}

    dropout_dict = {0: 0, 1: 0.1, 2: 0.2, 3: 0.3, 4: 0.4}

    lr_dict = {0: 0.001, 1: 0.0001, 2: 0.00001}

    decay_dict = {0: 1e-04, 1: 1e-05, 2: 1e-06, 3: 1e-07}

    modelIndex = modelIndex_dict.get(rnd.randint(0, len(modelIndex_dict) - 1))
    filters = filters_dict.get(rnd.randint(0, len(filters_dict) - 1))
    filter_size = filterSize_dict.get(rnd.randint(0, len(filterSize_dict) - 1))
    pool_size = poolSize_dict.get(rnd.randint(0, len(poolSize_dict) - 1))
    dense_size = denseSize_dict.get(rnd.randint(0, len(denseSize_dict) - 1))
    dropout = dropout_dict.get(rnd.randint(0, len(dropout_dict) - 1))
    lr = lr_dict.get(rnd.randint(0, len(lr_dict) - 1))
    decay = decay_dict.get(rnd.randint(0, len(decay_dict) - 1))

    print('######### DEBUG - MAKE_MODEL - params')
    frame = inspect.currentframe()
    args, _, _, values = inspect.getargvalues(frame)
    print('function name "%s"' % inspect.getframeinfo(frame)[2])
    for g in args:
        if 'train' in g:
            continue
        print("    %s = %s" % (g, values[g]))

    print('modelIndex -' + str(modelIndex))
    print('filters - ' + str(filters))
    print('filter_size - ' + str(filter_size))
    print('pool_size - ' + str(pool_size))
    print('dense_size - ' + str(dense_size))
    print('dropout - ' + str(dropout))
    print('lr - ' + str(lr))
    print('decay - ' + str(decay))

    print('#########')
    model = Sequential()
    input_shape = x_train.shape[1:]

    if modelIndex == 1:

        # 1x Conv+relu+MaxPool+Dropout -> 1x Dense+relu+Dropout
        model.add(
            Conv3D(filters, (filter_size, filter_size, filter_size),
                   padding='same',
                   input_shape=input_shape))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(Flatten())
        model.add(Dense(dense_size))
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

        model.add(Dense(num_classes))
        model.add(Activation('softmax'))

    elif modelIndex == 2:
        # 2x Conv+relu+MaxPool+Dropout -> 1x Dense+relu+Dropout
        model.add(
            Conv3D(filters, (filter_size, filter_size, filter_size),
                   padding='same',
                   input_shape=input_shape))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(
            Conv3D(pool_size * filters,
                   (filter_size, filter_size, filter_size),
                   padding='same'))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(Flatten())
        model.add(Dense(dense_size))
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

        model.add(Dense(num_classes))
        model.add(Activation('softmax'))

    elif modelIndex == 3:
        # 2x Conv+relu+MaxPool+Dropout -> 2x Dense+relu+Dropout
        model.add(
            Conv3D(filters, (filter_size, filter_size, filter_size),
                   padding='same',
                   input_shape=input_shape))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(
            Conv3D(pool_size * filters,
                   (filter_size, filter_size, filter_size),
                   padding='same'))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(Flatten())
        model.add(Dense(dense_size))
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

        model.add(Dense(dense_size))
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

        model.add(Dense(num_classes))
        model.add(Activation('softmax'))

    elif modelIndex == 4:
        # 2x Conv+relu+Conv+relu+MaxPool+Droput -> 1x Dense+relu+Dropout
        model.add(
            Conv3D(filters, (filter_size, filter_size, filter_size),
                   padding='same',
                   input_shape=input_shape))
        model.add(Activation('relu'))

        model.add(Conv3D(filters, (filter_size, filter_size, filter_size)))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(
            Conv3D(pool_size * filters,
                   (filter_size, filter_size, filter_size),
                   padding='same'))
        model.add(Activation('relu'))

        model.add(
            Conv3D(pool_size * filters,
                   (filter_size, filter_size, filter_size)))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(Flatten())
        model.add(Dense(dense_size))
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

        model.add(Dense(num_classes))
        model.add(Activation('softmax'))

    elif modelIndex == 5:
        model.add(
            Conv3D(filters, (filter_size, filter_size, filter_size),
                   padding='same',
                   input_shape=input_shape))
        model.add(Activation('relu'))

        model.add(Conv3D(filters, (filter_size, filter_size, filter_size)))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(
            Conv3D(pool_size * filters,
                   (filter_size, filter_size, filter_size),
                   padding='same'))
        model.add(Activation('relu'))

        model.add(
            Conv3D(pool_size * filters,
                   (filter_size, filter_size, filter_size)))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(Flatten())
        model.add(Dense(dense_size))
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

        model.add(Dense(num_classes))
        model.add(Activation('softmax'))

    # initiate the Adam optimizer with the sampled learning rate and decay
    opt = keras.optimizers.Adam(lr=lr, decay=decay)

    # Train the model with categorical cross-entropy
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    if datagen_settings:
        datagen_train = ImageGenerator(**datagen_settings)
        datagen_test = ImageGenerator(**datagen_settings)
    else:
        datagen_train = None
        datagen_test = None

    early_stopping = EarlyStopping(monitor='val_loss', patience=5)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3)
    dg_batch_size = 16
    print('In loop - {0}'.format(i))
    if datagen_train:
        print('Using data augmentation.')
        history = model.fit_generator(
            datagen_train.flow(x_train, y_train, batch_size=dg_batch_size),
            steps_per_epoch=len(x_train) / dg_batch_size,
            validation_data=datagen_test.flow(x_test,
                                              y_test,
                                              batch_size=dg_batch_size),
            validation_steps=len(x_test) / dg_batch_size,
            epochs=epochs,
            callbacks=[early_stopping, reduce_lr])
    else:
        print('Not using data augmentation.')
        history = model.fit(x_train,
                            y_train,
                            batch_size=batch_size,
                            epochs=epochs,
                            validation_split=0.25,
                            shuffle=True,
                            callbacks=[early_stopping, reduce_lr])

    val_acc = max(np.array(history.history['val_acc']))
    print('Max Validation Accuracy - ' + str(val_acc))

    data = (UTC_local, val_acc, modelIndex, filters, filter_size, pool_size,
            dense_size, dropout, lr, decay)
    res_dict[i] = data
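A hypothetical driver for the random search above (x_train and y_train are assumed to be loaded elsewhere, and do_run also relies on the module-level num_classes, batch_size and epochs): each call stores a (timestamp, val_acc, hyperparameters...) tuple in res_dict under its run index, so the best trial can be read back afterwards.

results = {}
for run_index in range(5):
    do_run(run_index, x_train=x_train, y_train=y_train,
           res_dict=results, datagen_settings=None)

best_run = max(results.values(), key=lambda row: row[1])  # row[1] is val_acc
print('best validation accuracy:', best_run[1])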
Example #9
    def create_small_model(self, img_input):
        """Create and return the I3D model.
        :param img_input: input tensor of the network.
        :return: A Keras model instance.
        """

        # Determine proper input shape

        channel_axis = 4

        # Downsampling via convolution (spatial and temporal)
        x = self.conv3d_bath_norm(img_input,
                                  64,
                                  7,
                                  7,
                                  7,
                                  strides=(2, 2, 2),
                                  padding='same',
                                  name='Conv3d_1a_7x7')

        # Downsampling (spatial only)
        x = MaxPooling3D((1, 3, 3),
                         strides=(1, 2, 2),
                         padding='same',
                         name='MaxPool2d_2a_3x3')(x)
        x = self.conv3d_bath_norm(x,
                                  64,
                                  1,
                                  1,
                                  1,
                                  strides=(1, 1, 1),
                                  padding='same',
                                  name='Conv3d_2b_1x1')
        x = self.conv3d_bath_norm(x,
                                  192,
                                  3,
                                  3,
                                  3,
                                  strides=(1, 1, 1),
                                  padding='same',
                                  name='Conv3d_2c_3x3')

        # Downsampling (spatial only)
        x = MaxPooling3D((1, 3, 3),
                         strides=(1, 2, 2),
                         padding='same',
                         name='MaxPool2d_3a_3x3')(x)

        # Mixed 3b
        branch_0 = self.conv3d_bath_norm(x,
                                         64,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_3b_0a_1x1')

        branch_1 = self.conv3d_bath_norm(x,
                                         96,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_3b_1a_1x1')
        branch_1 = self.conv3d_bath_norm(branch_1,
                                         128,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_3b_1b_3x3')

        branch_2 = self.conv3d_bath_norm(x,
                                         16,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_3b_2a_1x1')
        branch_2 = self.conv3d_bath_norm(branch_2,
                                         32,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_3b_2b_3x3')

        branch_3 = MaxPooling3D((3, 3, 3),
                                strides=(1, 1, 1),
                                padding='same',
                                name='MaxPool2d_3b_3a_3x3')(x)
        branch_3 = self.conv3d_bath_norm(branch_3,
                                         32,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_3b_3b_1x1')

        x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                               axis=channel_axis,
                               name='Mixed_3b')

        # Mixed 3c
        branch_0 = self.conv3d_bath_norm(x,
                                         128,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_3c_0a_1x1')

        branch_1 = self.conv3d_bath_norm(x,
                                         128,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_3c_1a_1x1')
        branch_1 = self.conv3d_bath_norm(branch_1,
                                         192,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_3c_1b_3x3')

        branch_2 = self.conv3d_bath_norm(x,
                                         32,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_3c_2a_1x1')
        branch_2 = self.conv3d_bath_norm(branch_2,
                                         96,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_3c_2b_3x3')

        branch_3 = MaxPooling3D((3, 3, 3),
                                strides=(1, 1, 1),
                                padding='same',
                                name='MaxPool2d_3c_3a_3x3')(x)
        branch_3 = self.conv3d_bath_norm(branch_3,
                                         64,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_3c_3b_1x1')

        x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                               axis=channel_axis,
                               name='Mixed_3c')

        # Downsampling (spatial and temporal)
        x = MaxPooling3D((3, 3, 3),
                         strides=(2, 2, 2),
                         padding='same',
                         name='MaxPool2d_4a_3x3')(x)

        # Mixed 4b
        branch_0 = self.conv3d_bath_norm(x,
                                         192,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4b_0a_1x1')

        branch_1 = self.conv3d_bath_norm(x,
                                         96,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4b_1a_1x1')
        branch_1 = self.conv3d_bath_norm(branch_1,
                                         208,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_4b_1b_3x3')

        branch_2 = self.conv3d_bath_norm(x,
                                         16,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4b_2a_1x1')
        branch_2 = self.conv3d_bath_norm(branch_2,
                                         48,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_4b_2b_3x3')

        branch_3 = MaxPooling3D((3, 3, 3),
                                strides=(1, 1, 1),
                                padding='same',
                                name='MaxPool2d_4b_3a_3x3')(x)
        branch_3 = self.conv3d_bath_norm(branch_3,
                                         64,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4b_3b_1x1')

        x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                               axis=channel_axis,
                               name='Mixed_4b')

        # Mixed 4c
        branch_0 = self.conv3d_bath_norm(x,
                                         160,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4c_0a_1x1')

        branch_1 = self.conv3d_bath_norm(x,
                                         112,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4c_1a_1x1')
        branch_1 = self.conv3d_bath_norm(branch_1,
                                         224,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_4c_1b_3x3')

        branch_2 = self.conv3d_bath_norm(x,
                                         24,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4c_2a_1x1')
        branch_2 = self.conv3d_bath_norm(branch_2,
                                         64,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_4c_2b_3x3')

        branch_3 = MaxPooling3D((3, 3, 3),
                                strides=(1, 1, 1),
                                padding='same',
                                name='MaxPool2d_4c_3a_3x3')(x)
        branch_3 = self.conv3d_bath_norm(branch_3,
                                         64,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4c_3b_1x1')

        x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                               axis=channel_axis,
                               name='Mixed_4c')

        # Mixed 4d
        branch_0 = self.conv3d_bath_norm(x,
                                         128,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4d_0a_1x1')

        branch_1 = self.conv3d_bath_norm(x,
                                         128,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4d_1a_1x1')
        branch_1 = self.conv3d_bath_norm(branch_1,
                                         256,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_4d_1b_3x3')

        branch_2 = self.conv3d_bath_norm(x,
                                         24,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4d_2a_1x1')
        branch_2 = self.conv3d_bath_norm(branch_2,
                                         64,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_4d_2b_3x3')

        branch_3 = MaxPooling3D((3, 3, 3),
                                strides=(1, 1, 1),
                                padding='same',
                                name='MaxPool2d_4d_3a_3x3')(x)
        branch_3 = self.conv3d_bath_norm(branch_3,
                                         64,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4d_3b_1x1')

        x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                               axis=channel_axis,
                               name='Mixed_4d')

        x = AveragePooling3D((2, 7, 7),
                             strides=(1, 1, 1),
                             padding='valid',
                             name='global_avg_pool')(x)

        x = Flatten()(x)
        x = Dropout(0.5)(x)
        # x = Dense(128, activation='relu')(x)
        # x = Dropout(0.5)(x)
        x = Dense(64, activation='relu')(x)
        x = Dropout(0.5)(x)
        x = Dense(32, activation='relu')(x)
        x = Dropout(0.5)(x)
        x = Dense(self.classes)(x)

        inputs = img_input

        # create model
        model = Model(inputs, x, name='i3d_inception')
        sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss=self.root_mean_squared_error, optimizer=sgd)

        return model
Example #10
conv_channel_2 = 15
# conv_channel_3 = 5
kern_size = 3

input_patches = Input(shape=input_dim)

################################### ENCODER/DECODER ###################################
x_noise = GaussianNoise(sigma=.4)(input_patches)
x = Convolution3D(conv_channel_1,
                  kern_size,
                  kern_size,
                  kern_size,
                  activation='relu',
                  dim_ordering='th',
                  border_mode='same')(x_noise)
x = MaxPooling3D((2, 2, 2), dim_ordering='th')(x)
x = Convolution3D(conv_channel_2,
                  kern_size,
                  kern_size,
                  kern_size,
                  activation='relu',
                  dim_ordering='th',
                  border_mode='same')(x)
encoded = MaxPooling3D((2, 2, 2), dim_ordering='th')(x)

x = Convolution3D(conv_channel_2,
                  kern_size,
                  kern_size,
                  kern_size,
                  activation='relu',
                  dim_ordering='th',
Example #11
def unet_model_3d(input_shape,
                  pool_size=(2, 2, 2),
                  deconvolution=True,
                  kernel=(3, 3, 3),
                  depth=5,
                  n_base_filters=32,
                  batch_normalization=True,
                  activation_name="linear"):

    # field map, brain mask of synthetic training data
    fm_in1 = Input((1, input_shape[1], input_shape[2], input_shape[3]))
    mask1 = Input((1, input_shape[1], input_shape[2], input_shape[3]))

    # field Map, brain mask, and data weighting matrix of target testing data
    fm_in2 = Input((1, input_shape[1], input_shape[2], input_shape[3]))
    mask2 = Input((1, input_shape[1], input_shape[2], input_shape[3]))
    w = Input((1, input_shape[1], input_shape[2], input_shape[3]))

    qsm_kernel = Input((1, input_shape[1], input_shape[2], input_shape[3]))

    current_layer1 = concatenate([fm_in1, mask1], axis=1)
    current_layer2 = concatenate([fm_in2, mask2], axis=1)
    levels1 = list()
    levels2 = list()

    for layer_depth in range(depth):
        layer1, layer2 = create_convolution_block(
            current_layer1,
            current_layer2,
            kernel=kernel,
            n_filters=n_base_filters * (2**layer_depth),
            batch_normalization=batch_normalization)

        if layer_depth < depth - 1:
            current_layer1 = MaxPooling3D(pool_size=pool_size)(layer1)
            levels1.append([layer1, current_layer1])
            current_layer2 = MaxPooling3D(pool_size=pool_size)(layer2)
            levels2.append([layer2, current_layer2])
        else:
            current_layer1 = layer1
            levels1.append([layer1])
            current_layer2 = layer2
            levels2.append([layer2])

    for layer_depth in range(depth - 2, -1, -1):
        up_conv = get_up_convolution(pool_size=pool_size,
                                     kernel_size=pool_size,
                                     deconvolution=deconvolution,
                                     n_filters=current_layer1._keras_shape[1] //
                                     2)
        up_convolution1 = up_conv(current_layer1)
        up_convolution2 = up_conv(current_layer2)
        concat1 = concatenate([up_convolution1, levels1[layer_depth][0]],
                              axis=1)
        concat2 = concatenate([up_convolution2, levels2[layer_depth][0]],
                              axis=1)
        current_layer1, current_layer2 = create_convolution_block(
            concat1,
            concat2,
            n_filters=levels1[layer_depth][0]._keras_shape[1],
            kernel=kernel,
            batch_normalization=batch_normalization,
            dilation_rate=(1, 1, 1))

    conv = Conv3D(1, kernel, padding='same')
    out1 = conv(current_layer1)
    out2 = conv(current_layer2)
    out1 = Activation(activation_name)(out1)
    out1 = Multiply()([out1, mask1])
    out2 = Activation(activation_name)(out2)
    out2 = Multiply()([out2, mask2])

    fm2 = CalFMLayer()([out2, qsm_kernel])
    err_fm = NDIErr()([fm_in2, fm2, w])
    err_fm = Multiply()([err_fm, mask2])

    model = Model(inputs=[fm_in1, mask1, fm_in2, mask2, qsm_kernel, w],
                  outputs=[out1, out2, err_fm])

    # model_t is just used to save the model for target testing data
    model_t = Model(inputs=[fm_in2, mask2], outputs=[out2])

    return model, model_t
Example #12
def deep_learning(protein_train, protein_test, protein_target):
    # NOTE: protein_test is accepted but not used in this snippet
    from keras.models import Sequential
    from keras.layers import Conv3D, MaxPooling3D, Activation, Reshape, Dense
    from keras.layers.normalization import BatchNormalization
    from keras.optimizers import Adam
    
    import keras.backend as K #For compile
    
    print('Start training')
    seq = Sequential()

    #seq.add(Conv3D(11, 3, 3, 3, activation='relu', 
                            #border_mode='valid', name='conv1',
                            #subsample=(1, 1, 1),
                            #dim_ordering='th', 
                            #input_shape=(11,120, 120, 120)))


    seq.add(Conv3D(filters=16, kernel_size=(3,3,3), strides=(1,1,1), activation='relu',padding='valid', data_format='channels_first', input_shape=(11,120, 120, 120)))

    seq.add(MaxPooling3D(pool_size=(3,3,3),strides=(2,2,2),data_format='channels_first'))

    
    seq.add(Conv3D(filters=32, kernel_size=(3,3,3), strides=(1,1,1),padding='valid', data_format='channels_first'))

    seq.add(BatchNormalization())
    
    seq.add(Activation('relu'))

    seq.add(MaxPooling3D(pool_size=(3,3,3),strides=(2,2,2),data_format='channels_first'))

    
    seq.add(Conv3D(filters=32, kernel_size=(3,3,3), strides=(1,1,1),padding='valid', data_format='channels_first'))

    seq.add(BatchNormalization())

    seq.add(Activation('relu'))

    seq.add(Conv3D(filters=64, kernel_size=(3,3,3), strides=(1,1,1),padding='valid', data_format='channels_first'))

    seq.add(BatchNormalization())

    seq.add(Activation('relu'))
   
    seq.add(MaxPooling3D(pool_size=(3,3,3),strides=(2,2,2),data_format='channels_first'))


    seq.add(Conv3D(filters=128, kernel_size=(3,3,3), strides=(1,1,1),padding='valid', data_format='channels_first'))

    seq.add(BatchNormalization())

    seq.add(Activation('relu'))

    seq.add(Conv3D(filters=128, kernel_size=(3,3,3), strides=(1,1,1),padding='valid', data_format='channels_first'))

    seq.add(BatchNormalization())

    seq.add(Activation('relu'))

    seq.add(Conv3D(filters=256, kernel_size=(3,3,3), strides=(1,1,1),padding='valid', data_format='channels_first'))

    seq.add(BatchNormalization())

    seq.add(Activation('relu'))

    seq.add(Conv3D(filters=512, kernel_size=(3,3,3), strides=(1,1,1),padding='valid', data_format='channels_first'))

    seq.add(BatchNormalization())

    seq.add(Activation('relu'))

    seq.add(MaxPooling3D(pool_size=(3,3,3),strides=(2,2,2),data_format='channels_first'))

    
    seq.add(Reshape((-1,)))

    #seq.add(Activation('linear'))
    seq.add(Dense(256,activation='linear'))

    seq.add(Activation('relu'))

    #seq.add(Activation('linear'))
    seq.add(Dense(128,activation='linear'))

    seq.add(Activation('relu'))

    #seq.add(Activation('linear'))
    seq.add(Dense(1,activation='linear'))
    
    

    seq.summary()
    print('ready')
    def mean_pred(y_true, y_pred):
        return K.mean(y_pred)
    
    #protein_target=np.random.rand(16)
    #protein_target=np.random.rand(16,269748)
    #protein_train=np.random.rand(16,11,120,120,120)

    adam = Adam(lr=0.0003, decay=0.01)
    seq.compile(loss='mean_squared_error',
            optimizer=adam,
              metrics=['accuracy', mean_pred])

    
    seq.fit(protein_train,protein_target,
          epochs=20,
          batch_size=9)
    
    print('Training done')
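A smoke-test sketch based on the shapes hinted at in the commented-out lines inside the function (16 samples, 11 channels, 120^3 voxel grids, one scalar target each). The arrays are random, fitting them needs a large GPU, and protein_test is unused by the function, so None is passed:

import numpy as np

protein_train = np.random.rand(16, 11, 120, 120, 120).astype('float32')
protein_target = np.random.rand(16).astype('float32')
deep_learning(protein_train, protein_test=None, protein_target=protein_target)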
Example #13
Y_test = np_utils.to_categorical(Y_test, nb_classes)

model = Sequential()

model.add(
    Convolution3D(32,
                  3,
                  3,
                  3,
                  border_mode='same',
                  input_shape=X_train.shape[1:],
                  dim_ordering='th'))
model.add(Activation('relu'))
model.add(Convolution3D(32, 3, 3, 2, dim_ordering='th'))
model.add(Activation('relu'))
model.add(MaxPooling3D(pool_size=(3, 3, 2), dim_ordering='th'))
model.add(Dropout(0.25))

model.add(Convolution3D(64, 3, 3, 3, border_mode='same', dim_ordering='th'))
model.add(Activation('relu'))
model.add(Convolution3D(64, 2, 2, 2, dim_ordering='th'))
model.add(Activation('relu'))
model.add(MaxPooling3D(pool_size=(3, 3, 3), dim_ordering='th'))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(50))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
Example #14
def model_gru(input_shape):
    """
    Function creating the model's graph in Keras.
    
    Argument:
    input_shape -- shape of the model's input data (using Keras conventions)

    Returns:
    model -- Keras model instance
    """
    strd = (2, 2, 1)  # strides for maxpooling
    sz = (3, 3, 3)  # size of filter in stackblock

    X_input = Input(shape=input_shape)

    H, W, T, _ = input_shape

    X = stackBlock(X_input, 4, sz, 1)
    X = stackBlock(X, 4, sz, 2)
    X = MaxPooling3D((2, 2, 1), strides=strd)(X)

    X = stackBlock(X, 4, sz, 3)
    X = stackBlock(X, 4, sz, 4)
    X = MaxPooling3D((2, 2, 1), strides=strd)(X)

    X = stackBlock(X, 4, sz, 5)
    X = stackBlock(X, 4, sz, 6)
    X = MaxPooling3D((2, 2, 1), strides=strd)(X)

    sp = int_shape(X)
    print(sp)

    # (m),H,W,T,C: must transform into (batch_size, timesteps, input_dim)

    X = Permute((3, 1, 2, 4))(X)
    X = Reshape((T, -1))(X)

    sp = int_shape(X)
    print(sp)

    # Step 3: First GRU Layer
    X = GRU(32, return_sequences=True)(
        X)  # GRU (use 32 units and return the sequences)
    X = Dropout(0.2)(X)
    X = BatchNormalization()(X)

    # Step 4: Second GRU Layer
    X = GRU(32, return_sequences=True)(
        X)  # GRU (use 32 units and return the sequences)
    X = Dropout(0.2)(X)
    X = BatchNormalization()(X)
    X = Dropout(0.2)(X)

    # Step 5: Time-distributed dense layer
    X = TimeDistributed(Dense(1, activation="sigmoid"))(
        X)  # time distributed  (sigmoid)

    sp = int_shape(X)
    print(sp)

    X = Reshape((T, 1, 1, 1))(X)
    X = Permute((2, 3, 1, 4))(X)

    model = Model(inputs=X_input, outputs=X)

    return model
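Since the graph is built from the static input shape (T is read from input_shape and reused in the final Reshape), a quick sanity check of the CNN-to-GRU handoff is to instantiate the model and inspect its summary. This sketch assumes stackBlock preserves the spatial size (e.g. 'same'-padded convolutions), which is not shown in the snippet:

model = model_gru((64, 64, 16, 1))  # illustrative H, W, T, C
model.summary()  # the GRU layers should receive input shaped (None, 16, features)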
Example #15
def get_model(data_in_mbe, data_out, _cnn_nb_filt, _cnn_pool_size_mbe, _rnn_nb,
              _fc_nb, _nb_ch):

    #----------------------------------------------------------------------------------------------------------------------
    # MBE branch
    #----------------------------------------------------------------------------------------------------------------------
    spec_start = Input(shape=(data_in_mbe.shape[-4], data_in_mbe.shape[-3],
                              data_in_mbe.shape[-2], data_in_mbe.shape[-1]))
    spec_x = spec_start

    for _i, _cnt in enumerate(_cnn_pool_size_mbe):
        if _i == 0:
            spec_x = Conv3D(filters=_cnn_nb_filt,
                            kernel_size=(_nb_ch, 3, 3),
                            padding='same')(spec_x)
            spec_x = BatchNormalization(axis=1)(spec_x)
            spec_x = Activation('relu')(spec_x)
            spec_x = MaxPooling3D(pool_size=(1, 1,
                                             _cnn_pool_size_mbe[_i]))(spec_x)
            spec_x = Dropout(dropout_rate)(spec_x)
            spec_x = Reshape((-1, data_in_mbe.shape[-2], 8))(spec_x)
        else:
            spec_x = Conv2D(filters=_cnn_nb_filt,
                            kernel_size=(3, 3),
                            padding='same')(spec_x)
            spec_x = BatchNormalization(axis=1)(spec_x)
            spec_x = Activation('relu')(spec_x)
            spec_x = MaxPooling2D(pool_size=(1,
                                             _cnn_pool_size_mbe[_i]))(spec_x)
            spec_x = Dropout(dropout_rate)(spec_x)
    spec_x = Permute((2, 1, 3))(spec_x)
    spec_x = Reshape((data_in_mbe.shape[-2], -1))(spec_x)
    print("spec_x: ", spec_x.shape)

    #TODO
    """
            concatenation (spec_x - spec_x_gcc)
                          |
              LSTM fw           LSTM bw
                          |
             concatenation (spec_x - spec_x_gcc)
                          |
               LSTM fw           LSTM bw
                          |
            concatenation (spec_x - spec_x_gcc)
                          |
               Fully connected sigmoid
    """

    #----------------------------------------------------
    # RNN
    #----------------------------------------------------

    for _r in _rnn_nb:
        spec_x = Bidirectional(GRU(_r,
                                   activation='tanh',
                                   dropout=dropout_rate,
                                   recurrent_dropout=dropout_rate,
                                   return_sequences=True),
                               merge_mode='concat')(spec_x)
    """
    spec_x_conc = Bidirectional(GRU(_rnn_nb[0], activation='tanh', dropout=dropout_rate, recurrent_dropout=dropout_rate,return_sequences= True, go_backwards= True),
            merge_mode ='concat')(spec_x_conc)
    print("spec_conc: ", spec_x_conc.shape)

    spec_x_conc = Bidirectional(GRU(_rnn_nb[1], activation='tanh', dropout=dropout_rate, recurrent_dropout=dropout_rate,return_sequences= True,go_backwards= True),
            merge_mode ='concat')(spec_x_conc)
    """

    for _f in _fc_nb:
        spec_x = TimeDistributed(Dense(_f))(spec_x)
        spec_x = Dropout(dropout_rate)(spec_x)

    # Dense - out T x 6 CLASSES
    spec_x = TimeDistributed(Dense(data_out.shape[-1]))(spec_x)
    out = Activation('sigmoid', name='strong_out')(spec_x)

    _model = Model(inputs=spec_start, outputs=out)
    adam = keras.optimizers.Adam(lr=0.0001,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=1e-08,
                                 decay=0.0)
    _model.compile(optimizer=adam,
                   loss='binary_crossentropy',
                   metrics=['accuracy'])  # lr = 1e-4
    #_model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['accuracy'])  # lr = 1e-4
    _model.summary()
    return _model
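
A minimal standalone sketch (not part of the original example) of the recurrent head built above: stacked bidirectional GRUs over a (time, features) sequence followed by a time-distributed sigmoid layer for multi-label output. The sizes and the dropout_rate value are illustrative assumptions.

from keras.layers import Input, Bidirectional, GRU, TimeDistributed, Dense, Activation
from keras.models import Model

T, F, n_classes = 256, 128, 6        # assumed time steps, features per step, classes
dropout_rate = 0.5                   # assumed value of the global used above

x_in = Input(shape=(T, F))
x = x_in
for units in (32, 32):               # assumed _rnn_nb
    x = Bidirectional(GRU(units, activation='tanh', dropout=dropout_rate,
                          recurrent_dropout=dropout_rate, return_sequences=True),
                      merge_mode='concat')(x)
x = TimeDistributed(Dense(n_classes))(x)
out = Activation('sigmoid', name='strong_out')(x)
head = Model(x_in, out)
head.compile(optimizer='adam', loss='binary_crossentropy')
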
Example No. 16
    def create_model(self, img_input, optimizer=Adam(lr=1e-4)):
        """create and return the i3d model
        :param: img_input: input shape of the network.
        :param: optimizer: the optimizer for the CNN. SGD or Adam with low learning rate.
        :return: A Keras model instance.
        """

        # Determine proper input shape

        channel_axis = 4

        # Downsampling via convolution (spatial and temporal)
        x = self.conv3d_bath_norm(img_input,
                                  64,
                                  7,
                                  7,
                                  7,
                                  strides=(2, 2, 2),
                                  padding='same',
                                  name='Conv3d_1a_7x7')

        # Downsampling (spatial only)
        x = MaxPooling3D((1, 3, 3),
                         strides=(1, 2, 2),
                         padding='same',
                         name='MaxPool2d_2a_3x3')(x)
        x = self.conv3d_bath_norm(x,
                                  64,
                                  1,
                                  1,
                                  1,
                                  strides=(1, 1, 1),
                                  padding='same',
                                  name='Conv3d_2b_1x1')
        x = self.conv3d_bath_norm(x,
                                  192,
                                  3,
                                  3,
                                  3,
                                  strides=(1, 1, 1),
                                  padding='same',
                                  name='Conv3d_2c_3x3')

        # Downsampling (spatial only)
        x = MaxPooling3D((1, 3, 3),
                         strides=(1, 2, 2),
                         padding='same',
                         name='MaxPool2d_3a_3x3')(x)

        # Mixed 3b
        branch_0 = self.conv3d_bath_norm(x,
                                         64,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_3b_0a_1x1')

        branch_1 = self.conv3d_bath_norm(x,
                                         96,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_3b_1a_1x1')
        branch_1 = self.conv3d_bath_norm(branch_1,
                                         128,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_3b_1b_3x3')

        branch_2 = self.conv3d_bath_norm(x,
                                         16,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_3b_2a_1x1')
        branch_2 = self.conv3d_bath_norm(branch_2,
                                         32,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_3b_2b_3x3')

        branch_3 = MaxPooling3D((3, 3, 3),
                                strides=(1, 1, 1),
                                padding='same',
                                name='MaxPool2d_3b_3a_3x3')(x)
        branch_3 = self.conv3d_bath_norm(branch_3,
                                         32,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_3b_3b_1x1')

        x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                               axis=channel_axis,
                               name='Mixed_3b')

        # Mixed 3c
        branch_0 = self.conv3d_bath_norm(x,
                                         128,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_3c_0a_1x1')

        branch_1 = self.conv3d_bath_norm(x,
                                         128,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_3c_1a_1x1')
        branch_1 = self.conv3d_bath_norm(branch_1,
                                         192,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_3c_1b_3x3')

        branch_2 = self.conv3d_bath_norm(x,
                                         32,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_3c_2a_1x1')
        branch_2 = self.conv3d_bath_norm(branch_2,
                                         96,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_3c_2b_3x3')

        branch_3 = MaxPooling3D((3, 3, 3),
                                strides=(1, 1, 1),
                                padding='same',
                                name='MaxPool2d_3c_3a_3x3')(x)
        branch_3 = self.conv3d_bath_norm(branch_3,
                                         64,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_3c_3b_1x1')

        x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                               axis=channel_axis,
                               name='Mixed_3c')

        # Downsampling (spatial and temporal)
        x = MaxPooling3D((3, 3, 3),
                         strides=(2, 2, 2),
                         padding='same',
                         name='MaxPool2d_4a_3x3')(x)

        # Mixed 4b
        branch_0 = self.conv3d_bath_norm(x,
                                         192,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4b_0a_1x1')

        branch_1 = self.conv3d_bath_norm(x,
                                         96,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4b_1a_1x1')
        branch_1 = self.conv3d_bath_norm(branch_1,
                                         208,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_4b_1b_3x3')

        branch_2 = self.conv3d_bath_norm(x,
                                         16,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4b_2a_1x1')
        branch_2 = self.conv3d_bath_norm(branch_2,
                                         48,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_4b_2b_3x3')

        branch_3 = MaxPooling3D((3, 3, 3),
                                strides=(1, 1, 1),
                                padding='same',
                                name='MaxPool2d_4b_3a_3x3')(x)
        branch_3 = self.conv3d_bath_norm(branch_3,
                                         64,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4b_3b_1x1')

        x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                               axis=channel_axis,
                               name='Mixed_4b')

        # Mixed 4c
        branch_0 = self.conv3d_bath_norm(x,
                                         160,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4c_0a_1x1')

        branch_1 = self.conv3d_bath_norm(x,
                                         112,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4c_1a_1x1')
        branch_1 = self.conv3d_bath_norm(branch_1,
                                         224,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_4c_1b_3x3')

        branch_2 = self.conv3d_bath_norm(x,
                                         24,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4c_2a_1x1')
        branch_2 = self.conv3d_bath_norm(branch_2,
                                         64,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_4c_2b_3x3')

        branch_3 = MaxPooling3D((3, 3, 3),
                                strides=(1, 1, 1),
                                padding='same',
                                name='MaxPool2d_4c_3a_3x3')(x)
        branch_3 = self.conv3d_bath_norm(branch_3,
                                         64,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4c_3b_1x1')

        x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                               axis=channel_axis,
                               name='Mixed_4c')

        # Mixed 4d
        branch_0 = self.conv3d_bath_norm(x,
                                         128,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4d_0a_1x1')

        branch_1 = self.conv3d_bath_norm(x,
                                         128,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4d_1a_1x1')
        branch_1 = self.conv3d_bath_norm(branch_1,
                                         256,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_4d_1b_3x3')

        branch_2 = self.conv3d_bath_norm(x,
                                         24,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4d_2a_1x1')
        branch_2 = self.conv3d_bath_norm(branch_2,
                                         64,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_4d_2b_3x3')

        branch_3 = MaxPooling3D((3, 3, 3),
                                strides=(1, 1, 1),
                                padding='same',
                                name='MaxPool2d_4d_3a_3x3')(x)
        branch_3 = self.conv3d_bath_norm(branch_3,
                                         64,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4d_3b_1x1')

        x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                               axis=channel_axis,
                               name='Mixed_4d')

        # Mixed 4e
        branch_0 = self.conv3d_bath_norm(x,
                                         112,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4e_0a_1x1')

        branch_1 = self.conv3d_bath_norm(x,
                                         144,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4e_1a_1x1')
        branch_1 = self.conv3d_bath_norm(branch_1,
                                         288,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_4e_1b_3x3')

        branch_2 = self.conv3d_bath_norm(x,
                                         32,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4e_2a_1x1')
        branch_2 = self.conv3d_bath_norm(branch_2,
                                         64,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_4e_2b_3x3')

        branch_3 = MaxPooling3D((3, 3, 3),
                                strides=(1, 1, 1),
                                padding='same',
                                name='MaxPool2d_4e_3a_3x3')(x)
        branch_3 = self.conv3d_bath_norm(branch_3,
                                         64,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4e_3b_1x1')

        x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                               axis=channel_axis,
                               name='Mixed_4e')

        # Mixed 4f
        branch_0 = self.conv3d_bath_norm(x,
                                         256,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4f_0a_1x1')

        branch_1 = self.conv3d_bath_norm(x,
                                         160,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4f_1a_1x1')
        branch_1 = self.conv3d_bath_norm(branch_1,
                                         320,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_4f_1b_3x3')

        branch_2 = self.conv3d_bath_norm(x,
                                         32,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4f_2a_1x1')
        branch_2 = self.conv3d_bath_norm(branch_2,
                                         128,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_4f_2b_3x3')

        branch_3 = MaxPooling3D((3, 3, 3),
                                strides=(1, 1, 1),
                                padding='same',
                                name='MaxPool2d_4f_3a_3x3')(x)
        branch_3 = self.conv3d_bath_norm(branch_3,
                                         128,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_4f_3b_1x1')

        x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                               axis=channel_axis,
                               name='Mixed_4f')

        # Downsampling (spatial and temporal)
        x = MaxPooling3D((2, 2, 2),
                         strides=(2, 2, 2),
                         padding='same',
                         name='MaxPool2d_5a_2x2')(x)

        # Mixed 5b
        branch_0 = self.conv3d_bath_norm(x,
                                         256,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_5b_0a_1x1')

        branch_1 = self.conv3d_bath_norm(x,
                                         160,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_5b_1a_1x1')
        branch_1 = self.conv3d_bath_norm(branch_1,
                                         320,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_5b_1b_3x3')

        branch_2 = self.conv3d_bath_norm(x,
                                         32,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_5b_2a_1x1')
        branch_2 = self.conv3d_bath_norm(branch_2,
                                         128,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_5b_2b_3x3')

        branch_3 = MaxPooling3D((3, 3, 3),
                                strides=(1, 1, 1),
                                padding='same',
                                name='MaxPool2d_5b_3a_3x3')(x)
        branch_3 = self.conv3d_bath_norm(branch_3,
                                         128,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_5b_3b_1x1')

        x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                               axis=channel_axis,
                               name='Mixed_5b')

        # Mixed 5c
        branch_0 = self.conv3d_bath_norm(x,
                                         384,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_5c_0a_1x1')

        branch_1 = self.conv3d_bath_norm(x,
                                         192,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_5c_1a_1x1')
        branch_1 = self.conv3d_bath_norm(branch_1,
                                         384,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_5c_1b_3x3')

        branch_2 = self.conv3d_bath_norm(x,
                                         48,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_5c_2a_1x1')
        branch_2 = self.conv3d_bath_norm(branch_2,
                                         128,
                                         3,
                                         3,
                                         3,
                                         padding='same',
                                         name='Conv3d_5c_2b_3x3')

        branch_3 = MaxPooling3D((3, 3, 3),
                                strides=(1, 1, 1),
                                padding='same',
                                name='MaxPool2d_5c_3a_3x3')(x)
        branch_3 = self.conv3d_bath_norm(branch_3,
                                         128,
                                         1,
                                         1,
                                         1,
                                         padding='same',
                                         name='Conv3d_5c_3b_1x1')

        x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                               axis=channel_axis,
                               name='Mixed_5c')

        # Classification block
        x = AveragePooling3D((2, 7, 7),
                             strides=(1, 1, 1),
                             padding='valid',
                             name='global_avg_pool')(x)
        x = Dropout(self.dropout_prob)(x)

        x = self.conv3d_bath_norm(x,
                                  self.classes,
                                  1,
                                  1,
                                  1,
                                  padding='same',
                                  use_bias=True,
                                  use_activation_fn=False,
                                  use_bn=False,
                                  name='Conv3d_6a_1x1')

        num_frames_remaining = int(x.shape[1])
        x = Reshape((num_frames_remaining, self.classes))(x)
        x = Flatten()(x)
        x = Dense(self.classes)(x)

        inputs = img_input

        # create model
        model = Model(inputs, x, name='i3d_inception')
        model.compile(loss=self.root_mean_squared_error, optimizer=optimizer)

        return model
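
The helper self.conv3d_bath_norm is not shown in this excerpt. As an assumption about what it likely does (a Conv3D + BatchNorm + ReLU block, mirroring the conv3d_bn helper used in Example No. 21), a sketch could look like this:

from keras.layers import Conv3D, BatchNormalization, Activation

def conv3d_batch_norm_sketch(x, filters, kd, kh, kw, strides=(1, 1, 1),
                             padding='same', use_bias=False,
                             use_activation_fn=True, use_bn=True, name=None):
    """Apply a 3D convolution, then optionally batch normalization and ReLU."""
    conv_name = name + '_conv' if name else None
    bn_name = name + '_bn' if name else None
    x = Conv3D(filters, (kd, kh, kw), strides=strides, padding=padding,
               use_bias=use_bias, name=conv_name)(x)
    if use_bn:
        x = BatchNormalization(axis=4, scale=False, name=bn_name)(x)  # channels_last
    if use_activation_fn:
        x = Activation('relu', name=name)(x)
    return x
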
Example No. 17
 def unet3d(self):
     inputs = Input(shape=self.input_shape)
     enc = Conv3D(filters=self.init_filter, kernel_size=(3,3,3), padding='same', kernel_initializer='glorot_normal')(inputs)
     temp = inputs
     enc = BatchNormalization()(enc)
     enc = Activation("selu")(enc)
     enc = Conv3D(filters=self.init_filter, kernel_size=(3,3,3), padding='same', kernel_initializer='glorot_normal')(enc)
     enc = BatchNormalization()(enc)
     enc = keras.layers.Add()([enc, temp])
     enc = Activation("selu")(enc)
     del temp
     
     enc2 = MaxPooling3D(pool_size=(2,2,2))(enc)
     enc2 = Conv3D(filters=self.init_filter * 2, kernel_size=(3,3,3), padding='same', kernel_initializer='glorot_normal')(enc2)
     temp = enc2
     enc2 = BatchNormalization()(enc2)
     enc2 = Activation("selu")(enc2)
     enc2 = Conv3D(filters=self.init_filter * 2, kernel_size=(3,3,3), padding='same', kernel_initializer='glorot_normal')(enc2)
     enc2 = BatchNormalization()(enc2)
     enc2 = keras.layers.Add()([enc2, temp])
     enc2 = Activation("selu")(enc2)
     del temp
     
     enc3 = MaxPooling3D(pool_size=(2,2,2))(enc2)
     enc3 = Conv3D(filters=self.init_filter * 4, kernel_size=(3,3,3), padding='same', kernel_initializer='glorot_normal')(enc3)
     temp = enc3
     enc3 = BatchNormalization()(enc3)
     enc3 = Activation("selu")(enc3)
     enc3 = Conv3D(filters=self.init_filter * 4, kernel_size=(3,3,3), padding='same', kernel_initializer='glorot_normal')(enc3)
     enc3 = BatchNormalization()(enc3)
     enc3 = keras.layers.Add()([enc3, temp])
     enc3 = Activation("selu")(enc3)
     del temp
     
     enc4 = MaxPooling3D(pool_size=(2,2,2))(enc3)
     enc4 = Conv3D(filters=self.init_filter * 8, kernel_size=(3,3,3), padding='same', kernel_initializer='glorot_normal')(enc4)
     temp = enc4
     enc4 = BatchNormalization()(enc4)
     enc4 = Activation("selu")(enc4)
     enc4 = Conv3D(filters=self.init_filter * 8, kernel_size=(3,3,3), padding='same', kernel_initializer='glorot_normal')(enc4)
     enc4 = BatchNormalization()(enc4)
     enc4 = keras.layers.Add()([enc4, temp])
     enc4 = Activation("selu")(enc4)
     del temp
     
     dec = UpSampling3D(size=(2,2,2))(enc4)
     dec = Concatenate(axis=-1)([dec, enc3])
     dec = Conv3D(filters=self.init_filter * 8, kernel_size=(3,3,3), padding='same', kernel_initializer='glorot_normal')(dec)
     temp = dec
     dec = BatchNormalization()(dec)
     dec = Activation("selu")(dec)
     dec = Conv3D(filters=self.init_filter * 8, kernel_size=(3,3,3), padding='same', kernel_initializer='glorot_normal')(dec)
     dec = BatchNormalization()(dec)
     dec = keras.layers.Add()([dec, temp])
     dec = Activation("selu")(dec)
     del temp
     
     dec2 = UpSampling3D(size=(2,2,2))(dec)
     dec2 = Concatenate(axis=-1)([dec2, enc2])
     dec2 = Conv3D(filters=self.init_filter * 4, kernel_size=(3,3,3), padding='same', kernel_initializer='glorot_normal')(dec2)
     temp = dec2
     dec2 = BatchNormalization()(dec2)
     dec2 = Activation("selu")(dec2)
     dec2 = Conv3D(filters=self.init_filter * 4, kernel_size=(3,3,3), padding='same', kernel_initializer='glorot_normal')(dec2)
     dec2 = BatchNormalization()(dec2)
     dec2 = keras.layers.Add()([dec2, temp])
     dec2 = Activation("selu")(dec2)
     del temp
     
     dec3 = UpSampling3D(size=(2,2,2))(dec2)
     dec3 = Concatenate(axis=-1)([dec3, enc])
     dec3 = Conv3D(filters=self.init_filter * 2, kernel_size=(3,3,3), padding='same', kernel_initializer='glorot_normal')(dec3)
     temp = dec3
     dec3 = BatchNormalization()(dec3)
     dec3 = Activation("selu")(dec3)
     dec3 = Conv3D(filters=self.init_filter * 2, kernel_size=(3,3,3), padding='same', kernel_initializer='glorot_normal')(dec3)
     dec3 = BatchNormalization()(dec3)
     dec3 = keras.layers.Add()([dec3, temp])
     dec3 = Activation("selu")(dec3)
     del temp
     
     out = Conv3D(filters=self.channels, kernel_size=(3,3,3), activation='softmax', padding='same', kernel_initializer='glorot_normal', name="seg_output")(dec3)
     
     model = Model(inputs,out)
     model.compile(optimizer=self.optimizer, loss=self.loss, metrics=self.metrics)
     # model.summary()
     if self.pretrained_weights:
         model.load_weights(self.pretrained_weights)
     return model
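
The encoder and decoder above repeat the same residual double-convolution pattern. A sketch (not part of the original class) that factors it out might look like the helper below; note that in the code above the very first block takes its skip connection from the raw inputs, while all later blocks take it right after their first Conv3D, which is what this helper mirrors.

from keras.layers import Conv3D, BatchNormalization, Activation, Add

def residual_double_conv3d(x, filters):
    """Conv3D, skip taken after the first convolution, then BN -> SELU -> Conv3D -> BN,
    residual Add, SELU."""
    x = Conv3D(filters, (3, 3, 3), padding='same',
               kernel_initializer='glorot_normal')(x)
    skip = x
    x = BatchNormalization()(x)
    x = Activation('selu')(x)
    x = Conv3D(filters, (3, 3, 3), padding='same',
               kernel_initializer='glorot_normal')(x)
    x = BatchNormalization()(x)
    x = Add()([x, skip])
    return Activation('selu')(x)
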
Example No. 18
def finalNetwork(batch_size, initial_filter_value, time):

	# INITIAL FACE CHANNEL

	input_x = Input(shape=(time,256,256,3))
	x = Conv3D(filters=initial_filter_value, kernel_size=3, padding='same', activation='relu')(input_x)
	x = BatchNormalization()(x)

	x = MaxPooling3D(pool_size=(2,2,2))(x)
	x = Dropout(0.2)(x)

	x = Conv3D(filters=initial_filter_value, kernel_size=3, padding='same', activation='relu')(x)
	x = BatchNormalization()(x)

	x = MaxPooling3D(pool_size=(4,4,4))(x)
	x = Dropout(0.2)(x)

	x = Conv3D(filters=initial_filter_value*2, kernel_size=3, padding='same', activation='relu')(x)
	x = BatchNormalization()(x)

	# END INITIAL FACE CHANNEL

	# INITIAL AUDIO CHANNEL

	input_y = Input(shape=(256,256,3))
	y = Conv2D(filters=initial_filter_value, kernel_size=3, padding='same', activation='relu')(input_y)
	
	# END INITIAL AUDIO CHANNEL

	# CREATE CHANNEL FOR IMAGE

#	cross_channel_x = Flatten()(x)
#	cross_channel_x = Dense((25*256*256*8)//128)(cross_channel_x)
	
	# END CHANNEL

	# CREATE CHANNEL FOR AUDIO

#	cross_channel_y = Flatten()(y)
#	cross_channel_y = Dense((25*256*256*8)//128)(cross_channel_y)

	# END CHANNEL FOR AUDIO

	#CHANNEL MERGE

#	merged_channel = concatenate([cross_channel_x, cross_channel_y])
#	merged_channel = Reshape((2,25,2048,2), input_shape=(1,204800))(merged_channel)
#	merged_channel = Conv3D(filters=8, kernel_size=5, padding='same')(merged_channel)

#	merge_input_x = Flatten()(merged_channel)
#	merge_input_x = Dense(2048)(merge_input_x)
#	merged_channel_output_x = Reshape((1,16,16,8), input_shape=(1,2048))(merge_input_x)
#	merge_input_x = None
	
#	merge_input_y = Flatten()(merged_channel)
#	merge_input_y = Dense(256*256*3)(merge_input_y)
#	merged_channel_output_y = Reshape((256,256,3), input_shape=(1,256*256*3))(merge_input_y)
#	merge_input_y = None
#	merged_channel = None

	# END CHANNEL MERGE

	# START END OF VISUAL

#	x = concatenate([x, merged_channel_output_x])
#	merged_channel_output_x = None

	x = Flatten()(x)
	x = Dense(100, activation='relu')(x)
#	x = Dropout(0.5)(x)
	#x = Dense(150, activation='relu')(x)
	#x = Dropout(0.5)(x)
	
	visual_channel = Dense(time, activation='linear')(x)
	# FINISH VISUAL

	# START END OF AUDIO

#	y = concatenate([y, merged_channel_output_y])
#	merged_channel_output_y = None

	y = Conv2D(filters=initial_filter_value*2, kernel_size=3, padding='same', activation='relu')(y)
	
	y = Flatten()(y)
	y = Dense(100, activation='relu')(y)
#	y = Dropout(0.5)(y)
	
	audio_channel = Dense(time, activation='linear')(y)

	# FINISH AUDIO
	model = Model(inputs=[input_x,input_y], outputs=[visual_channel,audio_channel])
	model.summary()
	return multi_gpu_model(model)
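
A hedged usage sketch (not part of the original example): building and compiling the two-input, two-output model returned above. The argument values are illustrative assumptions, and multi_gpu_model requires at least two visible GPUs, raising a ValueError otherwise.

parallel_model = finalNetwork(batch_size=8, initial_filter_value=8, time=25)
parallel_model.compile(optimizer='adam',
                       loss=['mean_squared_error', 'mean_squared_error'])
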
Example No. 19
    # alexTrain = alexTrain.reshape(alexTrain.shape[0], img_sli, numRow,numCol, 1)
    # alexTest = alexTest.reshape(alexTest.shape[0], img_sli, numRow,numCol, 1)
    # alexNetValid = alexNetValid.reshape(alexNetValid.shape[0], img_sli, numRow,numCol, 1)
    input_shape = (img_sli, numRow, numCol, 1)

print("Now constructing the new CNN")
postAlexModel = Sequential()

postAlexModel.add(
    Convolution3D(nb_filters,
                  kernel_size[0],
                  kernel_size[1],
                  kernel_size[2],
                  border_mode='valid',
                  input_shape=input_shape))
postAlexModel.add(MaxPooling3D(pool_size=pool_size))
postAlexModel.add(Dropout(0.2))
postAlexModel.add(Flatten())
#model.add(Dense(128, init='normal',activation='relu'))
postAlexModel.add(Dense(16, init='normal', activation='sigmoid'))
postAlexModel.add(Dense(nb_classes, init='normal', activation='softmax'))
postAlexModel.compile(loss='categorical_crossentropy',
                      optimizer='sgd',
                      metrics=['accuracy'])

postAlexModel.fit_generator(alexNetDataGenerator(trainTestIDs, trainTestLabels,
                                                 indsTrain),
                            samples_per_epoch=100,
                            nb_val_samples=50,
                            nb_epoch=nb_epoch,
                            verbose=1,
Example No. 20
def pi3d_model(fc_main,
               model_inputs,
               dataset,
               protocol,
               all_models_name=[],
               mode='sum',
               dropout_prob=0.0,
               num_classes=60,
               sum_idx=0,
               train_end_to_end=False):
    mode = mode
    all_models_name = all_models_name
    #all_models = {}
    if sum_idx == 0:
        global f_dept
        f_dept = 1024

    pi3d_interm_outputs = []
    for model_name in all_models_name:
        model = load_model('./weights_optim/{}/weights_{}_{}.hdf5'.format(
            dataset, model_name, protocol))
        for idx in range(len(model.layers)):
            model.get_layer(
                index=idx).name = model.layers[idx].name + '_' + model_name

        for l in model.layers:
            l.trainable = train_end_to_end

        model_inputs.append(model.input)
        if sum_idx <= 3 and sum_idx >= 0:
            pi3d_interm_outputs.append(
                Reshape((1, 8, 7, 7, f_dept))(
                    model.get_layer(index=-46 + (2 - sum_idx) * 20).output))

    x = concatenate(pi3d_interm_outputs, axis=1)
    inflated_fc_main = keras.layers.core.Lambda(inflate_dense,
                                                output_shape=(no_of_p, 8, 7, 7,
                                                              f_dept))(fc_main)
    multiplied_features = keras.layers.Multiply()([inflated_fc_main, x])

    if mode == 'sum':
        x = keras.layers.core.Lambda(
            sum_feature, output_shape=(8, 7, 7, f_dept))(multiplied_features)
    elif mode == 'cat':
        x = keras.layers.core.Lambda(
            concat_feature,
            output_shape=(8, 7, 7, f_dept * no_of_p))(multiplied_features)

    ##second part of I3D

    if sum_idx == 2:
        # Mixed 5b
        branch_0 = conv3d_bn(x,
                             256,
                             1,
                             1,
                             1,
                             padding='same',
                             name='Conv3d_5b_0a_1x1' + 'second')

        branch_1 = conv3d_bn(x,
                             160,
                             1,
                             1,
                             1,
                             padding='same',
                             name='Conv3d_5b_1a_1x1' + 'second')
        branch_1 = conv3d_bn(branch_1,
                             320,
                             3,
                             3,
                             3,
                             padding='same',
                             name='Conv3d_5b_1b_3x3' + 'second')

        branch_2 = conv3d_bn(x,
                             32,
                             1,
                             1,
                             1,
                             padding='same',
                             name='Conv3d_5b_2a_1x1' + 'second')
        branch_2 = conv3d_bn(branch_2,
                             128,
                             3,
                             3,
                             3,
                             padding='same',
                             name='Conv3d_5b_2b_3x3' + 'second')

        branch_3 = MaxPooling3D((3, 3, 3),
                                strides=(1, 1, 1),
                                padding='same',
                                name='MaxPool2d_5b_3a_3x3' + 'second')(x)
        branch_3 = conv3d_bn(branch_3,
                             128,
                             1,
                             1,
                             1,
                             padding='same',
                             name='Conv3d_5b_3b_1x1' + 'second')

        x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                               axis=4,
                               name='Mixed_5b' + 'second')

    if sum_idx == 1 or sum_idx == 2:
        # Mixed 5c
        branch_0 = conv3d_bn(x,
                             384,
                             1,
                             1,
                             1,
                             padding='same',
                             name='Conv3d_5c_0a_1x1' + 'second')

        branch_1 = conv3d_bn(x,
                             192,
                             1,
                             1,
                             1,
                             padding='same',
                             name='Conv3d_5c_1a_1x1' + 'second')
        branch_1 = conv3d_bn(branch_1,
                             384,
                             3,
                             3,
                             3,
                             padding='same',
                             name='Conv3d_5c_1b_3x3' + 'second')

        branch_2 = conv3d_bn(x,
                             48,
                             1,
                             1,
                             1,
                             padding='same',
                             name='Conv3d_5c_2a_1x1' + 'second')
        branch_2 = conv3d_bn(branch_2,
                             128,
                             3,
                             3,
                             3,
                             padding='same',
                             name='Conv3d_5c_2b_3x3' + 'second')

        branch_3 = MaxPooling3D((3, 3, 3),
                                strides=(1, 1, 1),
                                padding='same',
                                name='MaxPool2d_5c_3a_3x3' + 'second')(x)
        branch_3 = conv3d_bn(branch_3,
                             128,
                             1,
                             1,
                             1,
                             padding='same',
                             name='Conv3d_5c_3b_1x1' + 'second')

        x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                               axis=4,
                               name='Mixed_5c' + 'second')

    #Classification block
    x = AveragePooling3D((2, 7, 7),
                         strides=(1, 1, 1),
                         padding='valid',
                         name='global_avg_pool' + 'second')(x)
    x = Dropout(dropout_prob)(x)

    x = conv3d_bn(x,
                  num_classes,
                  1,
                  1,
                  1,
                  padding='same',
                  use_bias=True,
                  use_activation_fn=False,
                  use_bn=False,
                  name='Conv3d_6a_1x1' + 'second')

    x = Flatten(name='flatten' + 'second')(x)
    predictions = Dense(num_classes,
                        activation='softmax',
                        name='softmax' + 'second')(x)
    model = Model(inputs=model_inputs, outputs=predictions, name='PI3D')

    model_second = Inception_Inflated3d(include_top=True,
                                        weights='rgb_imagenet_and_kinetics')

    weight_idx_s = -45 + (2 - sum_idx) * 20
    weight_idx_e = -4

    for l_m, l_lh in zip(model.layers[weight_idx_s:weight_idx_e],
                         model_second.layers[weight_idx_s:weight_idx_e]):
        l_m.set_weights(l_lh.get_weights())
        l_m.trainable = True

    lstm_weights = "./weights_optim/{}/lstm_model_{}.hdf5".format(
        dataset, protocol)
    l_model = load_model(lstm_weights, compile=False)

    for idx1 in range(len(model.layers)):
        n1 = model.layers[idx1].name
        if 'lstm' in n1:
            for idx2 in range(len(l_model.layers)):
                n2 = l_model.layers[idx2].name
                if n1 == n2:
                    model.layers[idx1].set_weights(
                        l_model.layers[idx2].get_weights())
                    break

    return model
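
The Lambda helpers inflate_dense, sum_feature and concat_feature are not shown in this excerpt. As an assumption about their role (combining the stacked per-model features of shape (no_of_p, 8, 7, 7, f_dept)), sum_feature and concat_feature could be sketched with keras.backend as follows:

from keras import backend as K

def sum_feature(x):
    # Sum the stacked per-model features over axis 1 -> (batch, 8, 7, 7, f_dept).
    return K.sum(x, axis=1)

def concat_feature(x):
    # Fold the per-model axis into the channel axis:
    # (batch, P, 8, 7, 7, C) -> (batch, 8, 7, 7, P*C)
    s = K.int_shape(x)
    x = K.permute_dimensions(x, (0, 2, 3, 4, 1, 5))
    return K.reshape(x, (-1, s[2], s[3], s[4], s[1] * s[5]))
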
Example No. 21
def Inception_Inflated3d(include_top=True,
                         weights=None,
                         input_tensor=None,
                         input_shape=None,
                         dropout_prob=0.0,
                         endpoint_logit=True,
                         classes=400):
    """Instantiates the Inflated 3D Inception v1 architecture.

    Optionally loads weights pre-trained
    on Kinetics. Note that when using TensorFlow,
    for best performance you should set
    `image_data_format='channels_last'` in your Keras config
    at ~/.keras/keras.json.
    The model and the weights are compatible with both
    TensorFlow and Theano. The data format
    convention used by the model is the one
    specified in your Keras config file.
    Note that the default input frame(image) size for this model is 224x224.

    # Arguments
        include_top: whether to include the classification
            layer at the top of the network.
        weights: one of `None` (random initialization),
            'kinetics_only' (pre-training on Kinetics dataset only),
            or 'imagenet_and_kinetics' (pre-training on ImageNet and Kinetics datasets).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(NUM_FRAMES, 224, 224, 3)` with `channels_last` data format
            or `(NUM_FRAMES, 3, 224, 224)` with `channels_first` data format).
            It should have exactly 3 input channels.
            NUM_FRAMES should be no smaller than 8; the authors used 64
            frames per example for training and testing on the Kinetics dataset.
            Also, width and height should be no smaller than 32.
            E.g. `(64, 150, 150, 3)` would be one valid value.
        dropout_prob: optional, dropout probability applied in the dropout layer
            after the global average pooling layer.
            0.0 means no dropout is applied, 1.0 means dropout is applied to all features.
            Note: Since Dropout is applied just before the classification
            layer, it is only useful when `include_top` is set to True.
        endpoint_logit: (boolean) optional. If True, the model's forward pass
            will end at producing logits. Otherwise, softmax is applied after producing
            the logits to produce the class probabilities prediction. Setting this parameter
            to True is particularly useful when you want to combine results of the RGB model
            and the optical flow model.
            - `True` end model forward pass at logit output
            - `False` go further after logit to produce softmax predictions
            Note: This parameter is only useful when `include_top` is set to True.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    if not (weights in WEIGHTS_NAME or weights is None
            or os.path.exists(weights)):
        raise ValueError(
            'The `weights` argument should be either '
            '`None` (random initialization) or %s' % str(WEIGHTS_NAME) + ' '
            'or a valid path to a file containing `weights` values')

    if weights in WEIGHTS_NAME and include_top and classes != 400:
        raise ValueError(
            'If using `weights` as one of these %s, with `include_top`'
            ' as true, `classes` should be 400' % str(WEIGHTS_NAME))

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_frame_size=224,
                                      min_frame_size=32,
                                      default_num_frames=64,
                                      min_num_frames=8,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 4

    # Downsampling via convolution (spatial and temporal)
    x = conv3d_bn(img_input,
                  64,
                  7,
                  7,
                  7,
                  strides=(2, 2, 2),
                  padding='same',
                  name='Conv3d_1a_7x7')

    # Downsampling (spatial only)
    x = MaxPooling3D((1, 3, 3),
                     strides=(1, 2, 2),
                     padding='same',
                     name='MaxPool2d_2a_3x3')(x)
    x = conv3d_bn(x,
                  64,
                  1,
                  1,
                  1,
                  strides=(1, 1, 1),
                  padding='same',
                  name='Conv3d_2b_1x1')
    x = conv3d_bn(x,
                  192,
                  3,
                  3,
                  3,
                  strides=(1, 1, 1),
                  padding='same',
                  name='Conv3d_2c_3x3')

    # Downsampling (spatial only)
    x = MaxPooling3D((1, 3, 3),
                     strides=(1, 2, 2),
                     padding='same',
                     name='MaxPool2d_3a_3x3')(x)

    # Mixed 3b
    branch_0 = conv3d_bn(x,
                         64,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_3b_0a_1x1')

    branch_1 = conv3d_bn(x,
                         96,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_3b_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         128,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_3b_1b_3x3')

    branch_2 = conv3d_bn(x,
                         16,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_3b_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         32,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_3b_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_3b_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         32,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_3b_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_3b')

    # Mixed 3c
    branch_0 = conv3d_bn(x,
                         128,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_3c_0a_1x1')

    branch_1 = conv3d_bn(x,
                         128,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_3c_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         192,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_3c_1b_3x3')

    branch_2 = conv3d_bn(x,
                         32,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_3c_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         96,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_3c_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_3c_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         64,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_3c_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_3c')

    # Downsampling (spatial and temporal)
    x = MaxPooling3D((3, 3, 3),
                     strides=(2, 2, 2),
                     padding='same',
                     name='MaxPool2d_4a_3x3')(x)

    # Mixed 4b
    branch_0 = conv3d_bn(x,
                         192,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4b_0a_1x1')

    branch_1 = conv3d_bn(x,
                         96,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4b_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         208,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4b_1b_3x3')

    branch_2 = conv3d_bn(x,
                         16,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4b_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         48,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4b_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_4b_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         64,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4b_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_4b')

    # Mixed 4c
    branch_0 = conv3d_bn(x,
                         160,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4c_0a_1x1')

    branch_1 = conv3d_bn(x,
                         112,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4c_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         224,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4c_1b_3x3')

    branch_2 = conv3d_bn(x,
                         24,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4c_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         64,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4c_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_4c_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         64,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4c_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_4c')

    # Mixed 4d
    branch_0 = conv3d_bn(x,
                         128,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4d_0a_1x1')

    branch_1 = conv3d_bn(x,
                         128,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4d_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         256,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4d_1b_3x3')

    branch_2 = conv3d_bn(x,
                         24,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4d_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         64,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4d_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_4d_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         64,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4d_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_4d')

    # Mixed 4e
    branch_0 = conv3d_bn(x,
                         112,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4e_0a_1x1')

    branch_1 = conv3d_bn(x,
                         144,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4e_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         288,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4e_1b_3x3')

    branch_2 = conv3d_bn(x,
                         32,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4e_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         64,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4e_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_4e_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         64,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4e_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_4e')

    # Mixed 4f
    branch_0 = conv3d_bn(x,
                         256,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4f_0a_1x1')

    branch_1 = conv3d_bn(x,
                         160,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4f_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         320,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4f_1b_3x3')

    branch_2 = conv3d_bn(x,
                         32,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4f_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         128,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4f_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_4f_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         128,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4f_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_4f')

    # Downsampling (spatial and temporal)
    x = MaxPooling3D((2, 2, 2),
                     strides=(2, 2, 2),
                     padding='same',
                     name='MaxPool2d_5a_2x2')(x)

    # Mixed 5b
    branch_0 = conv3d_bn(x,
                         256,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_5b_0a_1x1')

    branch_1 = conv3d_bn(x,
                         160,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_5b_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         320,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_5b_1b_3x3')

    branch_2 = conv3d_bn(x,
                         32,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_5b_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         128,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_5b_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_5b_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         128,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_5b_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_5b')

    # Mixed 5c
    branch_0 = conv3d_bn(x,
                         384,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_5c_0a_1x1')

    branch_1 = conv3d_bn(x,
                         192,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_5c_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         384,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_5c_1b_3x3')

    branch_2 = conv3d_bn(x,
                         48,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_5c_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         128,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_5c_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_5c_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         128,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_5c_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_5c')

    if include_top:
        # Classification block
        x = AveragePooling3D((2, 7, 7),
                             strides=(1, 1, 1),
                             padding='valid',
                             name='global_avg_pool')(x)
        x = Dropout(dropout_prob)(x)

        x = conv3d_bn(x,
                      classes,
                      1,
                      1,
                      1,
                      padding='same',
                      use_bias=True,
                      use_activation_fn=False,
                      use_bn=False,
                      name='Conv3d_6a_1x1')

        num_frames_remaining = int(x.shape[1])
        x = Reshape((num_frames_remaining, classes))(x)

        # average the per-frame logits over time to get clip-level class scores
        x = Lambda(lambda x: K.mean(x, axis=1, keepdims=False),
                   output_shape=lambda s: (s[0], s[2]))(x)

        if not endpoint_logit:
            x = Activation('softmax', name='prediction')(x)
    else:
        h = int(x.shape[2])
        w = int(x.shape[3])
        x = AveragePooling3D((2, h, w),
                             strides=(1, 1, 1),
                             padding='valid',
                             name='global_avg_pool')(x)

    inputs = img_input
    # create model
    model = Model(inputs, x, name='i3d_inception')

    # load weights
    if weights in WEIGHTS_NAME:
        if weights == WEIGHTS_NAME[0]:  # rgb_kinetics_only
            if include_top:
                weights_url = WEIGHTS_PATH['rgb_kinetics_only']
                model_name = 'i3d_inception_rgb_kinetics_only.h5'
            else:
                weights_url = WEIGHTS_PATH_NO_TOP['rgb_kinetics_only']
                model_name = 'i3d_inception_rgb_kinetics_only_no_top.h5'

        elif weights == WEIGHTS_NAME[1]:  # flow_kinetics_only
            if include_top:
                weights_url = WEIGHTS_PATH['flow_kinetics_only']
                model_name = 'i3d_inception_flow_kinetics_only.h5'
            else:
                weights_url = WEIGHTS_PATH_NO_TOP['flow_kinetics_only']
                model_name = 'i3d_inception_flow_kinetics_only_no_top.h5'

        elif weights == WEIGHTS_NAME[2]:  # rgb_imagenet_and_kinetics
            if include_top:
                weights_url = WEIGHTS_PATH['rgb_imagenet_and_kinetics']
                model_name = 'i3d_inception_rgb_imagenet_and_kinetics.h5'
            else:
                weights_url = WEIGHTS_PATH_NO_TOP['rgb_imagenet_and_kinetics']
                model_name = 'i3d_inception_rgb_imagenet_and_kinetics_no_top.h5'

        elif weights == WEIGHTS_NAME[3]:  # flow_imagenet_and_kinetics
            if include_top:
                weights_url = WEIGHTS_PATH['flow_imagenet_and_kinetics']
                model_name = 'i3d_inception_flow_imagenet_and_kinetics.h5'
            else:
                weights_url = WEIGHTS_PATH_NO_TOP['flow_imagenet_and_kinetics']
                model_name = 'i3d_inception_flow_imagenet_and_kinetics_no_top.h5'

        downloaded_weights_path = get_file(model_name,
                                           weights_url,
                                           cache_subdir='models')
        model.load_weights(downloaded_weights_path)

        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)

        if K.image_data_format() == 'channels_first' and K.backend(
        ) == 'tensorflow':
            warnings.warn('You are using the TensorFlow backend, yet you '
                          'are using the Theano '
                          'image data format convention '
                          '(`image_data_format="channels_first"`). '
                          'For best performance, set '
                          '`image_data_format="channels_last"` in '
                          'your keras config '
                          'at ~/.keras/keras.json.')

    elif weights is not None:
        model.load_weights(weights)

    return model
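The classification block above keeps one logit vector per remaining frame (the Reshape) and then averages over time (the Lambda) before the optional softmax. A standalone NumPy sketch of that temporal pooling; the frame and class counts are illustrative, not the network's actual values:

# Illustrative sketch of the Reshape + Lambda(K.mean) temporal pooling above.
import numpy as np

num_frames_remaining, classes = 4, 400              # assumed example values
frame_logits = np.random.randn(1, num_frames_remaining, classes)

clip_logits = frame_logits.mean(axis=1)             # shape: (1, classes)
probs = np.exp(clip_logits) / np.exp(clip_logits).sum(axis=-1, keepdims=True)
print(clip_logits.shape, probs.sum())               # (1, 400) and ~1.0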
Ejemplo n.º 22
0
        def unet_model_3d(input_shape,
                          downsize_filters_factor=1,
                          pool_size=(2, 2, 2),
                          n_labels=1,
                          initial_learning_rate=0.01,
                          deconvolution=False):
            """
            Builds the 3D U-Net Keras model.
            The [U-Net](https://arxiv.org/abs/1505.04597) uses a fully-convolutional architecture consisting of an
            encoder and a decoder. The encoder captures contextual information while the decoder enables
            precise localization. Due to the large number of parameters, the input shape has to be kept small: for
            images of shape 144x144x144, for example, the model already consumes 32 GB of memory.

            :param input_shape: Shape of the input data (x_size, y_size, z_size, n_channels).
            :param downsize_filters_factor: Factor by which to reduce the number of filters. Making this value larger
            will reduce the amount of memory the model needs during training.
            :param pool_size: Pool size for the max pooling operations.
            :param n_labels: Number of binary labels that the model is learning.
            :param initial_learning_rate: Initial learning rate for the model. This will be decayed during training.
            :param deconvolution: If set to True, use transposed convolution (deconvolution) instead of upsampling.
            This increases the amount of memory required during training.
            :return: Untrained 3D UNet Model
            """
            inputs = Input(input_shape)
            conv1 = Conv3D(int(32 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(inputs)
            conv1 = Conv3D(int(64 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(conv1)
            pool1 = MaxPooling3D(pool_size=pool_size)(conv1)

            conv2 = Conv3D(int(64 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(pool1)
            conv2 = Conv3D(int(128 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(conv2)
            pool2 = MaxPooling3D(pool_size=pool_size)(conv2)

            conv3 = Conv3D(int(128 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(pool2)
            conv3 = Conv3D(int(256 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(conv3)
            print(conv3.shape)
            pool3 = MaxPooling3D(pool_size=pool_size)(conv3)

            conv4 = Conv3D(int(256 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(pool3)
            conv4 = Conv3D(int(512 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(conv4)
            print(conv4.shape)

            up5 = get_upconv(pool_size=pool_size,
                             deconvolution=deconvolution,
                             depth=2,
                             nb_filters=int(512 / downsize_filters_factor),
                             image_shape=input_shape[-3:])(conv4)
            print(up5.shape)
            up5 = concatenate([up5, conv3], axis=4)
            conv5 = Conv3D(int(256 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(up5)
            conv5 = Conv3D(int(256 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(conv5)

            up6 = get_upconv(pool_size=pool_size,
                             deconvolution=deconvolution,
                             depth=1,
                             nb_filters=int(256 / downsize_filters_factor),
                             image_shape=input_shape[-3:])(conv5)
            up6 = concatenate([up6, conv2], axis=4)
            conv6 = Conv3D(int(128 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(up6)
            conv6 = Conv3D(int(128 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(conv6)

            up7 = get_upconv(pool_size=pool_size,
                             deconvolution=deconvolution,
                             depth=0,
                             nb_filters=int(128 / downsize_filters_factor),
                             image_shape=input_shape[-3:])(conv6)
            up7 = concatenate([up7, conv1], axis=4)
            conv7 = Conv3D(int(64 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(up7)
            conv7 = Conv3D(int(64 / downsize_filters_factor), (3, 3, 3),
                           activation='relu',
                           padding='same')(conv7)

            conv8 = Conv3D(n_labels, (1, 1, 1))(conv7)
            act = Activation('sigmoid')(conv8)
            model = Model(inputs=inputs, outputs=act)

            model.compile(optimizer=Adam(lr=initial_learning_rate),
                          loss=SegmentationModel.dice_coef_loss,
                          metrics=[SegmentationModel.dice_coef])

            return model
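The `deconvolution` flag described in the docstring above swaps the parameter-free up-sampling for a learned transposed convolution in the decoder. A small shape check, with sizes made up for the example rather than taken from the model, showing that both double each spatial dimension when pool_size=(2, 2, 2):

# Shape check for the `deconvolution` option: UpSampling3D has no weights,
# Conv3DTranspose learns its upsampling kernel, but both double each spatial dim.
from keras.layers import Input, UpSampling3D, Conv3DTranspose
from keras.models import Model

x = Input((8, 8, 8, 16))
up_a = UpSampling3D(size=(2, 2, 2))(x)                       # parameter-free
up_b = Conv3DTranspose(16, (2, 2, 2), strides=(2, 2, 2))(x)  # learned upsampling
print(Model(x, up_a).output_shape)   # (None, 16, 16, 16, 16)
print(Model(x, up_b).output_shape)   # (None, 16, 16, 16, 16)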
                          strides=(1, 1, 1),
                          padding="same")(h)
        h = BatchNormalization()(h)
        residual = Add()([x, h])
        x = Activation("relu")(residual)
        return x

    def compute_output_shape(self, input_shape):
        return input_shape


model = Sequential()
model.add(Convolution3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling3D(pool_size=(2, 2, 2)))
#model.add(Residual(64,(3,3,3)))
model.add(Convolution3D(128, kernel_size=(3, 3, 3), strides=(1, 1, 1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling3D(pool_size=(2, 2, 2)))
#model.add(Residual(128,(3,3,3)))
model.add(Convolution3D(256, kernel_size=(3, 3, 3), strides=(1, 1, 1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling3D(pool_size=(2, 2, 2)))
#model.add(Residual(256,(3,3,3)))
model.add(Convolution3D(512, kernel_size=(3, 3, 3), strides=(1, 1, 1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling3D(pool_size=(2, 2, 2)))
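Only the tail of the custom `Residual` layer referenced by the commented-out `model.add(Residual(...))` calls survives above (the end of its `call` plus `compute_output_shape`). A minimal sketch of such a block, assuming it wraps two same-padded 3D convolutions around an additive identity skip; the class below is a guess, not the original code:

# Minimal sketch of a residual 3D block consistent with the fragment above.
# Class name, arguments and exact layer order are assumptions; the identity
# skip requires the input channel count to equal `filters`.
from keras.layers import Layer, Conv3D, BatchNormalization, Activation, Add


class ResidualSketch(Layer):
    def __init__(self, filters, kernel_size=(3, 3, 3), **kwargs):
        super(ResidualSketch, self).__init__(**kwargs)
        self.conv1 = Conv3D(filters, kernel_size, padding='same')
        self.bn1 = BatchNormalization()
        self.conv2 = Conv3D(filters, kernel_size, padding='same')
        self.bn2 = BatchNormalization()

    def call(self, x):
        h = Activation('relu')(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        return Activation('relu')(Add()([x, h]))

    def compute_output_shape(self, input_shape):
        return input_shape

With matching channel counts it could slot in where the commented `model.add(Residual(64, (3, 3, 3)))` call appears.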
Ejemplo n.º 24
0
    def get_unet(self):
        inputs = Input((img_depth, img_rows, img_cols, 1))
        conv11 = Conv3D(32, (3, 3, 3), activation='relu',
                        padding='same')(inputs)
        conc11 = concatenate([inputs, conv11], axis=4)
        conv12 = Conv3D(32, (3, 3, 3), activation='relu',
                        padding='same')(conc11)
        conc12 = concatenate([inputs, conv12], axis=4)
        pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conc12)

        conv21 = Conv3D(64, (3, 3, 3), activation='relu',
                        padding='same')(pool1)
        conc21 = concatenate([pool1, conv21], axis=4)
        conv22 = Conv3D(64, (3, 3, 3), activation='relu',
                        padding='same')(conc21)
        conc22 = concatenate([pool1, conv22], axis=4)
        pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conc22)

        conv31 = Conv3D(128, (3, 3, 3), activation='relu',
                        padding='same')(pool2)
        conc31 = concatenate([pool2, conv31], axis=4)
        conv32 = Conv3D(128, (3, 3, 3), activation='relu',
                        padding='same')(conc31)
        conc32 = concatenate([pool2, conv32], axis=4)
        pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conc32)

        conv41 = Conv3D(256, (3, 3, 3), activation='relu',
                        padding='same')(pool3)
        conc41 = concatenate([pool3, conv41], axis=4)
        conv42 = Conv3D(256, (3, 3, 3), activation='relu',
                        padding='same')(conc41)
        conc42 = concatenate([pool3, conv42], axis=4)
        pool4 = MaxPooling3D(pool_size=(2, 2, 2))(conc42)

        conv51 = Conv3D(512, (3, 3, 3), activation='relu',
                        padding='same')(pool4)
        conc51 = concatenate([pool4, conv51], axis=4)
        conv52 = Conv3D(512, (3, 3, 3), activation='relu',
                        padding='same')(conc51)
        conc52 = concatenate([pool4, conv52], axis=4)

        up6 = concatenate([
            Conv3DTranspose(256, (2, 2, 2), strides=(2, 2, 2),
                            padding='same')(conc52), conc42
        ],
                          axis=4)
        conv61 = Conv3D(256, (3, 3, 3), activation='relu', padding='same')(up6)
        conc61 = concatenate([up6, conv61], axis=4)
        conv62 = Conv3D(256, (3, 3, 3), activation='relu',
                        padding='same')(conc61)
        conc62 = concatenate([up6, conv62], axis=4)

        up7 = concatenate([
            Conv3DTranspose(128, (2, 2, 2), strides=(2, 2, 2),
                            padding='same')(conc62), conv32
        ],
                          axis=4)
        conv71 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(up7)
        conc71 = concatenate([up7, conv71], axis=4)
        conv72 = Conv3D(128, (3, 3, 3), activation='relu',
                        padding='same')(conc71)
        conc72 = concatenate([up7, conv72], axis=4)

        up8 = concatenate([
            Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2),
                            padding='same')(conc72), conv22
        ],
                          axis=4)
        conv81 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(up8)
        conc81 = concatenate([up8, conv81], axis=4)
        conv82 = Conv3D(64, (3, 3, 3), activation='relu',
                        padding='same')(conc81)
        conc82 = concatenate([up8, conv82], axis=4)

        up9 = concatenate([
            Conv3DTranspose(32, (2, 2, 2), strides=(2, 2, 2),
                            padding='same')(conc82), conv12
        ],
                          axis=4)
        conv91 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(up9)
        conc91 = concatenate([up9, conv91], axis=4)
        conv92 = Conv3D(32, (3, 3, 3), activation='relu',
                        padding='same')(conc91)
        conc92 = concatenate([up9, conv92], axis=4)

        conv10 = Conv3D(1, (1, 1, 1), activation='sigmoid')(conc92)

        model = Model(inputs=[inputs], outputs=[conv10])

        model.summary()
        #plot_model(model, to_file='model.png')

        model.compile(optimizer=Adam(lr=1e-5,
                                     beta_1=0.9,
                                     beta_2=0.999,
                                     epsilon=1e-08,
                                     decay=0.000000199),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])

        return model
def C3D_conv_features(summary=False):
    """ Return the Keras model of the network until the fc6 layer where the
    convolutional features can be extracted.
    """
    from keras.layers.convolutional import Convolution3D, MaxPooling3D, ZeroPadding3D
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.models import Sequential

    model = Sequential()
    # 1st layer group
    model.add(
        Convolution3D(64,
                      3,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      name='conv1',
                      subsample=(1, 1, 1),
                      input_shape=(3, 16, 112, 112),
                      trainable=False))
    model.add(
        MaxPooling3D(pool_size=(1, 2, 2),
                     strides=(1, 2, 2),
                     border_mode='valid',
                     name='pool1'))
    # 2nd layer group
    model.add(
        Convolution3D(128,
                      3,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      name='conv2',
                      subsample=(1, 1, 1),
                      trainable=False))
    model.add(
        MaxPooling3D(pool_size=(2, 2, 2),
                     strides=(2, 2, 2),
                     border_mode='valid',
                     name='pool2'))
    # 3rd layer group
    model.add(
        Convolution3D(256,
                      3,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      name='conv3a',
                      subsample=(1, 1, 1),
                      trainable=False))
    model.add(
        Convolution3D(256,
                      3,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      name='conv3b',
                      subsample=(1, 1, 1),
                      trainable=False))
    model.add(
        MaxPooling3D(pool_size=(2, 2, 2),
                     strides=(2, 2, 2),
                     border_mode='valid',
                     name='pool3'))
    # 4th layer group
    model.add(
        Convolution3D(512,
                      3,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      name='conv4a',
                      subsample=(1, 1, 1),
                      trainable=False))
    model.add(
        Convolution3D(512,
                      3,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      name='conv4b',
                      subsample=(1, 1, 1),
                      trainable=False))
    model.add(
        MaxPooling3D(pool_size=(2, 2, 2),
                     strides=(2, 2, 2),
                     border_mode='valid',
                     name='pool4'))
    # 5th layer group
    model.add(
        Convolution3D(512,
                      3,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      name='conv5a',
                      subsample=(1, 1, 1),
                      trainable=False))
    model.add(
        Convolution3D(512,
                      3,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      name='conv5b',
                      subsample=(1, 1, 1),
                      trainable=False))
    model.add(ZeroPadding3D(padding=(0, 1, 1), name='zeropadding'))
    model.add(
        MaxPooling3D(pool_size=(2, 2, 2),
                     strides=(2, 2, 2),
                     border_mode='valid',
                     name='pool5'))
    model.add(Flatten(name='flatten'))
    # FC layers group
    model.add(Dense(4096, activation='relu', name='fc6', trainable=False))
    model.add(Dropout(.5, name='do1'))
    model.add(Dense(4096, activation='relu', name='fc7'))
    model.add(Dropout(.5, name='do2'))
    model.add(Dense(487, activation='softmax', name='fc8'))

    # Load weights
    model.load_weights('data/models/c3d-sports1M_weights.h5')

    # remove fc8, do2, fc7 and do1 so that fc6 is the last layer
    for _ in range(4):
        model.pop()

    if summary:
        model.summary()
    return model
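A usage sketch for the feature extractor above. The dummy clip is a placeholder; a real call needs the sports1M weights file referenced in the code and a backend configured for the channels-first layout implied by input_shape=(3, 16, 112, 112):

# Usage sketch: extract 4096-d fc6 features for one clip (16 RGB frames of
# 112x112, channels first). Random values stand in for real video frames.
import numpy as np

c3d = C3D_conv_features(summary=False)
clip = np.random.rand(1, 3, 16, 112, 112).astype('float32')
fc6_features = c3d.predict(clip)
print(fc6_features.shape)  # (1, 4096) once the four top layers are popped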
Ejemplo n.º 26
0
def unet_model_3d(input_shape,
                  pool_size=(2, 2, 2),
                  n_labels=1,
                  initial_learning_rate=0.00001,
                  deconvolution=False,
                  depth=4,
                  n_base_filters=32,
                  include_label_wise_dice_coefficients=False,
                  metrics=dice_coefficient,
                  batch_normalization=True,
                  activation_name="sigmoid"):

    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    # add levels with max pooling
    for layer_depth in range(depth):
        layer1 = create_convolution_block(
            input_layer=current_layer,
            n_filters=n_base_filters * (2**layer_depth),
            batch_normalization=batch_normalization)
        layer2 = create_convolution_block(
            input_layer=layer1,
            n_filters=n_base_filters * (2**layer_depth) * 2,
            batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    # add levels with up-convolution or up-sampling
    for layer_depth in range(depth - 2, -1, -1):
        up_convolution = get_up_convolution(
            pool_size=pool_size,
            deconvolution=deconvolution,
            n_filters=current_layer._keras_shape[1])(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=4)
        current_layer = create_convolution_block(
            n_filters=levels[layer_depth][1]._keras_shape[1],
            input_layer=concat,
            batch_normalization=batch_normalization)
        current_layer = create_convolution_block(
            n_filters=levels[layer_depth][1]._keras_shape[1],
            input_layer=current_layer,
            batch_normalization=batch_normalization)

    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation(activation_name)(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    if include_label_wise_dice_coefficients and n_labels > 1:
        label_wise_dice_metrics = [
            get_label_dice_coefficient_function(index)
            for index in range(n_labels)
        ]
        if metrics:
            metrics = metrics + label_wise_dice_metrics
        else:
            metrics = label_wise_dice_metrics

    model.compile(optimizer=Adam(lr=initial_learning_rate),
                  loss=dice_coefficient_loss,
                  metrics=metrics)
    return model
Ejemplo n.º 27
0
    def build_generator(self):
        """U-Net Generator"""
        def conv3d(input_tensor,
                   n_filters,
                   kernel_size=(3, 3, 3),
                   batch_normalization=True,
                   scale=True,
                   padding='valid',
                   use_bias=False,
                   name=''):
            """
            3D convolutional layer (optionally with batch normalization) followed by a LeakyReLU activation
            """
            layer = Conv3D(filters=n_filters,
                           kernel_size=kernel_size,
                           padding=padding,
                           use_bias=use_bias,
                           name=name + '_conv3d')(input_tensor)
            # if batch_normalization:
            #     layer = BatchNormalization(name=name+'_bn')(layer)
            #layer = Activation('relu', name=name+'_actrelu')(layer)
            # Batch normalization, then the LeakyReLU activation below
            if batch_normalization:
                layer = BatchNormalization(momentum=0.8,
                                           name=name + '_bn',
                                           scale=scale)(layer)
            layer = LeakyReLU(alpha=0.2, name=name + '_actleakyrelu')(layer)
            return layer

        def deconv3d(input_tensor,
                     n_filters,
                     kernel_size=(3, 3, 3),
                     batch_normalization=True,
                     scale=True,
                     padding='valid',
                     use_bias=False,
                     name=''):
            """
            3D deconvolution (upsampling + convolution) layer, optionally with batch normalization, followed by a LeakyReLU activation
            """
            layer = UpSampling3D(size=2)(input_tensor)
            layer = Conv3D(filters=n_filters,
                           kernel_size=kernel_size,
                           padding=padding,
                           use_bias=use_bias,
                           name=name + '_conv3d')(layer)
            # BN before activation
            if batch_normalization:
                layer = BatchNormalization(momentum=0.8,
                                           name=name + '_bn',
                                           scale=scale)(layer)
            layer = LeakyReLU(alpha=0.2, name=name + '_actleakyrelu')(layer)
            return layer

        img_S = Input(shape=self.input_shape_g,
                      name='input_img_S')  # 148x148x148
        img_T = Input(shape=self.input_shape_g,
                      name='input_img_T')  # 148x148x148

        # Combine subject and template images into one input (element-wise addition; channel-wise concatenation kept below, commented out)
        #combined_imgs = Concatenate(axis=-1, name='combine_imgs_g')([img_S, img_T])
        combined_imgs = Add(name='combine_imgs_g')([img_S, img_T])

        # downsampling
        down1 = conv3d(input_tensor=combined_imgs,
                       n_filters=self.gf,
                       padding='valid',
                       name='down1_1')  #146
        down1 = conv3d(input_tensor=down1,
                       n_filters=self.gf,
                       padding='valid',
                       name='down1_2')  #144
        pool1 = MaxPooling3D(pool_size=(2, 2, 2), name='pool1')(down1)  #72

        down2 = conv3d(input_tensor=pool1,
                       n_filters=2 * self.gf,
                       padding='valid',
                       name='down2_1')  #70
        down2 = conv3d(input_tensor=down2,
                       n_filters=2 * self.gf,
                       padding='valid',
                       name='down2_2')  #68
        pool2 = MaxPooling3D(pool_size=(2, 2, 2), name='pool2')(down2)  #34

        down3 = conv3d(input_tensor=pool2,
                       n_filters=4 * self.gf,
                       padding='valid',
                       name='down3_1')  #32
        down3 = conv3d(input_tensor=down3,
                       n_filters=4 * self.gf,
                       padding='valid',
                       name='down3_2')  #30
        pool3 = MaxPooling3D(pool_size=(2, 2, 2), name='pool3')(down3)  #15

        center = conv3d(input_tensor=pool3,
                        n_filters=8 * self.gf,
                        padding='valid',
                        name='center1')  #13
        center = conv3d(input_tensor=center,
                        n_filters=8 * self.gf,
                        padding='valid',
                        name='center2')  #11

        # upsampling with gap filling
        up3 = deconv3d(input_tensor=center,
                       n_filters=4 * self.gf,
                       padding='same',
                       name='up3')  #22
        gap3 = conv3d(input_tensor=down3,
                      n_filters=4 * self.gf,
                      padding='valid',
                      name='gap3_1')  #28
        gap3 = conv3d(input_tensor=gap3,
                      n_filters=4 * self.gf,
                      padding='valid',
                      name='gap3_2')  #26
        up3 = concatenate([Cropping3D(2)(gap3), up3], name='up3concat')  #22
        up3 = conv3d(input_tensor=up3,
                     n_filters=4 * self.gf,
                     padding='valid',
                     name='up3conv_1')  #20
        up3 = conv3d(input_tensor=up3,
                     n_filters=4 * self.gf,
                     padding='valid',
                     name='up3conv_2')  #18

        up2 = deconv3d(input_tensor=up3,
                       n_filters=2 * self.gf,
                       padding='same',
                       name='up2')  #36
        gap2 = conv3d(input_tensor=down2,
                      n_filters=2 * self.gf,
                      padding='valid',
                      name='gap2_1')  #66
        for i in range(2, 7):
            gap2 = conv3d(input_tensor=gap2,
                          n_filters=2 * self.gf,
                          padding='valid',
                          name='gap2_' + str(i))  #56

        up2 = concatenate([Cropping3D(10)(gap2), up2], name='up2concat')  #36
        up2 = conv3d(input_tensor=up2,
                     n_filters=2 * self.gf,
                     padding='valid',
                     name='up2conv_1')  #34
        up2 = conv3d(input_tensor=up2,
                     n_filters=2 * self.gf,
                     padding='valid',
                     name='up2conv_2')  #32

        up1 = deconv3d(input_tensor=up2,
                       n_filters=self.gf,
                       padding='same',
                       name='up1')  #64
        gap1 = conv3d(input_tensor=down1,
                      n_filters=self.gf,
                      padding='valid',
                      name='gap1_1')  #142
        for i in range(2, 21):
            gap1 = conv3d(input_tensor=gap1,
                          n_filters=self.gf,
                          padding='valid',
                          name='gap1_' + str(i))  #104
        up1 = concatenate([Cropping3D(20)(gap1), up1], name='up1concat')  #64
        up1 = conv3d(input_tensor=up1,
                     n_filters=self.gf,
                     padding='valid',
                     name='up1conv_1')  #62
        up1 = conv3d(input_tensor=up1,
                     n_filters=self.gf,
                     padding='valid',
                     name='up1conv_2')  #60

        phi = Conv3D(filters=3,
                     kernel_size=(1, 1, 1),
                     padding='same',
                     use_bias=False,
                     name='phi')(up1)  #60

        model = Model([img_S, img_T], outputs=phi, name='generator_model')

        return model
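The inline size comments in build_generator track how the valid convolutions shrink the volume and how the cropped 'gap' branches are sized so the skip connections line up. A small arithmetic sketch of the main path, assuming the 148^3 input noted in the comments, reproduces the final #60:

# Arithmetic behind the inline size comments in build_generator: each valid
# 3x3x3 conv trims 2 voxels per spatial dimension, 2x2x2 max pooling halves it,
# and each deconv3d (UpSampling3D + 'same' conv) doubles it.
size = 148                  # assumed input size from the '# 148x148x148' comments
size -= 4; size //= 2       # down1 (146, 144), pool1 -> 72
size -= 4; size //= 2       # down2 (70, 68),   pool2 -> 34
size -= 4; size //= 2       # down3 (32, 30),   pool3 -> 15
size -= 4                   # center            -> 11
size *= 2; size -= 4        # up3 -> 22, up3 convs -> 18
size *= 2; size -= 4        # up2 -> 36, up2 convs -> 32
size *= 2; size -= 4        # up1 -> 64, up1 convs -> 60
print(size)                 # 60, matching the '#60' comment on phi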
def get_3d_unet():

    inputs = Input((cm.slices_3d, cm.img_rows_3d, cm.img_cols_3d, 1),
                   name='layer_no_0_input')
    conv1 = Conv3D(filters=32,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_1_conv')(inputs)
    conv1 = Conv3D(filters=32,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_2_conv')(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2), name='layer_no_3')(conv1)

    conv2 = Conv3D(filters=64,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_4_conv')(pool1)
    conv2 = Conv3D(filters=64,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_5_conv')(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2), name='layer_no_6')(conv2)

    conv3 = Conv3D(filters=128,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_7_conv')(pool2)
    conv3 = Conv3D(filters=128,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_8_conv')(conv3)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2), name='layer_no_9')(conv3)

    conv4 = Conv3D(filters=256,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_10_conv')(pool3)
    conv4 = Conv3D(filters=256,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_11_conv')(conv4)
    pool4 = MaxPooling3D(pool_size=(2, 2, 2), name='layer_no_12')(conv4)

    conv5 = Conv3D(filters=512,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_13_conv')(pool4)
    conv5 = Conv3D(filters=512,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_14_conv')(conv5)

    up6 = merge(
        [UpSampling3D(size=(2, 2, 2), name='layer_no_15')(conv5), conv4],
        mode='concat',
        concat_axis=-1,
        name='layer_no_16')
    conv6 = Conv3D(filters=256,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_17_conv')(up6)
    conv6 = Conv3D(filters=256,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_18_conv')(conv6)

    up7 = merge(
        [UpSampling3D(size=(2, 2, 2), name='layer_no_19')(conv6), conv3],
        mode='concat',
        concat_axis=-1,
        name='layer_no_20')
    conv7 = Conv3D(filters=128,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_21_conv')(up7)
    conv7 = Conv3D(filters=128,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_22_conv')(conv7)

    up8 = merge(
        [UpSampling3D(size=(2, 2, 2), name='layer_no_23')(conv7), conv2],
        mode='concat',
        concat_axis=-1,
        name='layer_no_24')
    conv8 = Conv3D(filters=64,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_25_conv')(up8)
    conv8 = Conv3D(filters=64,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_26_conv')(conv8)

    up9 = merge(
        [UpSampling3D(size=(2, 2, 2), name='layer_no_27')(conv8), conv1],
        mode='concat',
        concat_axis=-1,
        name='layer_no_28')
    conv9 = Conv3D(filters=32,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_29_conv')(up9)
    conv9 = Conv3D(filters=32,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   border_mode='same',
                   name='layer_no_30_last')(conv9)

    conv10 = Conv3D(filters=3,
                    kernel_size=(1, 1, 1),
                    strides=(1, 1, 1),
                    activation='sigmoid',
                    name='layer_no_31_output')(conv9)

    model = Model(input=inputs, output=conv10)

    # weights = np.array([1.0, 1.0, 1.0])
    # loss = lf.weighted_categorical_crossentropy_loss(weights)
    # model.compile(optimizer=Adam(lr=1.0e-5), loss="categorical_crossentropy", metrics=["categorical_accuracy"])
    # model.compile(optimizer=Adam(lr=1.0e-5), loss=loss, metrics=["categorical_accuracy"])
    model.compile(optimizer=Adam(lr=1.0e-6),
                  loss="categorical_crossentropy",
                  metrics=["categorical_accuracy"])
    # model.compile(optimizer=Adam(lr=1.0e-5), loss=lf.binary_crossentropy_loss, metrics=[lf.binary_crossentropy])

    return model
Ejemplo n.º 29
0
    def build_model(self,
                    img_shape=(32, 168, 168),
                    learning_rate=5e-5,
                    gpu_id=None,
                    nb_gpus=None,
                    trained_model=None):
        input_img = Input((*img_shape, 1), name='img_inp')
        unsupervised_label = Input((*img_shape, 3), name='unsup_label_inp')
        supervised_flag = Input(shape=img_shape, name='flag_inp')

        kernel_init = 'he_normal'
        sfs = 8  # start filter size
        bn = True
        do = True

        # standard training branch, without Monte-Carlo dropout

        #######################################################
        conv1, conv1_b_m = self.downLayer(input_img, sfs, 1, bn)
        conv2, conv2_b_m = self.downLayer(conv1, sfs * 2, 2, bn)

        conv3 = Conv3D(sfs * 4, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv' + str(3) + '_1')(conv2)
        if bn:
            conv3 = BatchNormalization()(conv3)
        conv3 = Conv3D(sfs * 8, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv' + str(3) + '_2')(conv3)
        if bn:
            conv3 = BatchNormalization()(conv3)
        pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)

        conv4 = Conv3D(sfs * 16, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv4_1')(pool3)
        if bn:
            conv4 = BatchNormalization()(conv4)
        if do:
            conv4 = Dropout(0.5, seed=4, name='Dropout_' + str(4))(conv4)
        conv4 = Conv3D(sfs * 16, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv4_2')(conv4)
        if bn:
            conv4 = BatchNormalization()(conv4)

        # conv5 = upLayer(conv4, conv3_b_m, sfs*16, 5, bn, do)
        up1 = Conv3DTranspose(sfs * 16, (2, 2, 2),
                              strides=(2, 2, 2),
                              activation='relu',
                              padding='same',
                              name='up' + str(5))(conv4)
        up1 = concatenate([up1, conv3])
        conv5 = Conv3D(int(sfs * 8), (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv' + str(5) + '_1')(up1)
        if bn:
            conv5 = BatchNormalization()(conv5)
        if do:
            conv5 = Dropout(0.5, seed=5, name='Dropout_' + str(5))(conv5)
        conv5 = Conv3D(int(sfs * 8), (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv' + str(5) + '_2')(conv5)
        if bn:
            conv5 = BatchNormalization()(conv5)

        conv6 = self.upLayer(conv5, conv2_b_m, sfs * 8, 6, bn, do)
        conv7 = self.upLayer(conv6, conv1_b_m, sfs * 4, 7, bn, do)

        conv_out = Conv3D(3, (1, 1, 1),
                          name='conv_final_softmax',
                          activation='softmax')(conv7)

        bg_sm_out = Lambda(lambda x: x[:, :, :, :, 0], name='bg')(conv_out)
        z1_sm_out = Lambda(lambda x: x[:, :, :, :, 1], name='z1')(conv_out)
        z2_sm_out = Lambda(lambda x: x[:, :, :, :, 2], name='z2')(conv_out)

        bg_ensemble_pred = Lambda(lambda x: x[:, :, :, :, 0],
                                  name='bgu')(unsupervised_label)
        z1_ensemble_pred = Lambda(lambda x: x[:, :, :, :, 1],
                                  name='z1u')(unsupervised_label)
        z2_ensemble_pred = Lambda(lambda x: x[:, :, :, :, 2],
                                  name='z2u')(unsupervised_label)

        bg = K.stack([bg_ensemble_pred, supervised_flag])
        z1 = K.stack([z1_ensemble_pred, supervised_flag])
        z2 = K.stack([z2_ensemble_pred, supervised_flag])

        optimizer = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999)

        if (nb_gpus is None):
            p_model = Model([input_img, unsupervised_label, supervised_flag],
                            [bg_sm_out, z1_sm_out, z2_sm_out])
            if trained_model is not None:
                p_model.load_weights(trained_model)
                weights_list = p_model.get_weights()
            p_model.compile(optimizer=optimizer,
                            loss={
                                'bg':
                                self.semi_supervised_loss(
                                    bg, unsup_loss_class_wt=1),
                                'z1':
                                self.semi_supervised_loss(z1, 1),
                                'z2':
                                self.semi_supervised_loss(z2, 1),
                            },
                            metrics={
                                'bg': [
                                    self.dice_coef,
                                    self.unsup_dice_tb(bg, 1),
                                    self.dice_tb(bg, 1)
                                ],
                                'z1': [
                                    self.dice_coef,
                                    self.unsup_dice_tb(z1, 1),
                                    self.dice_tb(z1, 1)
                                ],
                                'z2': [
                                    self.dice_coef,
                                    self.unsup_dice_tb(z2, 1),
                                    self.dice_tb(z2, 1)
                                ],
                            },
                            loss_weights={
                                'bg': 1,
                                'z1': 1,
                                'z2': 1
                            })
        else:
            with tf.device(gpu_id):
                model = Model([input_img, unsupervised_label, supervised_flag],
                              [conv_out])
                if trained_model is not None:
                    model.load_weights(trained_model)
                    weights_list = model.get_weights()

                p_model = multi_gpu_model(model, gpus=nb_gpus)
                p_model.compile(optimizer=optimizer,
                                loss={
                                    'bg':
                                    self.semi_supervised_loss(
                                        bg, unsup_loss_class_wt=1),
                                    'z1':
                                    self.semi_supervised_loss(z1, 1),
                                    'z2':
                                    self.semi_supervised_loss(z2, 1),
                                },
                                metrics={
                                    'bg': [
                                        self.dice_coef,
                                        self.unsup_dice_tb(bg, 1),
                                        self.dice_tb(bg, 1)
                                    ],
                                    'z1': [
                                        self.dice_coef,
                                        self.unsup_dice_tb(z1, 1),
                                        self.dice_tb(z1, 1)
                                    ],
                                    'z2': [
                                        self.dice_coef,
                                        self.unsup_dice_tb(z2, 1),
                                        self.dice_tb(z2, 1)
                                    ],
                                },
                                loss_weights={
                                    'bg': 1,
                                    'z1': 1,
                                    'z2': 1
                                })

        #################################end###########################

        #####################MC########################################

        conv1, conv1_b_m = self.downLayer_MC(input_img, sfs, 1, bn)
        conv2, conv2_b_m = self.downLayer_MC(conv1, sfs * 2, 2, bn)

        conv3 = Conv3D(sfs * 4, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv_mc' + str(3) + '_1')(conv2)
        if bn:
            conv3 = BatchNormalization()(conv3)
        conv3 = Conv3D(sfs * 8, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv_mc' + str(3) + '_2')(conv3)
        if bn:
            conv3 = BatchNormalization()(conv3)
        pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)
        # conv3, conv3_b_m = downLayer(conv2, sfs*4, 3, bn)

        conv4 = Conv3D(sfs * 16, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv4_1_mc')(pool3)
        if bn:
            conv4 = BatchNormalization()(conv4)
        if do:
            conv4 = Dropout(0.5, seed=4,
                            name='Dropout_mc' + str(4))(conv4, training=True)
        conv4 = Conv3D(sfs * 16, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv4_2_mc')(conv4)
        if bn:
            conv4 = BatchNormalization()(conv4)

        # conv5 = upLayer(conv4, conv3_b_m, sfs*16, 5, bn, do)
        up1 = Conv3DTranspose(sfs * 16, (2, 2, 2),
                              strides=(2, 2, 2),
                              activation='relu',
                              padding='same',
                              name='up_mc' + str(5))(conv4)
        up1 = concatenate([up1, conv3])
        conv5 = Conv3D(int(sfs * 8), (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv_mc' + str(5) + '_1')(up1)
        if bn:
            conv5 = BatchNormalization()(conv5)
        if do:
            conv5 = Dropout(0.5, seed=5,
                            name='Dropout_mc' + str(5))(conv5, training=True)
        conv5 = Conv3D(int(sfs * 8), (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv_mc' + str(5) + '_2')(conv5)
        if bn:
            conv5 = BatchNormalization()(conv5)

        conv6 = self.upLayer_MC(conv5, conv2_b_m, sfs * 8, 6, bn, do)
        conv7 = self.upLayer_MC(conv6, conv1_b_m, sfs * 4, 7, bn, do)

        conv_out_mc = Conv3D(3, (1, 1, 1),
                             activation='softmax',
                             name='conv_final_softmax_mc')(conv7)
        bg_sm_mc_out = Lambda(lambda x: x[:, :, :, :, 0],
                              name='bg')(conv_out_mc)
        z1_sm_mc_out = Lambda(lambda x: x[:, :, :, :, 1],
                              name='z1')(conv_out_mc)
        z2_sm_mc_out = Lambda(lambda x: x[:, :, :, :, 2],
                              name='z2')(conv_out_mc)

        model_MC = Model([input_img, unsupervised_label, supervised_flag],
                         [bg_sm_mc_out, z1_sm_mc_out, z2_sm_mc_out])

        model_MC.set_weights(weights_list)
        # for layer in model_MC.layers:
        #    layer.trainable = False

        ########################################################

        return p_model, model_MC
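The *_mc branch above rebuilds the network with `Dropout(...)(x, training=True)` so that dropout stays active at prediction time (Monte-Carlo dropout) while reusing the weights copied over with `set_weights`. A minimal standalone sketch of that call pattern on toy shapes, unrelated to the actual volumes and filters above:

# Minimal sketch of the Monte-Carlo dropout pattern used in the *_mc branch:
# calling Dropout with training=True keeps it stochastic even in predict(),
# so repeated forward passes differ and their spread estimates uncertainty.
import numpy as np
from keras.layers import Input, Dense, Dropout
from keras.models import Model

inp = Input((16,))                        # toy input, not the 3D volumes above
h = Dense(32, activation='relu')(inp)
h = Dropout(0.5)(h, training=True)        # dropout active at inference time
out = Dense(1)(h)
mc_model = Model(inp, out)

x = np.random.rand(4, 16).astype('float32')
samples = np.stack([mc_model.predict(x) for _ in range(10)])
print(samples.mean(axis=0).shape, samples.std(axis=0).shape)  # per-input mean/std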
                #     input1)  # change to leakyRelu to avoid dead neurons
                # x = Conv3D(64, (3, 3, 3), padding='same', activation='relu')(x)  # change to leakyRelu to avoid dead neurons
                # x = MaxPooling3D((2, 2, 2))(x)
                # x2 = Flatten()(x)
                fms = 8
                input1 = Input(shape=(13, 13, 13, 1), name="inputs")

                params = dict(kernel_size=(3, 3, 3), activation=None,
                              padding="same", kernel_initializer="he_uniform")

                # Transposed convolution parameters
                params_trans = dict(kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")

                # BEGIN - Encoding path
                encodeA = ConvolutionBlock(input1, "encodeA", fms, params)
                poolA = MaxPooling3D(name="poolA", pool_size=(2, 2, 2))(encodeA)

                encodeB = ConvolutionBlock(poolA, "encodeB", fms * 2, params)
                poolB = MaxPooling3D(name="poolB", pool_size=(2, 2, 2))(encodeB)
                #
                # encodeC = ConvolutionBlock(poolB, "encodeC", fms * 4, params)
                # poolC = MaxPooling3D(name="poolC", pool_size=(2, 2, 2))(encodeC)
                #
                # encodeD = ConvolutionBlock(poolC, "encodeD", fms * 8, params)

                x2 = Flatten()(poolB)
            else:
                # # Relu
                input1 = keras.layers.Input(shape=(XIMs_train.shape[1],))
                # ,kernel_regularizer=keras.regularizers.l2(l=0.2)
                x1 = keras.layers.Dense(32, input_dim=XIMs_train.shape[1], activation='relu')(input1)