Code example #1
def res_net_3d(num_channel):
    
    """
    The resnet with same number of 3x3x3 convolutional layers as 3D densenet
    Each block has 4 small conv_bn_relu blocks
    num_channel: number of channels
    """
    
    # input layer
    inp = Input((16, 16, 16, num_channel))
    # first block
    x = conv_bn_relu(inp, 64, 3, "same")
    x1 = x
    for _ in range(4):
        x = conv_bn_relu(x, 64, 3, "same")
    x = Add()([x, x1])
    x = AveragePooling3D(2)(x)
    
    # second block
    x1 = x
    for _ in range(4):
        x = conv_bn_relu(x, 64, 3, "same")
    x = Add()([x, x1])
    x = AveragePooling3D(2)(x)
    
    # final block
    x = Flatten()(x)
    x = dense_bn_relu(x, 256, 0.1)
    x = dense_bn_relu(x, 128, 0.1)
    y = Dense(1)(x)
    return Model(inp, y)
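This and the later DenseNet examples assume two project helpers, conv_bn_relu and dense_bn_relu, that are not shown. A minimal sketch of plausible implementations, matching the call sites above (hypothetical; the original project's versions may differ):

from keras.layers import (Conv3D, Dense, BatchNormalization,
                          Activation, Dropout)

def conv_bn_relu(x, filters, kernel_size, padding):
    # Conv3D -> BatchNorm -> ReLU, as used by conv_bn_relu(x, 64, 3, "same")
    x = Conv3D(filters, kernel_size, padding=padding)(x)
    x = BatchNormalization()(x)
    return Activation("relu")(x)

def dense_bn_relu(x, units, drop_rate):
    # Dense -> BatchNorm -> ReLU -> Dropout, as used by dense_bn_relu(x, 256, 0.1)
    x = Dense(units)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return Dropout(drop_rate)(x)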
Code example #2
def densenet_baseline_1(num_channel):
    """
    The baseline (regular) DenseNet with two DenseNet blocks
    Each block has 4 small conv_bn_relu blocks
    num_channel: number of channels
    """

    # input layer
    inp = Input((16, 16, 16, num_channel))
    # first transition layer
    x = conv_bn_relu(inp, 64, 3, "same")
    # first block
    x1 = conv_bn_relu(x, 256, 1, "same")
    x1 = conv_bn_relu(x1, 64, 1, "same")
    x1 = conv_bn_relu(x1, 64, 3, "same")
    x2 = Concatenate()([x, x1])
    x2 = conv_bn_relu(x2, 256, 1, "same")
    x2 = conv_bn_relu(x2, 64, 1, "same")
    x2 = conv_bn_relu(x2, 64, 3, "same")
    x3 = Concatenate()([x, x1, x2])
    x3 = conv_bn_relu(x3, 256, 1, "same")
    x3 = conv_bn_relu(x3, 64, 1, "same")
    x3 = conv_bn_relu(x3, 64, 3, "same")
    x4 = Concatenate()([x, x1, x2, x3])
    x4 = conv_bn_relu(x4, 256, 1, "same")
    x4 = conv_bn_relu(x4, 64, 1, "same")
    x4 = conv_bn_relu(x4, 64, 3, "same")
    x = AveragePooling3D(2)(x4)

    # second transition layer
    x = conv_bn_relu(x, 64, 1, "same")
    # second block
    x1 = conv_bn_relu(x, 256, 1, "same")
    x1 = conv_bn_relu(x1, 64, 1, "same")
    x1 = conv_bn_relu(x1, 64, 3, "same")
    x2 = Concatenate()([x, x1])
    x2 = conv_bn_relu(x2, 256, 1, "same")
    x2 = conv_bn_relu(x2, 64, 1, "same")
    x2 = conv_bn_relu(x2, 64, 3, "same")
    x3 = Concatenate()([x, x1, x2])
    x3 = conv_bn_relu(x3, 256, 1, "same")
    x3 = conv_bn_relu(x3, 64, 1, "same")
    x3 = conv_bn_relu(x3, 64, 3, "same")
    x4 = Concatenate()([x, x1, x2, x3])
    x4 = conv_bn_relu(x4, 256, 1, "same")
    x4 = conv_bn_relu(x4, 64, 1, "same")
    x4 = conv_bn_relu(x4, 64, 3, "same")
    x = AveragePooling3D(2)(x4)

    # final block
    x = Flatten()(x)
    x = dense_bn_relu(x, 256, 0.1)
    x = dense_bn_relu(x, 128, 0.1)
    y = Dense(1)(x)
    return Model(inp, y)
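A hedged smoke test for the builder above (assumes the Keras layers used in the snippet plus the conv_bn_relu/dense_bn_relu sketches are in scope; a 2-channel input is a hypothetical choice):

model = densenet_baseline_1(num_channel=2)
model.compile(optimizer="adam", loss="mse")
model.summary()  # ends in Dense(1): a scalar regression head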
Code example #3
 def cnn_3D(self, input_shape, modual=''):
     # build the Sequential model
     model = Sequential()
     # Keras Conv3D only accepts padding='valid' or 'same', so the depth axis
     # is padded explicitly; a 'valid' 3x3x3 conv then keeps the depth at 5
     model.add(ZeroPadding3D((0, 0, 1), input_shape=input_shape))  # 40x40x5x1 -> 40x40x7x1
     model.add(Convolution3D(
             filters=8,
             kernel_size=(3, 3, 3),
             activation='relu',
             kernel_initializer='he_normal',
             name=modual + 'conv1'
         ))  # now 38x38x5x8
     model.add(MaxPooling3D(pool_size=(2, 2, 1)))  # now 19x19x5x8
     model.add(Convolution3D(
             filters=16,
             kernel_size=(4, 4, 3),
             activation='relu',
             kernel_initializer='he_normal',
             name=modual + 'conv2'
         ))  # now 16x16x3x16
     model.add(MaxPooling3D(pool_size=(2, 2, 1)))  # now 8x8x3x16
     model.add(Convolution3D(
             filters=32,
             kernel_size=(3, 3, 3),
             activation='relu',
             kernel_initializer='he_normal',
             name=modual + 'conv3'
         ))  # now 6x6x1x32
     model.add(AveragePooling3D(pool_size=(6, 6, 1)))  # now 1x1x1x32
     model.add(Flatten())
     # model.add(Dropout(0.5))
     model.add(Dense(32, activation='relu', name=modual + 'fc1'))

     return model
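Since `self` is unused, the detached method can be smoke-tested directly; a hypothetical check, assuming the Keras imports used above (Sequential, ZeroPadding3D, Convolution3D, the pooling layers, Flatten, Dense) are in scope:

model = cnn_3D(None, input_shape=(40, 40, 5, 1), modual='demo_')
model.summary()  # ends in Flatten -> Dense(32): output shape (None, 32)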
Code example #4
def transition_block3d(x,
                       stage,
                       nb_filter,
                       compression=1.0,
                       dropout_rate=None,
                       weight_decay=1E-4):
    ''' Apply BatchNorm, a 1x1x1 convolution, and average pooling, with optional compression and dropout
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_filter: number of filters
            compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
            dropout_rate: dropout rate
            weight_decay: weight decay factor
    '''

    eps = 1.1e-5
    conv_name_base = '3dconv' + str(stage) + '_blk'
    relu_name_base = '3drelu' + str(stage) + '_blk'
    pool_name_base = '3dpool' + str(stage)

    x = BatchNormalization(epsilon=eps, axis=4, name=conv_name_base + '_bn')(x)
    x = Scale(axis=4, name=conv_name_base + '_scale')(x)
    x = Activation('relu', name=relu_name_base)(x)
    x = Conv3D(int(nb_filter * compression), (1, 1, 1),
               name=conv_name_base,
               use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    x = AveragePooling3D((2, 2, 1), strides=(2, 2, 1), name=pool_name_base)(x)

    return x
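A hedged usage sketch (Scale is a custom layer from the original DenseNet port, assumed importable from the surrounding project; the input size is illustrative):

inp = Input((8, 8, 8, 48))
out = transition_block3d(inp, stage=2, nb_filter=48, compression=0.5,
                         dropout_rate=0.2)
# the 1x1x1 conv emits int(48 * 0.5) = 24 maps; the (2, 2, 1) pooling halves
# only the first two spatial dims: (8, 8, 8, 48) -> (4, 4, 8, 24)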
Code example #5
    def build(input_shape,
              num_outputs,
              block_fn,
              repetitions,
              reg_factor,
              drop_rate=0):

        _handle_data_format()
        if len(input_shape) != 4:
            raise ValueError("Input shape should be a tuple "
                             "(conv_dim1, conv_dim2, conv_dim3, channels) "
                             "for tensorflow as backend or "
                             "(channels, conv_dim1, conv_dim2, conv_dim3) "
                             "for theano as backend")

        block_fn = _get_block(block_fn)
        input = Input(shape=input_shape)
        # first conv
        conv1 = _conv_bn_relu3D(filters=64,
                                kernel_size=(7, 7, 7),
                                strides=(2, 2, 2),
                                kernel_regularizer=l2(reg_factor))(input)
        pool1 = MaxPooling3D(pool_size=(3, 3, 3),
                             strides=(2, 2, 2),
                             padding="same")(conv1)

        # repeat blocks
        block = pool1
        filters = 64
        for i, r in enumerate(repetitions):
            block = _residual_block3d(block_fn,
                                      filters=filters,
                                      kernel_regularizer=l2(reg_factor),
                                      repetitions=r,
                                      is_first_layer=(i == 0))(block)
            filters *= 2
            block = Dropout(drop_rate)(block)

        # last activation
        block_output = _bn_relu(block)

        # average pool and classification
        pool2 = AveragePooling3D(pool_size=(block.shape[DIM1_AXIS],
                                            block.shape[DIM2_AXIS],
                                            block.shape[DIM3_AXIS]),
                                 strides=(1, 1, 1))(block_output)
        flatten1 = Flatten()(pool2)
        activation = "softmax" if num_outputs > 1 else "sigmoid"
        dense = Dense(units=num_outputs,
                      kernel_initializer="he_normal",
                      activation=activation,
                      kernel_regularizer=l2(reg_factor))(flatten1)

        model = Model(inputs=input, outputs=dense)
        return model
Code example #6
def classifier_block(x, nb_filters, nb_classes, activation, name):
    """
    Classifier block
    :param x: input tensor
    :param nb_filters: integer, number of filters
    :param nb_classes: integer, number of classes
    :param activation: string, activation function
    :param name: string, block label

    :return: block tensor
    """
    x = _convbnrelu(x,
                    nb_filters=nb_filters,
                    stride=2,
                    kernel_size=3,
                    name=name + "_1")
    x = _convbnrelu(x,
                    nb_filters=nb_filters,
                    stride=2,
                    kernel_size=3,
                    name=name + "_2")
    x = AveragePooling3D(pool_size=2,
                         strides=2,
                         padding='same',
                         name=name + '_avg_pool3d')(x)
    x = Flatten(name=name + "_flatten")(x)
    out = Dense(units=nb_classes, activation=activation,
                name=name + "_dense")(x)
    return out
Code example #7
    def build(input_shape, num_outputs, block_fn, repetitions, reg_factor):
        """Instantiate a vanilla ResNet3D keras model.

        # Arguments
            input_shape: Tuple of input shape in the format
            (conv_dim1, conv_dim2, conv_dim3, channels) if dim_ordering='tf'
            (channels, conv_dim1, conv_dim2, conv_dim3) if dim_ordering='th'
            num_outputs: The number of outputs at the final softmax layer
            block_fn: Unit block to use {'basic_block', 'bottleneck_block'}
            repetitions: Repetitions of unit blocks
        # Returns
            model: a 3D ResNet model that takes a 5D tensor (volumetric images
            in batch) as input and returns a 1D vector (prediction) as output.
        """
        _handle_data_format()
        if len(input_shape) != 4:
            raise ValueError("Input shape should be a tuple "
                             "(conv_dim1, conv_dim2, conv_dim3, channels) "
                             "for tensorflow as backend or "
                             "(channels, conv_dim1, conv_dim2, conv_dim3) "
                             "for theano as backend")

        block_fn = _get_block(block_fn)
        input = Input(shape=input_shape)
        # first conv
        conv1 = _conv_bn_relu3D(filters=64,
                                kernel_size=(7, 7, 7),
                                strides=(2, 2, 2),
                                kernel_regularizer=l2(reg_factor))(input)
        pool1 = MaxPooling3D(pool_size=(3, 3, 3),
                             strides=(2, 2, 2),
                             padding="same")(conv1)

        # repeat blocks
        block = pool1
        filters = 64
        for i, r in enumerate(repetitions):
            block = _residual_block3d(block_fn,
                                      filters=filters,
                                      kernel_regularizer=l2(reg_factor),
                                      repetitions=r,
                                      is_first_layer=(i == 0))(block)
            filters *= 2

        # last activation
        block_output = _bn_relu(block)

        # average pool and classification
        pool2 = AveragePooling3D(pool_size=(block.shape[DIM1_AXIS],
                                            block.shape[DIM2_AXIS],
                                            block.shape[DIM3_AXIS]),
                                 strides=(1, 1, 1))(block_output)
        flatten1 = Flatten()(pool2)
        dense = Dense(units=num_outputs,
                      kernel_initializer="he_normal",
                      kernel_regularizer=l2(reg_factor))(flatten1)

        model = Model(inputs=input, outputs=dense)
        return model
Code example #8
def mr_densenet_with_flow(num_channel):
    """
    The MR-DenseNet with two DenseNet blocks. This implementation is exactly the same as original DenseNet.
    Each block has 4 small conv_bn_relu blocks
    At the end of each block, we concatenate the center segment of the pooling layer
    num_channel: number of channels
    """

    # input layer
    inp = Input((16, 16, 16, num_channel))
    # first transition layer
    x = conv_bn_relu(inp, 64, 3, "same")
    # first block
    for _ in range(4):
        x1 = conv_bn_relu(x, 256, 1, "same")
        x1 = conv_bn_relu(x1, 64, 1, "same")
        x1 = conv_bn_relu(x1, 64, 3, "same")
        x = Concatenate()([x, x1])
    x1 = AveragePooling3D(2)(x)
    x2 = Lambda(lambda x: x[:, 4:-4, 4:-4, 4:-4])(x)
    x = Concatenate()([x1, x2])

    # second transition layer
    x = conv_bn_relu(x, 256, 1, "same")
    x = conv_bn_relu(x, 64, 1, "same")
    # second block
    for _ in range(4):
        x1 = conv_bn_relu(x, 256, 1, "same")
        x1 = conv_bn_relu(x1, 64, 1, "same")
        x1 = conv_bn_relu(x1, 64, 3, "same")
        x = Concatenate()([x, x1])
    x1 = AveragePooling3D(2)(x)
    x2 = Lambda(lambda x: x[:, 2:-2, 2:-2, 2:-2])(x)
    x = Concatenate()([x1, x2])

    # final transition layer
    x = conv_bn_relu(x, 256, 1, "same")
    x = conv_bn_relu(x, 64, 1, "same")
    x = Flatten()(x)
    x = dense_bn_relu(x, 256, 0.1)
    x = dense_bn_relu(x, 128, 0.1)
    y = Dense(1)(x)
    return Model(inp, y)
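The Lambda crop is what makes the multi-resolution concatenation work: on a 16x16x16 block output, AveragePooling3D(2) gives 8x8x8, and the slice [4:-4] also gives 8x8x8, so the two branches align spatially. A standalone shape check (channel count chosen for illustration):

from keras.layers import Input, AveragePooling3D, Lambda, Concatenate
from keras.models import Model

t = Input((16, 16, 16, 8))
pooled = AveragePooling3D(2)(t)                     # -> (None, 8, 8, 8, 8)
crop = Lambda(lambda v: v[:, 4:-4, 4:-4, 4:-4])(t)  # -> (None, 8, 8, 8, 8)
both = Concatenate()([pooled, crop])                # -> (None, 8, 8, 8, 16)
print(Model(t, both).output_shape)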
Code example #9
def discriminator(fixed_bn = False, discr_drop_out=0.2):

    image = Input(shape=(25, 25, 25, 1), name='image')

    bnm = 2 if fixed_bn else 0
    x = _Conv3D(32, 5, 5, 5, border_mode='same',
                name='disc_c1')(image)
    x = LeakyReLU()(x)
    x = Dropout(discr_drop_out)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = _Conv3D(8, 5, 5, 5, border_mode='valid',
                name='disc_c2')(x)
    x = LeakyReLU()(x)
    x = _BatchNormalization(name='disc_bn1', mode=bnm)(x)
    x = Dropout(discr_drop_out)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = _Conv3D(8, 5, 5, 5, border_mode='valid',
                name='disc_c3')(x)
    x = LeakyReLU()(x)
    x = _BatchNormalization(name='disc_bn2',
                            # momentum = 0.00001
                            mode=bnm)(x)
    x = Dropout(discr_drop_out)(x)

    x = ZeroPadding3D((1, 1, 1))(x)
    x = _Conv3D(8, 5, 5, 5, border_mode='valid',
                name='disc_c4')(x)
    x = LeakyReLU()(x)
    x = _BatchNormalization(name='disc_bn3', mode=bnm)(x)
    x = Dropout(discr_drop_out)(x)

    x = AveragePooling3D((2, 2, 2))(x)
    h = Flatten()(x)

    dnn = _Model(input=image, output=h, name='dnn')

    dnn_out = dnn(image)

    fake = _Dense(1, activation='sigmoid', name='classification')(dnn_out)
    aux = _Dense(1, activation='linear', name='energy')(dnn_out)
    ecal = Lambda(lambda x: K.sum(x, axis=(1, 2, 3)), name='sum_cell')(image)

    return _Model(output=[fake, aux, ecal], input=image, name='discriminator_model')
Code example #10
def resnet(shape, classes, is_regression=False):
    inpt = Input(shape=shape)
    x = ZeroPadding3D((1, 1, 1), data_format='channels_first')(inpt)

    # conv1
    x = Conv3d_BN(x,
                  nb_filter=16,
                  kernel_size=(6, 6, 6),
                  strides=1,
                  padding='valid')
    x = MaxPooling3D(pool_size=(3, 3, 3),
                     strides=2,
                     data_format='channels_first')(x)

    # conv2_x
    x = identity_Block(x,
                       nb_filter=32,
                       kernel_size=(2, 2, 2),
                       strides=1,
                       with_conv_shortcut=True)
    x = identity_Block(x, nb_filter=32, kernel_size=(2, 2, 2))
    #     x = identity_Block(x, nb_filter=64, kernel_size=(3, 3, 3))

    # conv3_x
    x = identity_Block(x,
                       nb_filter=64,
                       kernel_size=(2, 2, 2),
                       strides=1,
                       with_conv_shortcut=True)
    x = identity_Block(x, nb_filter=64, kernel_size=(2, 2, 2))
    #     x = identity_Block(x, nb_filter=128, kernel_size=(3, 3, 3))
    #     x = identity_Block(x, nb_filter=128, kernel_size=(3, 3, 3))

    #     # conv4_x
    #     x = identity_Block(x, nb_filter=256, kernel_size=(3, 3, 3), strides=2, with_conv_shortcut=True)
    #     x = identity_Block(x, nb_filter=256, kernel_size=(3, 3, 3))
    #     x = identity_Block(x, nb_filter=256, kernel_size=(3, 3, 3))

    # keep data_format/axis consistent with the channels_first layers above
    x = AveragePooling3D(pool_size=(2, 2, 2), data_format='channels_first')(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation('relu')(x)
    x = Flatten()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.2)(x)
    if is_regression:
        x = Dense(classes)(x)
    else:
        x = Dense(classes, activation='softmax')(x)

    model = Model(inputs=inpt, outputs=x)

    return model
Code example #11
File: voxresnet.py Project: conanhung/keras-resnet3d
    def build_classification(input_shape, num_outputs):
        """Instantiate a keras model for VoxResNet
        # Arguments
            input_shape: Tuple of input shape in the format
            (conv_dim1, conv_dim2, conv_dim3, channels) if dim_ordering='tf'
            (channels, conv_dim1, conv_dim2, conv_dim3) if dim_ordering='th'
            num_outputs: The number of outputs at the final softmax layer.
        # Returns
            model: a VoxResNet model that takes a 5D tensor (volumetric images
            in batch) as input and returns a 1D vector (prediction) as output.
        """

        _handle_dim_ordering()
        if len(input_shape) != 4:
            raise Exception("Input shape should be a tuple "
                            "(conv_dim1, conv_dim2, conv_dim3, channels) "
                            "for tensorflow as backend or "
                            "(channels, conv_dim1, conv_dim2, conv_dim3) "
                            "for theano as backend")

        input = Input(shape=input_shape)
        conv1a = Convolution3D(nb_filter=32,
                               kernel_dim1=3,
                               kernel_dim2=3,
                               kernel_dim3=3,
                               init="he_normal",
                               border_mode="same",
                               W_regularizer=l2(1.e-4))(input)
        conv1b = _bn_relu_conv3d(nb_filter=32,
                                 kernel_dim1=3,
                                 kernel_dim2=3,
                                 kernel_dim3=3,
                                 subsample=(1, 1, 1))(conv1a)
        voxres9 = _voxel_residual_block(nb_filter=64,
                                        repetitions=3)(conv1b)[-1]

        # Last activation
        block = _bn_relu(voxres9)
        block_norm = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(block)
        block_output = Activation("relu")(block_norm)

        # Classifier block
        pool1 = AveragePooling3D(pool_size=(block._keras_shape[DIM1_AXIS],
                                            block._keras_shape[DIM2_AXIS],
                                            block._keras_shape[DIM3_AXIS]),
                                 strides=(1, 1, 1))(block_output)
        flatten1 = Flatten()(pool1)
        dense = Dense(output_dim=num_outputs,
                      init="he_normal",
                      activation="softmax")(flatten1)

        model = Model(input=input, output=dense)
        return model
Code example #12
def create_wide_residual_network(input,
                                 nb_classes=100,
                                 N=2,
                                 k=1,
                                 dropout=0.0,
                                 verbose=1):
    """
    Creates a Wide Residual Network with specified parameters

    :param input: Input Keras object
    :param nb_classes: Number of output classes
    :param N: Depth of the network. Compute N = (n - 4) / 6.
              Example : For a depth of 16, n = 16, N = (16 - 4) / 6 = 2
              Example2: For a depth of 28, n = 28, N = (28 - 4) / 6 = 4
              Example3: For a depth of 40, n = 40, N = (40 - 4) / 6 = 6
    :param k: Width of the network.
    :param dropout: Adds dropout if value is greater than 0.0
    :param verbose: Debug info to describe created WRN
    :return: the output tensor of the network (softmax over nb_classes)
    """
    x = initial_conv(input)
    nb_conv = 4

    for i in range(N):
        x = conv1_block(x, k, dropout)
        nb_conv += 2

    x = MaxPooling3D((2, 2, 2))(x)

    for i in range(N):
        x = conv2_block(x, k, dropout)
        nb_conv += 2

    #x = MaxPooling3D((2,2,2))(x)

    #for i in range(N):
    #    x = conv3_block(x, k, dropout)
    #    nb_conv += 2

    x = AveragePooling3D((8, 8, 8))(x)  # strides=(2,2,2)
    x = Flatten()(x)

    x = Dense(nb_classes,
              activation='softmax',
              kernel_regularizer=l2(weight_decay),
              use_bias=use_bias)(x)

    if verbose:
        print("Wide Residual Network-%d-%d created." % (nb_conv, k))
    return x
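Depth bookkeeping for the builder above, matching the docstring's N = (n - 4) / 6 with the third conv group commented out (a worked check, not part of the original):

N = 2
nb_conv = 4 + 2 * N + 2 * N   # initial convs + conv1 blocks + conv2 blocks
assert nb_conv == 12          # re-enabling the conv3 group adds 2 * N more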
Code example #13
 def build(input_shape, num_outputs, reg_factor):
     input = Input(shape=input_shape)
     x = _conv_bn_relu3D(filters=64,
                         kernel_size=(7, 7, 7),
                         strides=(2, 2, 2),
                         kernel_regularizer=l2(reg_factor))(input)
     x = MaxPooling3D(pool_size=(1, 3, 3),
                      strides=(1, 2, 2),
                      padding='same')(x)
     x = _conv_bn_relu3D(filters=64,
                         kernel_size=(1, 1, 1),
                         strides=(1, 1, 1),
                         kernel_regularizer=l2(reg_factor))(x)
     x = _conv_bn_relu3D(filters=192,
                         kernel_size=(3, 3, 3),
                         strides=(1, 1, 1),
                         kernel_regularizer=l2(reg_factor))(x)
     x = MaxPooling3D(pool_size=(1, 3, 3),
                      strides=(1, 2, 2),
                      padding='same')(x)
     x = _inception3d(x)
     x = _inception3d(x)
     x = MaxPooling3D(pool_size=(3, 3, 3),
                      strides=(2, 2, 2),
                      padding='same')(x)
     x = _inception3d(x)
     x = _inception3d(x)
     x = _inception3d(x)
     x = _inception3d(x)
     x = _inception3d(x)
     x = MaxPooling3D(pool_size=(2, 2, 2),
                      strides=(2, 2, 2),
                      padding='same')(x)
     x = _inception3d(x)
     x = _inception3d(x)
     x = AveragePooling3D(pool_size=(2, 4, 4),
                          strides=(1, 1, 1),
                          padding='valid')(x)
     # x = _conv_bn_relu3D(filters=192, kernel_size=(1, 1, 1),
     #                     strides=(1, 1, 1),
     #                     kernel_regularizer=l2(reg_factor))(x)
     flatten1 = Flatten()(x)
     out = Dense(units=num_outputs,
                 kernel_initializer="he_normal",
                 activation="sigmoid",
                 kernel_regularizer=l2(reg_factor))(flatten1)
     model = Model(inputs=input, outputs=out)
     return model
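The _inception3d helper is not shown in this snippet. A hypothetical sketch of what such a module could look like (a minimal 3D Inception block with four parallel branches; the original's filter counts may differ):

from keras.layers import Conv3D, MaxPooling3D, Concatenate

def _inception3d(x, f=64):
    # 1x1x1 branch
    b1 = Conv3D(f, 1, padding='same', activation='relu')(x)
    # 1x1x1 bottleneck followed by 3x3x3
    b3 = Conv3D(f, 1, padding='same', activation='relu')(x)
    b3 = Conv3D(f, 3, padding='same', activation='relu')(b3)
    # 1x1x1 bottleneck followed by 5x5x5
    b5 = Conv3D(f // 2, 1, padding='same', activation='relu')(x)
    b5 = Conv3D(f // 2, 5, padding='same', activation='relu')(b5)
    # pooling branch with a 1x1x1 projection
    bp = MaxPooling3D(3, strides=1, padding='same')(x)
    bp = Conv3D(f // 2, 1, padding='same', activation='relu')(bp)
    return Concatenate()([b1, b3, b5, bp])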
Code example #14
File: cnn3d.py Project: sam186/LungCancer
    def __define_model__(self,
                         input_shape=(300, 1, 300, 300),
                         nb_classes=2,
                         n_filters=[32, 32],
                         filter_sizes=[6, 6],
                         filter_strides=[1, 1],
                         border_modes=['valid', 'same'],
                         pool_sizes=[2, 2],
                         filter_drops=[0.25, 0.25],
                         dense_ns=[256, 64],
                         dense_drops=[0.5, 0.5],
                         pool_method="max"):

        model = Sequential()
        model.add(
            Convolution3D(nb_filter=n_filters[0],
                          kernel_dim1=filter_sizes[0],
                          kernel_dim2=filter_sizes[0],
                          kernel_dim3=filter_sizes[0],
                          init='normal',
                          W_regularizer=l2(0.4),
                          subsample=(filter_strides[0], filter_strides[0],
                                     filter_strides[0]),
                          border_mode=border_modes[0],
                          input_shape=input_shape))
        model.add(Activation('relu'))
        if pool_method == "Max" or pool_method == "MaxPooling" or pool_method == "max":
            model.add(
                MaxPooling3D(pool_size=(pool_sizes[0], pool_sizes[0],
                                        pool_sizes[0]),
                             border_mode=border_modes[0]))
        elif pool_method == "Avg" or pool_method == "avg" or pool_method == "average" or pool_method == "Average":
            model.add(
                AveragePooling3D(pool_size=(pool_sizes[0], pool_sizes[0],
                                            pool_sizes[0]),
                                 border_mode=border_modes[0]))

        model.add(Dropout(filter_drops[0]))

        model.add(Flatten())
        model.add(Dense(dense_ns[0], init='normal'))
        model.add(Activation('relu'))
        model.add(Dropout(dense_drops[0]))

        model.add(Dense(1))
        model.add(Activation('sigmoid'))

        return model
Code example #15
File: ecalvegan3.py Project: jaybooth4/scc18
def discriminator():

    image = Input(shape=(25, 25, 25, 1))

    x = Conv3D(32, (5, 5, 5), padding='same')(image)
    x = LeakyReLU()(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = Conv3D(8, (5, 5, 5), padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((2, 2, 2))(x)  #added
    x = Conv3D(8, (5, 5, 5), padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((1, 1, 1))(x)
    x = Conv3D(8, (5, 5, 5), padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)
    
    x = ZeroPadding3D((1, 1, 1))(x)
    x = Conv3D(16, (3, 3, 3), padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    x = AveragePooling3D((2, 2, 2))(x)
    h = Flatten()(x)
    
    dnn = Model(image, h)
    dnn.summary()
    image = Input(shape=(25, 25, 25, 1))

    dnn_out = dnn(image)


    fake = Dense(1, activation='sigmoid', name='generation')(dnn_out)
    aux = Dense(1, activation='linear', name='auxiliary')(dnn_out)
    ecal = Lambda(lambda x: K.sum(x, axis=(1, 2, 3)))(image)
    Model(inputs=image, outputs=[fake, aux, ecal]).summary()
    return Model(inputs=image, outputs=[fake, aux, ecal])
Code example #16
def __transition_block(input,
                       nb_filter,
                       compression=1.0,
                       concat_axis=-1,
                       bn_axis=-1,
                       bias_allow=False):

    x = BatchNormalization(axis=bn_axis, epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv3D(int(nb_filter * compression),
               1,
               strides=1,
               kernel_initializer='he_normal',
               use_bias=bias_allow)(x)
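    # pool_size (2, 2, 2) with strides (2, 2, 1): height/width are halved,
    # while depth shrinks by only one voxel (overlapping windows along depth)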
    x = AveragePooling3D((2, 2, 2), strides=(2, 2, 1))(x)

    return x
Code example #17
def discriminator(keras_dformat='channels_last'):

    if keras_dformat == 'channels_last':
        dshape = (25, 25, 25, 1)
        daxis = (1, 2, 3)   # spatial axes for the energy sum
        baxis = -1          # channel axis for BatchNormalization
    else:
        dshape = (1, 25, 25, 25)
        daxis = (2, 3, 4)
        baxis = 1

    image = Input(shape=dshape)

    x = Conv3D(32, (5, 5, 5), data_format=keras_dformat, padding='same')(image)
    x = LeakyReLU()(x)
    x = Dropout(0.2)(x)

    # ZeroPadding3D/AveragePooling3D also need the data_format, otherwise the
    # channels_first branch would pad and pool the wrong axes
    x = ZeroPadding3D((2, 2, 2), data_format=keras_dformat)(x)
    x = Conv3D(8, (5, 5, 5), data_format=keras_dformat, padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization(axis=baxis)(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((2, 2, 2), data_format=keras_dformat)(x)
    x = Conv3D(8, (5, 5, 5), data_format=keras_dformat, padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization(axis=baxis)(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((1, 1, 1), data_format=keras_dformat)(x)
    x = Conv3D(8, (5, 5, 5), data_format=keras_dformat, padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization(axis=baxis)(x)
    x = Dropout(0.2)(x)

    x = AveragePooling3D((2, 2, 2), data_format=keras_dformat)(x)
    h = Flatten()(x)

    dnn = Model(image, h)

    dnn_out = dnn(image)

    fake = Dense(1, activation='sigmoid', name='generation')(dnn_out)
    aux = Dense(1, activation='linear', name='auxiliary')(dnn_out)
    ecal = Lambda(lambda x: K.sum(x, axis=daxis))(image)
    Model(inputs=image, outputs=[fake, aux, ecal]).summary()
    return Model(inputs=image, outputs=[fake, aux, ecal])
Code example #18
    def build(input_shape, num_outputs, block_fn, repetitions):
        """Builds a custom ResNet like architecture.
        Args:
            input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols)
            num_outputs: The number of outputs at final softmax layer
            block_fn: The block function to use. This is either `basic_block` or `bottleneck`.
                The original paper used basic_block for layers < 50
            repetitions: Number of repetitions of various block units.
                At each block unit, the number of filters are doubled and the input size is halved
        Returns:
            The keras `Model`.
        """
        _handle_dim_ordering()
        if len(input_shape) != 4:
            raise Exception("Input shape should be a tuple "
                            "(nb_channels, conv_dim1, conv_dim2, conv_dim3)")

        # Permute dimension order if necessary
        if K.image_dim_ordering() == 'tf':
            input_shape = (input_shape[1], input_shape[2], input_shape[3], input_shape[0])

        # Load function from str if needed.
        block_fn = _get_block(block_fn)

        input = Input(shape=input_shape)
        conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7, 7), strides=(2, 2, 2))(input)
        pool1 = MaxPooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2), padding="same")(conv1)

        block = pool1
        filters = 64
        for i, r in enumerate(repetitions):
            block = _residual_block(block_fn, filters=filters, repetitions=r, is_first_layer=(i == 0))(block)
            filters *= 2

        # Last activation
        block = _bn_relu(block)
        # Classifier block
        block_shape = K.int_shape(block)
        pool2 = AveragePooling3D(pool_size=(block_shape[ROW_AXIS], block_shape[COL_AXIS], block_shape[DEPTH_AXIS]),
                                 strides=(1, 1, 1))(block)
        flatten1 = Flatten()(pool2)
        dense = Dense(units=num_outputs, kernel_initializer="he_normal",
                      activation="softmax")(flatten1)

        model = Model(inputs=input, outputs=dense)
        return model
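A hedged invocation sketch for the builder above (the helpers _get_block, _conv_bn_relu, _residual_block, and _bn_relu come from the surrounding module; the shapes here are illustrative):

model = build(input_shape=(1, 64, 64, 64),  # (nb_channels, d1, d2, d3)
              num_outputs=10,
              block_fn='basic_block',
              repetitions=[2, 2, 2, 2])     # ResNet-18-style layout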
Code example #19
def discriminator(dflag=0, df=8, dx=5, dy=5, dz=5, dp=0.2):

    image = Input(shape=(25, 25, 25, 1))
    x = image
    if dflag == 1:
        x = Conv3D(df, (dx, dy, dz), padding='same')(x)
        x = LeakyReLU()(x)
        x = BatchNormalization()(x)
        x = Dropout(dp)(x)

    # consume x here so the optional dflag block above is not bypassed
    x = Conv3D(32, (5, 5, 5), padding='same')(x)
    x = LeakyReLU()(x)
    x = Dropout(dp)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = Conv3D(8, (5, 5, 5), padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(dp)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = Conv3D(8, (5, 5, 5), padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(dp)(x)

    x = ZeroPadding3D((1, 1, 1))(x)
    x = Conv3D(8, (5, 5, 5), padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(dp)(x)

    x = AveragePooling3D((2, 2, 2))(x)
    h = Flatten()(x)

    dnn = Model(image, h)

    dnn_out = dnn(image)

    fake = Dense(1, activation='sigmoid', name='generation')(dnn_out)
    aux = Dense(1, activation='linear', name='auxiliary')(dnn_out)
    ecal = Lambda(lambda x: K.sum(x, axis=(1, 2, 3)))(image)
    return Model(inputs=image, outputs=[fake, aux, ecal])
Code example #20
    def __init__(self, dims, is_nearest=False):
        # spatial dimensions are left unknown (None); one channel at the end
        shape = [None] * dims + [1]

        voxel = Input(shape=shape)

        if dims == 3:
            x = MaxPooling3D()(voxel) if is_nearest else AveragePooling3D()(voxel)
        elif dims == 2:
            x = MaxPooling2D()(voxel) if is_nearest else AveragePooling2D()(voxel)
        else:
            raise ValueError("dims must be 2 or 3")

        self.model = Model(inputs=voxel, outputs=x)
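A hypothetical smoke test (the enclosing class is not shown in the snippet; it is called Downsampler here for illustration). With the default pool size of 2, every spatial dimension is halved:

import numpy as np
ds = Downsampler(dims=3, is_nearest=False)
vol = np.random.rand(1, 16, 16, 16, 1).astype('float32')
print(ds.model.predict(vol).shape)  # (1, 8, 8, 8, 1)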
Code example #21
def transition_block(x, reduction, name):
    """A transition block.

    # Arguments
        x: input tensor.
        reduction: float, compression rate at transition layers.
        name: string, block label.

    # Returns
        output tensor for the block.
    """
    bn_axis = 4 if K.image_data_format() == 'channels_last' else 1
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                           name=name + '_bn')(x)
    x = Activation('relu', name=name + '_relu')(x)
    x = Conv3D(int(K.int_shape(x)[bn_axis] * reduction),
               1,
               use_bias=False,
               name=name + '_conv')(x)
    x = AveragePooling3D(1, strides=2, name=name + '_pool')(x)
    return x
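A shape sketch for the block above (assumes the snippet's own imports, i.e. the Keras backend K and layers; the input size is illustrative). `reduction` is the DenseNet compression factor, and AveragePooling3D(1, strides=2) is pure strided subsampling with a one-voxel window:

inp = Input((8, 8, 8, 64))
out = transition_block(inp, reduction=0.5, name='pool1')
# the 1x1x1 conv keeps int(64 * 0.5) = 32 maps; stride 2 halves each
# spatial dim: (8, 8, 8, 64) -> (4, 4, 4, 32)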
Code example #22
File: EnergyGan.py Project: shruti-sharan/3Dgan
def discriminator():

    image = Input(shape=(25, 25, 25, 1))

    x = Conv3D(32, (5, 5, 5), padding='same')(image)
    x = LeakyReLU()(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = Conv3D(8, (5, 5, 5), padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = Conv3D(8, (5, 5, 5), padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((1, 1, 1))(x)
    x = Conv3D(8, (5, 5, 5), padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    x = AveragePooling3D((2, 2, 2))(x)
    h = Flatten()(x)

    dnn = Model(image, h)

    image = Input(shape=(25, 25, 25, 1))

    dnn_out = dnn(image)

    fake = Dense(1, activation='sigmoid', name='generation')(dnn_out)
    aux = Dense(1, activation='relu', name='auxiliary')(dnn_out)

    Model(inputs=image, outputs=[fake, aux]).summary()
    return Model(inputs=image, outputs=[fake, aux])
Code example #23
File: CNNs_3D.py Project: sahahn/GenDiagFramework
def Unet_Inspired(input_shape, depth=6, n_base_filters=16):

    inputs = Input(input_shape)
    current_layer = inputs

    level_output_layers = list()
    level_filters = list()

    for level_number in range(depth):
        n_level_filters = (2**level_number) * n_base_filters
        level_filters.append(n_level_filters)

        if current_layer is inputs:
            in_conv = create_convolution_block(current_layer, n_level_filters)
        else:
            in_conv = create_convolution_block(current_layer,
                                               n_level_filters,
                                               strides=(2, 2, 2))

        context_output_layer = create_context_module(in_conv,
                                                     n_level_filters,
                                                     dropout_rate=0.3)

        summation_layer = Add()([in_conv, context_output_layer])
        level_output_layers.append(summation_layer)
        current_layer = summation_layer

    avgpool = AveragePooling3D(pool_size=4, strides=(1, 1, 1))(current_layer)
    flatten = Flatten()(avgpool)
    dense = Dense(units=1,
                  kernel_initializer="he_normal",
                  activation="sigmoid")(flatten)
    # Dense already applies the sigmoid; stacking a second Activation('sigmoid')
    # on top would squash all outputs into the interval [0.5, 0.73]

    model = Model(inputs=inputs, outputs=dense)

    return model
Code example #24
def create_pre_residual_of_residual(input_dim, nb_classes=100, N=2, k=1, dropout=0.0, verbose=1):
    """
    Creates a Residual Network of Residual Network with specified parameters
    Example : To create a Pre-RoR model, use k = 1
              model = create_pre_residual_of_residual((3, 32, 32), 10, N=4, k=1) # Pre-RoR-3
              To create a RoR-WRN model, use k > 1
              model = create_pre_residual_of_residual((3, 32, 32), 10, N=4, k=10) # RoR-3-WRN-28-10
    :param input_dim: Input shape tuple, passed to the Input layer
    :param nb_classes: Number of output classes
    :param N: Depth of the network. Compute N = (n - 4) / 6.
              Example : For a depth of 16, n = 16, N = (16 - 4) / 6 = 2
              Example2: For a depth of 28, n = 28, N = (28 - 4) / 6 = 4
              Example3: For a depth of 40, n = 40, N = (40 - 4) / 6 = 6
    :param k: Width of the network.
    :param dropout: Adds dropout if value is greater than 0.0.
                    Note : Generally not used in RoR
    :param verbose: Debug info to describe created WRN
    :return:
    """
    ip = Input(shape=input_dim)

    channel_axis = 1 if K.image_dim_ordering() == "th" else -1

    x = initial_conv(ip)
    nb_conv = 4  # Don't count the 4 long residual connections in WRN models

    conv0_level1_shortcut = Convolution3D(64 * k, 1, 1, 1, border_mode='same', subsample=(4, 4, 4),
                                          name='conv0_level1_shortcut')(x)

    conv1_level2_shortcut = Convolution3D(16 * k, 1, 1, 1, border_mode='same',
                                          name='conv1_level2_shortcut')(x)
    for i in range(N):
        initial = (i == 0)
        x = conv1_block(x, k, dropout, initial=initial)
        nb_conv += 2

    # Add Level 2 shortcut
    x = merge([x, conv1_level2_shortcut], mode='sum')

    x = MaxPooling3D((2, 2, 2))(x)

    conv2_level2_shortcut = Convolution3D(32 * k, 1, 1, 1, border_mode='same',
                                          name='conv2_level2_shortcut')(x)
    for i in range(N):
        x = conv2_block(x, k, dropout)
        nb_conv += 2

    # Add Level 2 shortcut
    x = merge([x, conv2_level2_shortcut], mode='sum')

    x = MaxPooling3D((2, 2, 2))(x)

    conv3_level2_shortcut = Convolution3D(64 * k, 1, 1, 1, border_mode='same',
                                          name='conv3_level2_shortcut')(x)
    for i in range(N):
        x = conv3_block(x, k, dropout)
        nb_conv += 2

    # Add Level 2 shortcut
    x = merge([x, conv3_level2_shortcut], mode='sum')

    # Add Level 1 shortcut
    x = merge([x, conv0_level1_shortcut], mode='sum')
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = AveragePooling3D((8, 8, 8))(x)
    x = Flatten()(x)

    x = Dense(nb_classes, activation='softmax')(x)

    model = Model(ip, x)

    if verbose: print("Residual-in-Residual-Network-%d-%d created." % (nb_conv, k))
    return model
Code example #25
def GW_net(input_spat, block_fn_spc, block_fn, repetitions1, repetitions2):

    block_fn_spc = _get_block(block_fn_spc)  #basic_block_spc
    block_fn = _get_block(block_fn)  #basic_block
    conv1_spc = _conv_bn_relu_spc(
        nb_filter=32,
        kernel_dim1=1,
        kernel_dim2=1,
        kernel_dim3=7,
        subsample=(1, 1, 2))(input_spat)  #input of the spectral 3DSERes block
    nb_filter = 32
    for i, r in enumerate(repetitions1):
        block_spc = _residual_block_spc(block_fn_spc,
                                        nb_filter=nb_filter,
                                        repetitions=r,
                                        is_first_layer=(i == 0))(conv1_spc)
        nb_filter = nb_filter * 2
    block_output_spc = _bn_relu_spc(
        block_spc)  # output of the spectral 3DSERes block

    conv_spc_results = _conv_bn_relu_spc(
        nb_filter=32,
        kernel_dim1=1,
        kernel_dim2=1,
        kernel_dim3=block_output_spc._keras_shape[3])(block_output_spc)
    block_in = Reshape(
        (conv_spc_results._keras_shape[1], conv_spc_results._keras_shape[2],
         conv_spc_results._keras_shape[4],
         1))(conv_spc_results)  #input of the spatial 3DSERes block

    conv2_spc = _conv_bn_relu(nb_filter=32,
                              kernel_dim1=3,
                              kernel_dim2=3,
                              kernel_dim3=32,
                              subsample=(1, 1, 1))(block_in)
    nb_filter = 32
    for i, r in enumerate(repetitions2):
        block = _residual_block(block_fn,
                                nb_filter=nb_filter,
                                repetitions=r,
                                is_first_layer=(i == 0))(conv2_spc)
        nb_filter = nb_filter * 2
    block_output = _bn_relu(block)
    pool2 = AveragePooling3D(
        pool_size=(
            block._keras_shape[1],
            block._keras_shape[2],
            block._keras_shape[3],
        ),
        strides=(1, 1, 1))(block_output)  #output of the spatial 3DSERes block

    #Feature aggregation
    flatten1 = Flatten()(pool2)
    dense = L.Dense(units=NUM_CLASS,
                    activation="softmax",
                    kernel_initializer="he_normal")(flatten1)
    inputs = input_spat

    model = K.models.Model(inputs=inputs, outputs=dense)
    optimizer = K.optimizers.Adam(lr=1e-4, decay=1e-6)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    return model
Code example #26
    def build(input_shape, output_shape, block_fn, repetitions, reg_factor,
              mode, use_shortcut, use_colearn):
        """Instantiate a vanilla ResNet3D keras model.

        # Arguments
            input_shape: Tuple of input shape in the format
            (conv_dim1, conv_dim2, conv_dim3, channels) if dim_ordering='tf'
            (channels, conv_dim1, conv_dim2, conv_dim3) if dim_ordering='th'
            output_shape: Tuple of output shape
            block_fn: Unit block to use {'basic_block', 'bottleneck_block'}
            repetitions: Repetitions of unit blocks
            mode: 'detection', 'localisation' or 'segmentation'
        # Returns
            model: a 3D ResNet model that takes a 5D tensor (volumetric images
            in batch) as input and returns a 1D vector (prediction) as output.
        """

        _handle_data_format()
        Resnet3DBuilder.iteration += 1
        Resnet3DBuilder.use_shortcut = use_shortcut
        Resnet3DBuilder.use_colearn = use_colearn
        print('Model number for session:', Resnet3DBuilder.iteration)

        if mode == 'localisation':
            input = [Input(shape=input_shape[0]), Input(shape=input_shape[1])]
            main_input = input[0]
            num_channels = input_shape[0][CHANNEL_AXIS - 1]

        elif mode == 'detection' or mode == 'segmentation':
            input = Input(shape=input_shape)
            main_input = input
            num_channels = input_shape[CHANNEL_AXIS - 1]

        encoder_outputs = Resnet3DBuilder.build_encoders(
            main_input, num_channels, block_fn, repetitions, reg_factor, mode)
        colearn_outputs = Resnet3DBuilder.get_colearning_blocks(
            encoder_outputs, reg_factor, mode)

        #print(colearn_outputs)

        decoder_output = Resnet3DBuilder.build_decoder(colearn_outputs,
                                                       block_fn, repetitions,
                                                       reg_factor, mode)

        #print(decoder_output)

        if mode == 'detection':
            # average pool and classification
            pool2 = AveragePooling3D(
                pool_size=(decoder_output._keras_shape[DIM1_AXIS],
                           decoder_output._keras_shape[DIM2_AXIS],
                           decoder_output._keras_shape[DIM3_AXIS]),
                strides=(1, 1, 1))(decoder_output)

            flatten1 = Flatten()(pool2)

            dense_centre = Dense(units=output_shape[0],
                                 kernel_initializer="he_normal",
                                 activation='linear',
                                 kernel_regularizer=l2(reg_factor))(flatten1)

            model = Model(inputs=input, outputs=dense_centre)

        elif mode == 'localisation':

            # average pool and classification
            decoder_output = AveragePooling3D(
                pool_size=(decoder_output._keras_shape[DIM1_AXIS],
                           decoder_output._keras_shape[DIM2_AXIS],
                           decoder_output._keras_shape[DIM3_AXIS]),
                strides=(1, 1, 1))(decoder_output)

            #print(decoder_output)

            flatten = Flatten()(decoder_output)

            #centre prediction
            dense_centre = Dense(units=output_shape[0],
                                 kernel_initializer="he_normal",
                                 activation='linear',
                                 kernel_regularizer=l2(reg_factor))(flatten)

            dense_centre = Add()([dense_centre,
                                  input[1]])  #predicted centre added here

            model = Model(inputs=input, outputs=dense_centre)

        elif mode == 'segmentation':
            # deconvolution time o_O;
            # approximately the same layer structure as the convolution half but in reverse

            deconv = Conv3D(filters=1,
                            kernel_size=(1, 1, 1),
                            strides=(1, 1, 1),
                            kernel_initializer='he_normal',
                            kernel_regularizer=l2(reg_factor))(decoder_output)
            deconv = squeeze(-1)(deconv)

            final_activation = Activation('sigmoid',
                                          name='final_activation')(deconv)

            model = Model(inputs=input, outputs=[final_activation])

        else:
            model = None
            print('Wrong mode selected:', mode)

        return model
Code example #27
    def build(input_shape, num_outputs, block_fn_spc, block_fn, repetitions1,
              repetitions2):
        # ResnetBuilder.build(input_shape, num_outputs, basic_block_spc, basic_block, [1], [1])
        """Builds a custom ResNet like architecture.
        Args:
            input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols)
            num_outputs: The number of outputs at final softmax layer
            block_fn: The block function to use. This is either `basic_block` or `bottleneck`.
                The original paper used basic_block for layers < 50
            repetitions: Number of repetitions of various block units.
                At each block unit, the number of filters are doubled and the input size is halved
        Returns:
            The keras `Model`.
        """

        print('original input shape:', input_shape)
        _handle_dim_ordering()
        if len(input_shape) != 4:
            raise Exception(
                "Input shape should be a tuple (nb_channels, kernel_dim1, kernel_dim2, kernel_dim3)"
            )

        # original input shape: (1, 7, 7, 200)

        # Permute dimension order if necessary
        if K.image_dim_ordering() == 'tf':
            input_shape = (input_shape[1], input_shape[2], input_shape[3],
                           input_shape[0])
        print('change input shape:', input_shape)
        # changed input shape: (7, 7, 200, 1)

        # Load function from str if needed.
        block_fn_spc = _get_block(block_fn_spc)
        block_fn = _get_block(block_fn)

        input = Input(shape=input_shape)
        print(input)
        # Tensor("input_1:0", shape=(?, 7, 7, 200, result), dtype=float32)
        print('#' * 30)
        print("input shape result:", input._keras_shape[0])
        print("input shape 2:", input._keras_shape[1])
        print("input shape 3:", input._keras_shape[2])
        print("input shape 4:", input._keras_shape[3])
        print("input shape 5:", input._keras_shape[4])
        # input shape result: None
        # input shape 2: 7
        # input shape 3: 7
        # input shape 4: 200
        # input shape 5: result

        # CONV + BN +RELU
        conv1_spc = _conv_bn_relu_spc(nb_filter=24,
                                      kernel_dim1=1,
                                      kernel_dim2=1,
                                      kernel_dim3=7,
                                      subsample=(1, 1, 2))(input)
        print('conv1_spc:', conv1_spc)

        block_spc = conv1_spc

        nb_filter = 24
        # repetitions1 = [1]  ->  i, r = 0, 1
        for i, r in enumerate(repetitions1):
            block_spc = _residual_block_spc(block_fn_spc,
                                            nb_filter=nb_filter,
                                            repetitions=r,
                                            is_first_layer=(i == 0))(block_spc)
            nb_filter *= 2

        # Last activation
        # BN + RELU
        block_spc = _bn_relu_spc(block_spc)

        block_norm_spc = BatchNormalization(axis=CHANNEL_AXIS)(block_spc)
        block_output_spc = Activation("relu")(block_norm_spc)

        conv_spc_results = _conv_bn_relu_spc(
            nb_filter=128,
            kernel_dim1=1,
            kernel_dim2=1,
            kernel_dim3=block_output_spc._keras_shape[CONV_DIM3])(
                block_output_spc)

        print('block_output_spc.kernel_dim3:',
              block_output_spc._keras_shape[CONV_DIM3])
        print("conv_spc_result shape:", conv_spc_results._keras_shape)

        conv2_spc = Reshape(
            (conv_spc_results._keras_shape[CONV_DIM1],
             conv_spc_results._keras_shape[CONV_DIM2],
             conv_spc_results._keras_shape[CHANNEL_AXIS], 1))(conv_spc_results)
        print(conv2_spc)

        conv1 = _conv_bn_relu(nb_filter=24,
                              kernel_dim1=3,
                              kernel_dim2=3,
                              kernel_dim3=128,
                              subsample=(1, 1, 1))(conv2_spc)
        print("conv1 shape:", conv1._keras_shape)

        block = conv1
        nb_filter = 24
        for i, r in enumerate(repetitions2):
            block = _residual_block(block_fn,
                                    nb_filter=nb_filter,
                                    repetitions=r,
                                    is_first_layer=(i == 0))(block)
            nb_filter *= 2

        block = _bn_relu(block)

        block_norm = BatchNormalization(axis=CHANNEL_AXIS)(block)
        block_output = Activation("relu")(block_norm)

        pool2 = AveragePooling3D(pool_size=(
            block._keras_shape[CONV_DIM1],
            block._keras_shape[CONV_DIM2],
            block._keras_shape[CONV_DIM3],
        ),
                                 strides=(1, 1, 1))(block_output)
        flatten1 = Flatten()(pool2)
        drop1 = Dropout(0.5)(flatten1)
        dense = Dense(units=num_outputs,
                      activation="softmax",
                      kernel_initializer="he_normal")(drop1)

        model = Model(inputs=input, outputs=dense)
        model.summary()
        return model
Code example #28
def densenet_baseline_2(num_channel):
    """
    The stronger baseline (regular) DenseNet with two DenseNet blocks
    Each block has 4 small conv_bn_relu blocks
    At the end of each block, two 1x1x1 convolution layers bring the parameter count close to that of the MR-3D-DenseNet
    No significant difference was observed in comparison to baseline_1
    num_channel: number of channels
    """

    # input layer
    inp = Input((16, 16, 16, num_channel))
    # first transition layer
    x = conv_bn_relu(inp, 64, 3, "same")
    # first block
    x1 = conv_bn_relu(x, 256, 1, "same")
    x1 = conv_bn_relu(x1, 64, 1, "same")
    x1 = conv_bn_relu(x1, 64, 3, "same")
    x2 = Concatenate()([x, x1])
    x2 = conv_bn_relu(x2, 256, 1, "same")
    x2 = conv_bn_relu(x2, 64, 1, "same")
    x2 = conv_bn_relu(x2, 64, 3, "same")
    x3 = Concatenate()([x, x1, x2])
    x3 = conv_bn_relu(x3, 256, 1, "same")
    x3 = conv_bn_relu(x3, 64, 1, "same")
    x3 = conv_bn_relu(x3, 64, 3, "same")
    x4 = Concatenate()([x, x1, x2, x3])
    x4 = conv_bn_relu(x4, 256, 1, "same")
    x4 = conv_bn_relu(x4, 64, 1, "same")
    x4 = conv_bn_relu(x4, 64, 3, "same")
    x = AveragePooling3D(2)(x4)
    x = conv_bn_relu(x, 256, 1, "same")
    x = conv_bn_relu(x, 64, 1, "same")

    # second transition layer
    x = conv_bn_relu(x, 64, 1, "same")
    # second block
    x1 = conv_bn_relu(x, 256, 1, "same")
    x1 = conv_bn_relu(x1, 64, 1, "same")
    x1 = conv_bn_relu(x1, 64, 3, "same")
    x2 = Concatenate()([x, x1])
    x2 = conv_bn_relu(x2, 256, 1, "same")
    x2 = conv_bn_relu(x2, 64, 1, "same")
    x2 = conv_bn_relu(x2, 64, 3, "same")
    x3 = Concatenate()([x, x1, x2])
    x3 = conv_bn_relu(x3, 256, 1, "same")
    x3 = conv_bn_relu(x3, 64, 1, "same")
    x3 = conv_bn_relu(x3, 64, 3, "same")
    x4 = Concatenate()([x, x1, x2, x3])
    x4 = conv_bn_relu(x4, 256, 1, "same")
    x4 = conv_bn_relu(x4, 64, 1, "same")
    x4 = conv_bn_relu(x4, 64, 3, "same")
    x = AveragePooling3D(2)(x4)
    x = conv_bn_relu(x, 256, 1, "same")
    x = conv_bn_relu(x, 64, 1, "same")

    # final block
    x = Flatten()(x)
    x = dense_bn_relu(x, 256, 0.1)
    x = dense_bn_relu(x, 128, 0.1)
    y = Dense(1)(x)
    return Model(inp, y)
Code example #29
    def build(input_shape, num_outputs, block_fn_spc, block_fn, repetitions1,
              repetitions2):
        """Builds a custom ResNet like architecture.
        Args:
            input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols)
            num_outputs: The number of outputs at final softmax layer
            block_fn: The block function to use. This is either `basic_block` or `bottleneck`.
                The original paper used basic_block for layers < 50
            repetitions: Number of repetitions of various block units.
                At each block unit, the number of filters are doubled and the input size is halved
        Returns:
            The keras `Model`.
        """
        _handle_dim_ordering()
        if len(input_shape) != 4:
            raise Exception(
                "Input shape should be a tuple (nb_channels, kernel_dim1, kernel_dim2, kernel_dim3)"
            )

        # Permute dimension order if necessary
        if K.image_dim_ordering() == 'tf':
            input_shape = (input_shape[1], input_shape[2], input_shape[3],
                           input_shape[0])

        # Load function from str if needed.
        block_fn_spc = _get_block(block_fn_spc)
        block_fn = _get_block(block_fn)

        input = Input(shape=input_shape)
        print("input shape:", input._keras_shape[3])

        conv1_spc = _conv_bn_relu_spc(nb_filter=24,
                                      kernel_dim1=1,
                                      kernel_dim2=1,
                                      kernel_dim3=7,
                                      subsample=(1, 1, 2))(input)

        block_spc = conv1_spc
        nb_filter = 24
        for i, r in enumerate(repetitions1):
            block_spc = _residual_block_spc(block_fn_spc,
                                            nb_filter=nb_filter,
                                            repetitions=r,
                                            is_first_layer=(i == 0))(block_spc)
            nb_filter *= 2

        # Last activation
        block_spc = _bn_relu_spc(block_spc)

        block_norm_spc = BatchNormalization(axis=CHANNEL_AXIS)(block_spc)
        block_output_spc = Activation("relu")(block_norm_spc)

        conv_spc_results = _conv_bn_relu_spc(
            nb_filter=128,
            kernel_dim1=1,
            kernel_dim2=1,
            kernel_dim3=block_output_spc._keras_shape[CONV_DIM3])(
                block_output_spc)

        print("conv_spc_result shape:", conv_spc_results._keras_shape)

        conv2_spc = Reshape(
            (conv_spc_results._keras_shape[CONV_DIM1],
             conv_spc_results._keras_shape[CONV_DIM2],
             conv_spc_results._keras_shape[CHANNEL_AXIS], 1))(conv_spc_results)

        conv1 = _conv_bn_relu(nb_filter=24,
                              kernel_dim1=3,
                              kernel_dim2=3,
                              kernel_dim3=128,
                              subsample=(1, 1, 1))(conv2_spc)
        #conv1 = _conv_bn_relu(nb_filter=32, kernel_dim1=3, kernel_dim2=3, kernel_dim3=input._keras_shape[3], subsample=(1, 1, 1))(input)
        #pool1 = MaxPooling3D(pool_size=(3, 3, 1), strides=(2, 2, 1), border_mode="same")(conv1)
        #conv1 = Convolution3D(nb_filter=32, kernel_dim1=3, kernel_dim2=3, kernel_dim3=176,subsample=(1,1,1))(input)
        print("conv1 shape:", conv1._keras_shape)

        block = conv1
        nb_filter = 24
        for i, r in enumerate(repetitions2):
            block = _residual_block(block_fn,
                                    nb_filter=nb_filter,
                                    repetitions=r,
                                    is_first_layer=(i == 0))(block)
            nb_filter *= 2

        # Last activation
        block = _bn_relu(block)

        block_norm = BatchNormalization(axis=CHANNEL_AXIS)(block)
        block_output = Activation("relu")(block_norm)

        # Classifier block
        pool2 = AveragePooling3D(pool_size=(
            block._keras_shape[CONV_DIM1],
            block._keras_shape[CONV_DIM2],
            block._keras_shape[CONV_DIM3],
        ),
                                 strides=(1, 1, 1))(block_output)
        flatten1 = Flatten()(pool2)
        drop1 = Dropout(0.5)(flatten1)
        dense = Dense(units=num_outputs,
                      activation="softmax",
                      kernel_initializer="he_normal")(drop1)

        model = Model(inputs=input, outputs=dense)
        return model
Code example #30
def create_inception_resnet_v1(nb_classes=2, scale=True, noise_adaption=False):
    '''
    Creates an Inception-ResNet v1 network
    :param nb_classes: number of classes
    :param scale: flag to add scaling of activations
    :param noise_adaption: flag to append a noise-adaptation channel initialized from a stored confusion matrix
    :return: Keras Model with one input (256x256x40x1 for tf ordering, 1x256x256x40 for th) and outputs (final_output, auxiliary_output); a channeled output is prepended when noise_adaption is set
    '''
    if noise_adaption:
        CONFUSION = genfromtxt(
            '/media/mccoyd2/hamburger/hemorrhage_study/results/confusion_matrix.csv',
            delimiter=',')
        CHANNEL_WEIGHTS = CONFUSION.copy()
        # row-wise division of each confusion matrix entry by its row sum
        CHANNEL_WEIGHTS /= CHANNEL_WEIGHTS.sum(axis=1, keepdims=True)
        # take the log of the matrix, with an offset to prevent log(0) exploding
        CHANNEL_WEIGHTS = np.log(CHANNEL_WEIGHTS + 1e-8)

    if K.image_dim_ordering() == 'th':
        init = Input((1, 256, 256, 40))
    else:
        init = Input((256, 256, 40, 1))

    # Input shape is 256 x 256 x 40 x 1 (tf) or 1 x 256 x 256 x 40 (th)
    x = inception_resnet_stem(init)

    # 5 x Inception Resnet A
    for i in range(5):
        x = inception_resnet_A(x, scale_residual=scale)

    # Reduction A - From Inception v4
    x = reduction_A(x, k=192, l=192, m=256, n=384)

    # 10 x Inception Resnet B
    for i in range(10):
        x = inception_resnet_B(x, scale_residual=scale)

    # Auxiliary tower
    aux_out = AveragePooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2))(x)
    aux_out = Conv3D(filters=128,
                     kernel_size=(1, 1, 1),
                     padding='same',
                     activation='relu')(aux_out)
    aux_out = Conv3D(filters=768, kernel_size=(3, 3, 3),
                     activation='relu')(aux_out)
    aux_out = Flatten()(aux_out)
    aux_out = Dense(nb_classes, activation='sigmoid')(aux_out)

    # Reduction Resnet B
    x = reduction_resnet_B(x)

    # 5 x Inception Resnet C
    for i in range(5):
        x = inception_resnet_C(x, scale_residual=scale)

    # Average Pooling
    x = AveragePooling3D(pool_size=(4, 4, 4))(x)

    # Dropout
    x = Dropout(DROPOUT)(x)
    x = Flatten()(x)

    # Output
    base_network_out = Dense(nb_classes,
                             activation='sigmoid',
                             name='base_network_channel')(x)

    if noise_adaption:
        channeled_output = Channel(name='noise_adaption_channel',
                                   weights=[CHANNEL_WEIGHTS])(base_network_out)
        model = Model(inputs=init,
                      outputs=[channeled_output, base_network_out, aux_out],
                      name='Inception-Resnet-v1')
    else:
        model = Model(inputs=init,
                      outputs=[base_network_out, aux_out],
                      name='Inception-Resnet-v1')

    return model
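The noise-adaptation branch turns a confusion matrix into initial weights for the Channel layer: each row is normalized to sum to 1, then log(p + 1e-8) converts the row-stochastic matrix into logits. A standalone illustration with a toy 2x2 matrix (hypothetical numbers):

import numpy as np

confusion = np.array([[90., 10.],
                      [20., 80.]])
weights = confusion / confusion.sum(axis=1, keepdims=True)  # rows sum to 1
weights = np.log(weights + 1e-8)                            # initial logits
print(weights.round(3))  # [[-0.105 -2.303], [-1.609 -0.223]]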