コード例 #1
0
def create_model_8():
    """Build a small 3-D CNN binary classifier for 32x32x32 single-channel volumes.

    Two conv stages (with SpatialDropout3D regularization) followed by a
    dense head with a single sigmoid output.  Written against the Keras 1.x
    API (``border_mode``, ``init``, ``Model(input=..., output=...)``).

    Returns:
        A compiled Keras Model (binary cross-entropy, Adam, lr=1e-5).
    """
    volume_in = Input((32, 32, 32, 1))

    # Stage 1: three convolutions widening 32 -> 64 -> 128 channels, then pool.
    feat = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(volume_in)
    feat = SpatialDropout3D(0.2)(feat)
    feat = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(feat)
    feat = SpatialDropout3D(0.2)(feat)
    feat = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(feat)
    feat = MaxPooling3D(pool_size=(2, 2, 2))(feat)

    # Stage 2: widen to 256 -> 512 channels, then pool again.
    feat = Convolution3D(256, 3, 3, 3, activation='relu', border_mode='same')(feat)
    feat = SpatialDropout3D(0.2)(feat)
    feat = Convolution3D(512, 3, 3, 3, activation='relu', border_mode='same')(feat)
    feat = MaxPooling3D(pool_size=(2, 2, 2))(feat)

    # Dense classification head with heavy dropout and a sigmoid probability.
    head = Flatten()(feat)
    head = Dense(64, init='normal')(head)
    head = Dropout(0.5)(head)
    probability = Dense(1, init='normal', activation='sigmoid')(head)

    model = Model(input=volume_in, output=probability)
    model.summary()
    model.compile(optimizer=Adam(lr=0.00001),
                  loss='binary_crossentropy',
                  metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])

    return model
コード例 #2
0
def unet_model():
    """Build a 3-D U-Net (channels-first) for volumetric segmentation.

    Encoder of three pooled conv stages, a bottleneck, and a decoder with
    skip connections (concatenation on the channel axis, ``concat_axis=1``).
    Written against the Keras 1.x API (``border_mode``, ``merge``).

    Depends on module-level names defined elsewhere in the file:
    ``max_slices``, ``img_size``, ``width`` (base filter count),
    ``dropout_rate``, and the ``dice_coef`` / ``dice_coef_loss`` functions.

    Returns:
        A compiled Keras Model (Dice loss, Adam, lr=1e-5).
    """
    # Input volume: (channels=1, depth, height, width) -- channels-first layout,
    # which is why BatchNormalization uses axis=1 throughout.
    inputs = Input(shape=(1, max_slices, img_size, img_size))
    # --- Encoder level 1: width -> width*2 channels ---
    conv1 = Convolution3D(width, 3, 3, 3, activation = 'relu', border_mode='same')(inputs)
    conv1 = BatchNormalization(axis = 1)(conv1)
    conv1 = Convolution3D(width*2, 3, 3, 3, activation = 'relu', border_mode='same')(conv1)
    conv1 = BatchNormalization(axis = 1)(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2), strides = (2, 2, 2), border_mode='same')(conv1)
    
    # --- Encoder level 2: width*2 -> width*4 channels ---
    conv2 = Convolution3D(width*2, 3, 3, 3, activation = 'relu', border_mode='same')(pool1)
    conv2 = BatchNormalization(axis = 1)(conv2)
    conv2 = Convolution3D(width*4, 3, 3, 3, activation = 'relu', border_mode='same')(conv2)
    conv2 = BatchNormalization(axis = 1)(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2), strides = (2, 2, 2), border_mode='same')(conv2)

    # --- Encoder level 3: width*4 -> width*8 channels ---
    conv3 = Convolution3D(width*4, 3, 3, 3, activation = 'relu', border_mode='same')(pool2)
    conv3 = BatchNormalization(axis = 1)(conv3)
    conv3 = Convolution3D(width*8, 3, 3, 3, activation = 'relu', border_mode='same')(conv3)
    conv3 = BatchNormalization(axis = 1)(conv3)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2), strides = (2, 2, 2), border_mode='same')(conv3)
    
    # --- Bottleneck: deepest features, width*8 -> width*16 channels ---
    conv4 = Convolution3D(width*8, 3, 3, 3, activation = 'relu', border_mode='same')(pool3)
    conv4 = BatchNormalization(axis = 1)(conv4)
    conv4 = Convolution3D(width*8, 3, 3, 3, activation = 'relu', border_mode='same')(conv4)
    conv4 = BatchNormalization(axis = 1)(conv4)
    conv4 = Convolution3D(width*16, 3, 3, 3, activation = 'relu', border_mode='same')(conv4)
    conv4 = BatchNormalization(axis = 1)(conv4)

    # --- Decoder: upsample, concatenate the matching encoder skip on the
    # channel axis, apply spatial dropout to the merged features, then conv.
    up5 = merge([UpSampling3D(size=(2, 2, 2))(conv4), conv3], mode='concat', concat_axis=1)
    conv5 = SpatialDropout3D(dropout_rate)(up5)
    conv5 = Convolution3D(width*8, 3, 3, 3, activation = 'relu', border_mode='same')(conv5)
    conv5 = Convolution3D(width*8, 3, 3, 3, activation = 'relu', border_mode='same')(conv5)
    
    up6 = merge([UpSampling3D(size=(2, 2, 2))(conv5), conv2], mode='concat', concat_axis=1)
    conv6 = SpatialDropout3D(dropout_rate)(up6)
    conv6 = Convolution3D(width*4, 3, 3, 3, activation = 'relu', border_mode='same')(conv6)
    conv6 = Convolution3D(width*4, 3, 3, 3, activation = 'relu', border_mode='same')(conv6)

    up7 = merge([UpSampling3D(size=(2, 2, 2))(conv6), conv1], mode='concat', concat_axis=1)
    conv7 = SpatialDropout3D(dropout_rate)(up7)
    conv7 = Convolution3D(width*2, 3, 3, 3, activation = 'relu', border_mode='same')(conv7)
    conv7 = Convolution3D(width*2, 3, 3, 3, activation = 'relu', border_mode='same')(conv7)
    # Final 1x1x1 convolution to a single sigmoid mask channel.
    conv8 = Convolution3D(1, 1, 1, 1, activation='sigmoid')(conv7)

    model = Model(input=inputs, output=conv8)
    model.compile(optimizer=Adam(lr=1e-5), 
                  loss=dice_coef_loss, metrics=[dice_coef])

    return model
コード例 #3
0
    def __init__(self, frame_count: int, image_channels: int = 3, image_height: int = 50, image_width: int = 100, max_string: int = 32, output_size: int = 28):
        """Assemble a 3-D-conv + bidirectional-GRU network trained with CTC.

        Three conv/pool/dropout stages feed a per-frame flatten, two
        bidirectional GRU layers, and a softmax over ``output_size``
        characters.  ``self.model`` is the training model whose single
        output is the CTC loss tensor produced by ``ctc_lambda_func``
        (defined elsewhere in the file).

        Args:
            frame_count: number of video frames per sample.
            image_channels: channels per frame (default RGB).
            image_height: frame height in pixels.
            image_width: frame width in pixels.
            max_string: maximum label length fed to the CTC loss.
            output_size: size of the character softmax.
        """
        input_shape = self.get_input_shape(frame_count, image_channels, image_height, image_width)
        self.input_layer = Input(shape=input_shape, dtype='float32', name='input')

        # Stage 1: pad, 3x5x5 conv with spatial stride 2, BN, ReLU, pool, dropout.
        self.zero_1 = ZeroPadding3D(padding=(1, 2, 2), name='zero_1')(self.input_layer)
        self.conv_1 = Conv3D(32, (3, 5, 5), strides=(1, 2, 2), kernel_initializer='he_normal', name='conv_1')(self.zero_1)
        self.batc_1 = BatchNormalization(name='batc_1')(self.conv_1)
        self.actv_1 = Activation('relu', name='actv_1')(self.batc_1)
        self.pool_1 = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='pool_1')(self.actv_1)
        self.drop_1 = SpatialDropout3D(0.5, name='drop_1')(self.pool_1)

        # Stage 2: same pattern, 64 filters.
        self.zero_2 = ZeroPadding3D(padding=(1, 2, 2), name='zero_2')(self.drop_1)
        self.conv_2 = Conv3D(64, (3, 5, 5), strides=(1, 2, 2), kernel_initializer='he_normal', name='conv_2')(self.zero_2)
        self.batc_2 = BatchNormalization(name='batc_2')(self.conv_2)
        self.actv_2 = Activation('relu', name='actv_2')(self.batc_2)
        self.pool_2 = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='pool_2')(self.actv_2)
        self.drop_2 = SpatialDropout3D(0.5, name='drop_2')(self.pool_2)

        # Stage 3: 3x3x3 conv, 96 filters.
        self.zero_3 = ZeroPadding3D(padding=(1, 1, 1), name='zero_3')(self.drop_2)
        self.conv_3 = Conv3D(96, (3, 3, 3), strides=(1, 2, 2), kernel_initializer='he_normal', name='conv_3')(self.zero_3)
        self.batc_3 = BatchNormalization(name='batc_3')(self.conv_3)
        self.actv_3 = Activation('relu', name='actv_3')(self.batc_3)
        self.pool_3 = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='pool_3')(self.actv_3)
        self.drop_3 = SpatialDropout3D(0.5, name='drop_3')(self.pool_3)

        # Flatten the spatial dims of every frame, keeping the time axis.
        self.res = TimeDistributed(Flatten())(self.drop_3)

        # Bidirectional GRUs run with activation=None; ReLU is applied
        # afterwards to the concatenated forward/backward outputs.
        self.gru_1 = Bidirectional(GRU(256, return_sequences=True, activation=None, kernel_initializer='Orthogonal', name='gru_1'), merge_mode='concat')(self.res)
        self.gru_1_actv = Activation('relu', name='gru_1_actv')(self.gru_1)
        self.gru_2 = Bidirectional(GRU(256, return_sequences=True, activation=None, kernel_initializer='Orthogonal', name='gru_2'), merge_mode='concat')(self.gru_1_actv)
        self.gru_2_actv = Activation('relu', name='gru_2_actv')(self.gru_2)

        # Per-timestep character distribution.
        self.dense_1 = Dense(output_size, kernel_initializer='he_normal', name='dense_1')(self.gru_2_actv)
        self.y_pred  = Activation('softmax', name='softmax')(self.dense_1)

        # Auxiliary inputs required by the CTC loss: the labels plus the
        # true (unpadded) lengths of the prediction and label sequences.
        self.input_labels = Input(shape=[max_string], dtype='float32', name='labels')
        self.input_length = Input(shape=[1], dtype='int64', name='input_length')
        self.label_length = Input(shape=[1], dtype='int64', name='label_length')

        self.loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([self.y_pred, self.input_labels, self.input_length, self.label_length])

        self.model = Model(inputs=[self.input_layer, self.input_labels, self.input_length, self.label_length], outputs=self.loss_out)
コード例 #4
0
def video_model_b(input_dim, output_dim):
    """VGG-style 3-D convnet for 5-frame square RGB clips.

    Two conv blocks (64 then 128 filters, each pooled 2x2x2) followed by two
    4096-unit dense layers and a linear regression output of ``output_dim``.

    Args:
        input_dim: spatial side length of each frame.
        output_dim: size of the linear output layer.

    Returns:
        An uncompiled Keras Sequential model.
    """
    stack = [
        SpatialDropout3D(0.2, input_shape=(5, input_dim, input_dim, 3)),
        # Block 1: two padded 3x3x3 convs, 64 filters each, then pool.
        ZeroPadding3D((1, 1, 1)),
        Convolution3D(64, 3, 3, 3, activation='relu'),
        ZeroPadding3D((1, 1, 1)),
        Convolution3D(64, 3, 3, 3, activation='relu'),
        MaxPooling3D((2, 2, 2), strides=(2, 2, 2)),
        # Block 2: same shape with 128 filters.
        SpatialDropout3D(0.2),
        ZeroPadding3D((1, 1, 1)),
        Convolution3D(128, 3, 3, 3, activation='relu'),
        ZeroPadding3D((1, 1, 1)),
        Convolution3D(128, 3, 3, 3, activation='relu'),
        MaxPooling3D((2, 2, 2), strides=(2, 2, 2)),
        # Dense regression head.
        Flatten(),
        Dense(4096, activation='relu'),
        Dropout(0.5),
        Dense(4096, activation='relu'),
        Dropout(0.5),
        Dense(output_dim=output_dim, activation='linear'),
    ]
    net = Sequential()
    for layer in stack:
        net.add(layer)
    return net
コード例 #5
0
    def build(self):
        """Build the conv + bidirectional-GRU CTC training model.

        Three pad/conv/BN/ReLU/dropout/pool stages, a per-frame flatten,
        two bidirectional GRUs, and a softmax over ``self.output_size``.
        The assembled model is stored in ``self.model``; its output is the
        CTC loss produced by the ``CTC`` helper (defined elsewhere).

        Reads instance attributes: ``img_c``, ``frames_n``, ``img_w``,
        ``img_h``, ``output_size``, ``absolute_max_string_len``.
        """
        # Input layout depends on the backend's image data format.
        if K.image_data_format() == 'channels_first':
            input_shape = (self.img_c, self.frames_n, self.img_w, self.img_h)
        else:
            input_shape = (self.frames_n, self.img_w, self.img_h, self.img_c)

        self.input_data = Input(name='the_input',
                                shape=input_shape,
                                dtype='float32')

        # Stage 1: note dropout is applied BEFORE pooling here.
        self.zero1 = ZeroPadding3D(padding=(1, 2, 2),
                                   name='zero1')(self.input_data)
        self.conv1 = Conv3D(32, (3, 5, 5),
                            strides=(1, 2, 2),
                            kernel_initializer='he_normal',
                            name='conv1')(self.zero1)
        self.batc1 = BatchNormalization(name='batc1')(self.conv1)
        self.actv1 = Activation('relu', name='actv1')(self.batc1)
        self.drop1 = SpatialDropout3D(0.5)(self.actv1)
        self.maxp1 = MaxPooling3D(pool_size=(1, 2, 2),
                                  strides=(1, 2, 2),
                                  name='max1')(self.drop1)

        # Stage 2: 64 filters, unit spatial stride.
        self.zero2 = ZeroPadding3D(padding=(1, 2, 2), name='zero2')(self.maxp1)
        self.conv2 = Conv3D(64, (3, 5, 5),
                            strides=(1, 1, 1),
                            kernel_initializer='he_normal',
                            name='conv2')(self.zero2)
        self.batc2 = BatchNormalization(name='batc2')(self.conv2)
        self.actv2 = Activation('relu', name='actv2')(self.batc2)
        self.drop2 = SpatialDropout3D(0.5)(self.actv2)
        self.maxp2 = MaxPooling3D(pool_size=(1, 2, 2),
                                  strides=(1, 2, 2),
                                  name='max2')(self.drop2)

        # Stage 3: 96 filters, 3x3x3 kernel.
        self.zero3 = ZeroPadding3D(padding=(1, 1, 1), name='zero3')(self.maxp2)
        self.conv3 = Conv3D(96, (3, 3, 3),
                            strides=(1, 1, 1),
                            kernel_initializer='he_normal',
                            name='conv3')(self.zero3)
        self.batc3 = BatchNormalization(name='batc3')(self.conv3)
        self.actv3 = Activation('relu', name='actv3')(self.batc3)
        self.drop3 = SpatialDropout3D(0.5)(self.actv3)
        self.maxp3 = MaxPooling3D(pool_size=(1, 2, 2),
                                  strides=(1, 2, 2),
                                  name='max3')(self.drop3)

        # Flatten the spatial dims of each frame, keeping the time axis.
        self.resh1 = TimeDistributed(Flatten())(self.maxp3)

        self.gru_1 = Bidirectional(GRU(256,
                                       return_sequences=True,
                                       kernel_initializer='Orthogonal',
                                       name='gru1'),
                                   merge_mode='concat')(self.resh1)
        self.gru_2 = Bidirectional(GRU(256,
                                       return_sequences=True,
                                       kernel_initializer='Orthogonal',
                                       name='gru2'),
                                   merge_mode='concat')(self.gru_1)

        # transforms RNN output to character activations:
        self.dense1 = Dense(self.output_size,
                            kernel_initializer='he_normal',
                            name='dense1')(self.gru_2)

        self.y_pred = Activation('softmax', name='softmax')(self.dense1)

        # Auxiliary inputs needed by the CTC loss: the label sequence plus
        # the true (unpadded) prediction and label lengths.
        self.labels = Input(name='the_labels',
                            shape=[self.absolute_max_string_len],
                            dtype='float32')
        self.input_length = Input(name='input_length',
                                  shape=[1],
                                  dtype='int64')
        self.label_length = Input(name='label_length',
                                  shape=[1],
                                  dtype='int64')

        self.loss_out = CTC(
            'ctc',
            [self.y_pred, self.labels, self.input_length, self.label_length])

        self.model = Model(inputs=[
            self.input_data, self.labels, self.input_length, self.label_length
        ],
                           outputs=self.loss_out)
コード例 #6
0
ファイル: layers.py プロジェクト: sc-AhmedAttia/lipnet-1
def create_drop_layer(name: str, input_layer):
    """Apply a named SpatialDropout3D layer (rate ``DROPOUT_RATE``) to a tensor.

    Args:
        name: layer name assigned to the dropout layer.
        input_layer: the Keras tensor to apply dropout to.

    Returns:
        The dropout layer's output tensor.  (The previous ``-> SpatialDropout3D``
        annotation was incorrect: calling the layer returns a tensor, not the
        layer instance, so the annotation has been removed.)
    """
    return SpatialDropout3D(DROPOUT_RATE, name=name)(input_layer)
コード例 #7
0
 filterSizeXYL2 = 5  #ukuran filter dimensi spasial
 filterSizeTL2 = 3  #ukuran filter dimensi spasial
 model = Sequential()
 model.add(
     Convolution3D(filterNumL1,
                   kernel_dim1=filterSizeTL1,
                   kernel_dim2=filterSizeXYL1,
                   kernel_dim3=filterSizeXYL1,
                   input_shape=(RDepth, R1y, R1x, 3),
                   activation='relu',
                   dim_ordering='tf'))
 #model.add(BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None))
 model.add(
     MaxPooling3D(pool_size=(1, poolingSizeXYL1, poolingSizeTL1),
                  dim_ordering='tf'))
 model.add(SpatialDropout3D(0.3))
 model.add(
     Convolution3D(filterNumL2,
                   kernel_dim1=filterSizeTL2,
                   kernel_dim2=filterSizeXYL2,
                   kernel_dim3=filterSizeXYL2,
                   activation='relu',
                   dim_ordering='tf'))
 # model.add(Convolution3D(filterNumL2,kernel_dim1=1, kernel_dim2=3, kernel_dim3=3, activation='relu', dim_ordering='tf'))
 #model.add(BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None))
 model.add(
     MaxPooling3D(pool_size=(2, poolingSizeXYL1, poolingSizeTL1),
                  dim_ordering='tf'))
 model.add(SpatialDropout3D(0.3))
 model.add(
     Convolution3D(128,
コード例 #8
0
    def _build_layers(self, x):
        """Assemble the gated 3-D PixelCNN graph and return its output tensor.

        Wires three causal stacks -- vertical, depth, horizontal -- through
        ``self.nb_pixelcnn_layers`` gated layers.  The first layer uses a
        type-A mask (excludes the center voxel); later layers use type B and
        add residual connections.  The horizontal stack is conditioned on
        the other two and produces the final sigmoid output.

        Args:
            x: input tensor the stacks are built on.

        Returns:
            Output tensor of the final 1x1x1 sigmoid convolution.

        NOTE(review): relies on project helpers ``_masked_conv``,
        ``_feed_v_map``, ``_feed_d_map`` and the ``GatedCNN3D`` layer,
        none of which are visible here.
        """
        # VERTICAL -- grows as a cube, not conditioned on any other stacks
        v_masked_map = self._masked_conv(x, self.filter_size_1st, 'vertical',
                                         0)
        v_masked_map = SpatialDropout3D(rate=self.dropout_rate)(
            v_masked_map, training=self.training_dropout)
        v_stack_out = GatedCNN3D(self.nb_filters, 'vertical',
                                 d_map=None)(v_masked_map, 0)
        # This is the shifted version of the vertical map, to be used in the depth and horizontal stacks
        v_feed_map = self._feed_v_map(v_masked_map, 0)

        # DEPTH - grows as a rectangle, conditioned on the vertical stack
        d_masked_map = self._masked_conv(x, self.filter_size_1st, 'depth', 0)
        d_masked_map = SpatialDropout3D(rate=self.dropout_rate)(
            d_masked_map, training=self.training_dropout)
        d_stack_out = GatedCNN3D(self.nb_filters, 'depth',
                                 d_map=v_feed_map)(d_masked_map, 0)
        # This is the shifted version of the depth map, to be used in the horizontal stack
        d_feed_map = self._feed_d_map(d_stack_out, 0)
        # make it have a matching number of filters
        d_stack_out = Convolution3D(self.nb_filters,
                                    1,
                                    padding='valid',
                                    name='v_1x1x1_conv_0')(d_stack_out)
        d_stack_out = SpatialDropout3D(rate=self.dropout_rate)(
            d_stack_out, training=self.training_dropout)

        # HORIZONTAL
        h_masked_map = self._masked_conv(x, self.filter_size_1st, 'horizontal',
                                         0, 'A')
        h_masked_map = SpatialDropout3D(rate=self.dropout_rate)(
            h_masked_map, training=self.training_dropout)
        # horizontal stack takes in depth and vertical stacks
        # because we used a Mask of type A, now we have to crop the center element as well
        h_stack_out = GatedCNN3D(self.nb_filters,
                                 'horizontal',
                                 v_map=v_feed_map,
                                 d_map=d_feed_map,
                                 crop_right=True)(h_masked_map, 0)
        # no residual connection in the first layer.
        h_stack_out = Convolution3D(self.nb_filters,
                                    1,
                                    padding='valid',
                                    name='h_1x1x1_conv_0')(h_stack_out)
        h_stack_out = SpatialDropout3D(rate=self.dropout_rate)(
            h_stack_out, training=self.training_dropout)

        # subsequent PixelCNN layers
        for i in range(1, self.nb_pixelcnn_layers):
            # VERTICAL
            v_masked_map = self._masked_conv(v_stack_out, self.filter_size,
                                             'vertical', i)
            v_masked_map = SpatialDropout3D(rate=self.dropout_rate)(
                v_masked_map, training=self.training_dropout)
            v_stack_out = GatedCNN3D(self.nb_filters, 'vertical',
                                     d_map=None)(v_masked_map, i)
            v_feed_map = self._feed_v_map(v_masked_map, i)

            # DEPTH
            d_stack_out_prev = d_stack_out
            d_masked_map = self._masked_conv(d_stack_out, self.filter_size,
                                             'depth', i)
            d_masked_map = SpatialDropout3D(rate=self.dropout_rate)(
                d_masked_map, training=self.training_dropout)
            d_stack_out = GatedCNN3D(self.nb_filters,
                                     'depth',
                                     d_map=v_feed_map)(d_masked_map, i)
            d_feed_map = self._feed_d_map(d_stack_out, i)

            d_stack_out = Convolution3D(self.nb_filters,
                                        1,
                                        padding='valid',
                                        name='v_1x1x1_conv_' +
                                        str(i))(d_stack_out)
            d_stack_out = SpatialDropout3D(rate=self.dropout_rate)(
                d_stack_out, training=self.training_dropout)
            # Add a residual connection to the previous depth stack
            d_stack_out = add([d_stack_out, d_stack_out_prev],
                              name='d_residual_' + str(i))

            # HORIZONTAL
            # use this shortcut for residual connection
            h_stack_out_prev = h_stack_out
            h_masked_map = self._masked_conv(h_stack_out, self.filter_size,
                                             'horizontal', i)
            h_masked_map = SpatialDropout3D(rate=self.dropout_rate)(
                h_masked_map, training=self.training_dropout)
            # Now we are using Mask B no need to crop the center pixel
            h_stack_out = GatedCNN3D(self.nb_filters,
                                     'horizontal',
                                     v_map=v_feed_map,
                                     d_map=d_feed_map)(h_masked_map, i)
            h_stack_out = Convolution3D(self.nb_filters,
                                        1,
                                        padding='valid',
                                        name='h_1x1x1_conv_' +
                                        str(i))(h_stack_out)
            h_stack_out = SpatialDropout3D(rate=self.dropout_rate)(
                h_stack_out, training=self.training_dropout)
            # Add a residual connection to the previous horizontal stack
            h_stack_out = add([h_stack_out, h_stack_out_prev],
                              name='h_residual_' + str(i))

        # FINAL LAYERS
        h_stack_out = Convolution3D(self.nb_filters,
                                    1,
                                    activation='relu',
                                    padding='valid',
                                    name='penultimate_convs0')(h_stack_out)
        h_stack_out = SpatialDropout3D(rate=self.dropout_rate)(
            h_stack_out, training=self.training_dropout)
        h_stack_out = Convolution3D(self.nb_filters,
                                    1,
                                    activation='relu',
                                    padding='valid',
                                    name='penultimate_convs1')(h_stack_out)
        h_stack_out = SpatialDropout3D(rate=self.dropout_rate)(
            h_stack_out, training=self.training_dropout)

        # We're using a low number of filters at the end, so that embeddings are not massive
        h_stack_out = Convolution3D(10,
                                    1,
                                    activation='relu',
                                    padding='valid',
                                    name='penultimate_convs2')(h_stack_out)
        h_stack_out = SpatialDropout3D(rate=self.dropout_rate)(
            h_stack_out, training=self.training_dropout)

        # Finally project it back into the original volume domain
        res = Convolution3D(1, 1, activation='sigmoid',
                            padding='valid')(h_stack_out)
        return res
    filterNumL2 = 64  # jumlah filter L1
    filterSizeXYL2 = 5  #ukuran filter dimensi spasial
    filterSizeTL2 = 5  #ukuran filter dimensi spasial

    modelB_In = Input(shape=(RDepth, R1y, R1x, 3))
    modelB = Convolution3D(16,
                           kernel_dim1=3,
                           kernel_dim2=5,
                           kernel_dim3=5,
                           input_shape=(RDepth, R1y, R1x, 3),
                           activation='relu',
                           dim_ordering='tf')(modelB_In)
    #model.add(BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None))
    modelB = MaxPooling3D(pool_size=(1, poolingSizeXYL1, poolingSizeTL1),
                          dim_ordering='tf')(modelB)
    modelB = SpatialDropout3D(0.3)(modelB)
    modelB = Convolution3D(filterNumL2,
                           kernel_dim1=3,
                           kernel_dim2=5,
                           kernel_dim3=5,
                           activation='relu',
                           dim_ordering='tf')(modelB)
    # model.add(Convolution3D(filterNumL2,kernel_dim1=1, kernel_dim2=3, kernel_dim3=3, activation='relu', dim_ordering='tf'))
    #model.add(BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None))
    modelB = MaxPooling3D(pool_size=(1, poolingSizeXYL1, poolingSizeTL1),
                          dim_ordering='tf')(modelB)
    modelB = SpatialDropout3D(0.3)(modelB)
    modelB = Convolution3D(filterNumL2,
                           kernel_dim1=3,
                           kernel_dim2=5,
                           kernel_dim3=5,
コード例 #10
0
    def build(self):
        """Build a conv + bidirectional-GRU video classifier.

        Three pad/conv/BN/ReLU/pool/dropout stages, a per-frame flatten,
        two bidirectional GRUs (the second collapses the time axis), and
        a softmax over ``self.output_n`` classes.  The assembled model is
        stored in ``self.model``.

        Reads instance attributes: ``img_c``, ``frames_n``, ``img_h``,
        ``img_w``, ``output_n``.
        """
        # Input layout depends on the backend's image data format.
        if K.image_data_format() == 'channels_first':
            input_shape = (self.img_c, self.frames_n, self.img_h, self.img_w)
        else:
            input_shape = (self.frames_n, self.img_h, self.img_w, self.img_c
                           )  # our case

        self.input_data = Input(name='input',
                                shape=input_shape,
                                dtype='float32')

        # 70 x 80 x 100 x 3
        self.zero1 = ZeroPadding3D(padding=(1, 2, 2),
                                   name='zero1')(self.input_data)
        self.conv1 = Conv3D(32, (3, 5, 5),
                            strides=(1, 2, 2),
                            kernel_initializer='he_normal',
                            name='conv_1')(self.zero1)
        # 70 x 40 x 50 x 32
        self.batch1 = BatchNormalization(name='batch1')(self.conv1)
        self.actv1 = Activation('relu', name='actv1')(self.batch1)
        self.maxp1 = MaxPooling3D(pool_size=(1, 2, 2),
                                  strides=(1, 2, 2),
                                  name='max1')(self.actv1)
        # 70 x 20 x 25 x 32
        self.drop1 = SpatialDropout3D(0.5)(self.maxp1)

        self.zero2 = ZeroPadding3D(padding=(1, 2, 2), name='zero2')(self.drop1)
        self.conv2 = Conv3D(64, (3, 5, 5),
                            strides=(1, 1, 1),
                            kernel_initializer='he_normal',
                            name='conv_2')(self.zero2)
        # 70 x 20 x 25 x 64
        self.batch2 = BatchNormalization(name='batch2')(self.conv2)
        self.actv2 = Activation('relu', name='actv2')(self.batch2)
        self.maxp2 = MaxPooling3D(pool_size=(1, 2, 2),
                                  strides=(1, 2, 2),
                                  name='max2')(self.actv2)
        # 70 x 10 x 12 x 64
        self.drop2 = SpatialDropout3D(0.5)(self.maxp2)

        self.zero3 = ZeroPadding3D(padding=(1, 1, 1), name='zero3')(self.drop2)
        self.conv3 = Conv3D(96, (3, 3, 3),
                            strides=(1, 1, 1),
                            kernel_initializer='he_normal',
                            name='conv3')(self.zero3)
        # 70 x 10 x 12 x 96
        self.batch3 = BatchNormalization(name='batch3')(self.conv3)
        self.actv3 = Activation('relu', name='actv3')(self.batch3)
        self.maxp3 = MaxPooling3D(pool_size=(1, 2, 2),
                                  strides=(1, 2, 2),
                                  name='max3')(self.actv3)
        # 70 x 5 x 6 x 96
        self.drop3 = SpatialDropout3D(0.5)(self.maxp3)

        # Flatten each frame's spatial dims, keeping the time axis.
        self.resh1 = TimeDistributed(Flatten())(self.drop3)
        # 70 x (5 x 6 x 96) = 70 x 2880

        self.gru_1 = Bidirectional(GRU(256,
                                       return_sequences=True,
                                       kernel_initializer='Orthogonal',
                                       name='gru1'),
                                   merge_mode='concat')(self.resh1)
        # 70 x (256 x 2) = 70 x 512
        # Second GRU has return_sequences=False (default), so the time
        # dimension collapses to a single 512-wide vector here.
        self.gru_2 = Bidirectional(GRU(256,
                                       kernel_initializer='Orthogonal',
                                       name='gru2'),
                                   merge_mode='concat')(self.gru_1)
        # 512

        # self.gru_1 = Bidirectional(GRU(256, kernel_initializer='Orthogonal', name='gru1'), merge_mode='concat')(self.resh1)
        # 512

        # transforms RNN output to classification:
        self.prediction = Dense(units=self.output_n,
                                activation='softmax',
                                kernel_initializer='he_normal',
                                name='predict')(self.gru_2)
        # 70 x (10)

        self.model = Model(inputs=self.input_data, outputs=self.prediction)
コード例 #11
0
def build_model(input_shape):
    """Build and compile a multi-task 3-D CNN over cubic input volumes.

    A shared two-stage stem (conv blocks plus a pooled-identity shortcut,
    concatenated on the channel axis) feeds four parallel branches that
    regress nodule diameter ('o_diam'), lobulation ('o_lob'), spiculation
    ('o_spic') and malignancy ('o_mal'), each trained with MAE.  Uses the
    project helpers ``conv_block`` and ``dense_branch`` and the Keras 1.x
    ``merge`` API (channels-first, ``concat_axis=1``).

    Args:
        input_shape: model input shape; ``input_shape[1]`` (the spatial
            side length) selects the initial learning rate.

    Returns:
        The compiled Keras Model.

    Raises:
        ValueError: if ``input_shape[1]`` is not one of 32, 64, 96, 128.
            (Previously an unhandled size fell through and crashed later
            with a NameError on ``lr_start``.)
    """
    xin = Input(input_shape)

    # Stem: conv block plus a pooled copy of the input ("identity" path),
    # concatenated on the channel axis so raw context is always available.
    x1 = conv_block(xin, 8, activation='relu')  #outputs 13 ch
    x1_ident = AveragePooling3D()(xin)
    x1_merged = merge([x1, x1_ident], mode='concat', concat_axis=1)

    x2_1 = conv_block(x1_merged, 24, activation='crelu')  #outputs 37 ch
    x2_ident = AveragePooling3D()(x1_ident)
    x2_merged = merge([x2_1, x2_ident], mode='concat', concat_axis=1)

    # By branching into four task-specific paths here we reduce the #params.
    x3_ident = AveragePooling3D()(x2_ident)

    x3_diam = conv_block(x2_merged, 36, activation='relu')
    x3_lob = conv_block(x2_merged, 36, activation='relu')
    x3_spic = conv_block(x2_merged, 36, activation='relu')
    x3_malig = conv_block(x2_merged, 36, activation='relu')

    # Dropout rates scale with the branch width (2/36, 4/48, 8/64).
    x3_diam = SpatialDropout3D(2.0 / 36.0)(x3_diam)
    x3_lob = SpatialDropout3D(2.0 / 36.0)(x3_lob)
    x3_spic = SpatialDropout3D(2.0 / 36.0)(x3_spic)
    x3_malig = SpatialDropout3D(2.0 / 36.0)(x3_malig)

    x3_diam_merged = merge([x3_diam, x3_ident], mode='concat', concat_axis=1)
    x3_lob_merged = merge([x3_lob, x3_ident], mode='concat', concat_axis=1)
    x3_spic_merged = merge([x3_spic, x3_ident], mode='concat', concat_axis=1)
    x3_malig_merged = merge([x3_malig, x3_ident], mode='concat', concat_axis=1)

    x4_ident = AveragePooling3D()(x3_ident)
    x4_diam = conv_block(x3_diam_merged, 48, activation='relu')
    x4_lob = conv_block(x3_lob_merged, 48, activation='relu')
    x4_spic = conv_block(x3_spic_merged, 48, activation='relu')
    x4_malig = conv_block(x3_malig_merged, 48, activation='relu')

    x4_diam = SpatialDropout3D(4.0 / 48.0)(x4_diam)
    x4_lob = SpatialDropout3D(4.0 / 48.0)(x4_lob)
    x4_spic = SpatialDropout3D(4.0 / 48.0)(x4_spic)
    x4_malig = SpatialDropout3D(4.0 / 48.0)(x4_malig)

    x4_diam_merged = merge([x4_diam, x4_ident], mode='concat', concat_axis=1)
    x4_lob_merged = merge([x4_lob, x4_ident], mode='concat', concat_axis=1)
    x4_spic_merged = merge([x4_spic, x4_ident], mode='concat', concat_axis=1)
    x4_malig_merged = merge([x4_malig, x4_ident], mode='concat', concat_axis=1)

    # Final conv stage without pooling.
    x5_diam = conv_block(x4_diam_merged, 64, pool=False)
    x5_lob = conv_block(x4_lob_merged, 64, pool=False)
    x5_spic = conv_block(x4_spic_merged, 64, pool=False)
    x5_malig = conv_block(x4_malig_merged, 64, pool=False)

    x5_diam = SpatialDropout3D(8.0 / 64.0)(x5_diam)
    x5_lob = SpatialDropout3D(8.0 / 64.0)(x5_lob)
    x5_spic = SpatialDropout3D(8.0 / 64.0)(x5_spic)
    x5_malig = SpatialDropout3D(8.0 / 64.0)(x5_malig)

    # Global max-pool each branch to a feature vector, then normalize.
    xpool_diam = BatchNormalization()(GlobalMaxPooling3D()(x5_diam))
    xpool_lob = BatchNormalization()(GlobalMaxPooling3D()(x5_lob))
    xpool_spic = BatchNormalization()(GlobalMaxPooling3D()(x5_spic))
    xpool_malig = BatchNormalization()(GlobalMaxPooling3D()(x5_malig))

    # Per-task dense heads, each a single ReLU-activated regression output.
    xout_diam = dense_branch(xpool_diam, name='o_diam', outsize=1,
                             activation='relu')
    xout_lob = dense_branch(xpool_lob, name='o_lob', outsize=1,
                            activation='relu')
    xout_spic = dense_branch(xpool_spic, name='o_spic', outsize=1,
                             activation='relu')
    xout_malig = dense_branch(xpool_malig, name='o_mal', outsize=1,
                              activation='relu')

    model = Model(input=xin,
                  output=[xout_diam, xout_lob, xout_spic, xout_malig])

    # Pick the initial learning rate from the input's spatial size.
    if input_shape[1] == 32:
        lr_start = .003
    elif input_shape[1] == 64:
        lr_start = .001
    elif input_shape[1] == 128:
        lr_start = 1e-4
    elif input_shape[1] == 96:
        lr_start = 5e-4
    else:
        # Fail fast instead of crashing below with a NameError on lr_start.
        raise ValueError('unsupported input size: %r' % (input_shape[1],))

    opt = Nadam(lr_start, clipvalue=1.0)
    # print() call form works under both Python 2 and Python 3
    # (the original used the Python-2-only print statement).
    print('compiling model')

    model.compile(optimizer=opt,
                  loss={
                      'o_diam': 'mae',
                      'o_lob': 'mae',
                      'o_spic': 'mae',
                      'o_mal': 'mae'
                  },
                  loss_weights={
                      'o_diam': 1.0,
                      'o_lob': 2.0,
                      'o_spic': 2.0,
                      'o_mal': 5.0
                  })
    return model