Example No. 1
def build_model(input_shape):

	xin = Input(input_shape)
	
	#shift the below down by one
	x1 = conv_block(xin,8,activation='relu')
	x1_ident = AveragePooling3D()(xin)
	x1_merged = merge([x1, x1_ident],mode='concat', concat_axis=1)
	
	x2_1 = conv_block(x1_merged,24,activation='relu') #outputs 37 ch
	x2_ident = AveragePooling3D()(x1_ident)
	x2_merged = merge([x2_1,x2_ident],mode='concat', concat_axis=1)
	
	#by branching we reduce the #params
	x3_ident = AveragePooling3D()(x2_ident)
	x3_malig = conv_block(x2_merged,48,activation='relu') #outputs 25 + 16 ch = 41
	x3_malig_merged = merge([x3_malig,x3_ident],mode='concat', concat_axis=1)
	
	x4_ident = AveragePooling3D()(x3_ident)
	x4_malig = conv_block(x3_malig_merged,64,activation='relu') #outputs 25 + 16 ch = 41
	x4_merged = merge([x4_malig,x4_ident],mode='concat', concat_axis=1)
	
	
	x5_malig = conv_block(x4_merged,64) #outputs 25 + 16 ch = 41
	xpool_malig = BatchNormalization(momentum=0.995)(GlobalMaxPooling3D()(x5_malig))
	xout_malig = Dense(1, name='o_mal', activation='relu')(xpool_malig) #relu output

	x5_diam = conv_block(x4_merged,64) #outputs 25 + 16 ch = 41
	xpool_diam = BatchNormalization(momentum=0.995)(GlobalMaxPooling3D()(x5_diam))
	xout_diam = Dense(1, name='o_diam', activation='relu')(xpool_diam) #relu output

	x5_lob = conv_block(x4_merged,64) #outputs 25 + 16 ch = 41
	xpool_lob = BatchNormalization(momentum=0.995)(GlobalMaxPooling3D()(x5_lob))
	xout_lob = Dense(1, name='o_lob', activation='relu')(xpool_lob) #relu output

	x5_spic = conv_block(x4_merged,64) #outputs 25 + 16 ch = 41
	xpool_spic = BatchNormalization(momentum=0.995)(GlobalMaxPooling3D()(x5_spic))
	xout_spic = Dense(1, name='o_spic', activation='relu')(xpool_spic) #relu output

	
	model = Model(input=xin,output=[xout_diam, xout_lob, xout_spic, xout_malig])
	
	if input_shape[1] == 32:
		lr_start = .01
	elif input_shape[1] == 64:
		lr_start = .003
	elif input_shape[1] == 128:
		lr_start = .002
	# elif input_shape[1] == 96:
		# lr_start = 5e-4
	
	opt = Nadam(lr_start,clipvalue=1.0)
	print('compiling model')

	model.compile(optimizer=opt,loss='mse',loss_weights={'o_diam':0.06, 'o_lob':0.5, 'o_spic':0.5, 'o_mal':1.0})
	return model
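A minimal call sketch (hypothetical: assumes channels-first cubic patches and the module's conv_block helper):

model = build_model((1, 64, 64, 64))   # hypothetical shape: (channels, z, y, x)
model.summary()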
Example No. 2
def build_model(input_shape):

    xin = Input(input_shape)

    #shift the below down by one
    x1 = conv_block(xin, 8, activation='relu')
    x1_ident = AveragePooling3D()(xin)
    x1_merged = merge([x1, x1_ident], mode='concat', concat_axis=1)

    x2_1 = conv_block(x1_merged, 24, activation='relu')  #outputs 37 ch
    x2_ident = AveragePooling3D()(x1_ident)
    x2_merged = merge([x2_1, x2_ident], mode='concat', concat_axis=1)

    #by branching we reduce the #params
    x3_ident = AveragePooling3D()(x2_ident)
    x3_malig = conv_block(x2_merged, 36,
                          activation='relu')  #outputs 25 + 16 ch = 41

    x3_malig_merged = merge([x3_malig, x3_ident], mode='concat', concat_axis=1)

    x4_ident = AveragePooling3D()(x3_ident)
    x4_malig = conv_block(x3_malig_merged, 48,
                          activation='relu')  #outputs 25 + 16 ch = 41

    x4_malig_merged = merge([x4_malig, x4_ident], mode='concat', concat_axis=1)

    x5_malig = conv_block(x4_malig_merged, 64)  #outputs 25 + 16 ch = 41

    xpool_malig = BatchNormalization(momentum=0.995)(
        GlobalMaxPooling3D()(x5_malig))
    xout_malig = Dense(1, name='o_mal',
                       activation='sigmoid')(xpool_malig)  #sigmoid output

    model = Model(input=xin, output=xout_malig)

    if input_shape[1] == 32:
        lr_start = .01
    elif input_shape[1] == 64:
        lr_start = .003
    elif input_shape[1] == 128:
        lr_start = .002
    # elif input_shape[1] == 96:
    # lr_start = 5e-4

    opt = Nadam(lr_start, clipvalue=1.0)
    print('compiling model')

    model.compile(optimizer=opt, loss='mae')
    return model
Example No. 3
def Attention_block(input_tensor,
                    spatial_attention=True,
                    temporal_attention=True):
    """

    @param input_tensor: input tensor
    @param spatial_attention: flag to enable spatial attention
    @param temporal_attention: flag to enable temporal attention
    @return: tensor after attention
    """
    tem = input_tensor
    # Wrap an arbitrary expression in a Layer object
    x = Lambda(channel_wise_mean)(input_tensor)
    x = keras.layers.Reshape([
        K.int_shape(input_tensor)[1],
        K.int_shape(input_tensor)[2],
        K.int_shape(input_tensor)[3], 1
    ])(x)

    nbSpatial = K.int_shape(input_tensor)[1] * K.int_shape(input_tensor)[2]
    nbTemporal = K.int_shape(input_tensor)[-2]

    if spatial_attention:
        spatial = AveragePooling3D(
            pool_size=[1, 1, K.int_shape(input_tensor)[-2]])(x)
        spatial = keras.layers.Flatten()(spatial)
        spatial = Dense(nbSpatial)(spatial)
        spatial = Activation('sigmoid')(spatial)
        spatial = keras.layers.Reshape(
            [K.int_shape(input_tensor)[1],
             K.int_shape(input_tensor)[2], 1, 1])(spatial)

        tem = keras.layers.multiply([input_tensor, spatial])

    if temporal_attention:
        temporal = AveragePooling3D(pool_size=[
            K.int_shape(input_tensor)[1],
            K.int_shape(input_tensor)[2], 1
        ])(x)
        temporal = keras.layers.Flatten()(temporal)
        temporal = Dense(nbTemporal)(temporal)
        temporal = Activation('sigmoid')(temporal)
        temporal = keras.layers.Reshape(
            [1, 1, K.int_shape(input_tensor)[-2], 1])(temporal)

        tem = keras.layers.multiply([temporal, tem])

    return tem
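Attention_block relies on a channel_wise_mean helper that is not shown; a minimal sketch of it, plus an illustrative call on a (rows, cols, time, channels) input, might look like this:

import keras
import keras.backend as K
from keras.layers import Input

def channel_wise_mean(t):
    # Assumed helper: average across the channel axis; the Reshape above
    # then restores a trailing singleton channel dimension.
    return K.mean(t, axis=-1)

inp = Input((9, 9, 128, 8))   # illustrative: 9x9 spatial grid, 128 time steps, 8 channels
att = Attention_block(inp, spatial_attention=True, temporal_attention=True)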
Example No. 4
def down_stage(ip,
               nb_layers,
               nb_filter,
               growth_rate,
               dropout_rate,
               weight_decay,
               compression,
               pooling=True):

    x0, nb_filter = __dense_block(ip,
                                  nb_layers,
                                  nb_filter,
                                  growth_rate,
                                  bottleneck=True,
                                  dropout_rate=dropout_rate,
                                  weight_decay=weight_decay)
    print('x:', K.int_shape(x0))
    print(nb_filter)
    x1 = transition_block(x0, nb_filter, weight_decay=weight_decay)
    x = transition_block(ip, nb_filter, weight_decay=weight_decay)
    #    addx=Apply_Attention(x1,x)
    addx = add([x, x1])
    if pooling:
        out = AveragePooling3D(strides=(2, 2, 2))(addx)
        return addx, out
    return addx
Example No. 5
def down_stage(ip,
               nb_layers,
               nb_filter,
               growth_rate,
               dropout_rate,
               weight_decay,
               compression,
               train_flage,
               name_flage,
               pooling=True):

    x0, nb_filter = __dense_block(ip,
                                  nb_layers,
                                  nb_filter,
                                  growth_rate,
                                  bottleneck=True,
                                  dropout_rate=dropout_rate,
                                  weight_decay=weight_decay,
                                  train_flage=train_flage,
                                  name_flage=name_flage + '_DB_')
    x1 = transition_block(x0,
                          nb_filter,
                          weight_decay=weight_decay,
                          train_flage=train_flage,
                          name_flage=name_flage + '_TB0_')
    x = transition_block(ip,
                         nb_filter,
                         weight_decay=weight_decay,
                         train_flage=train_flage,
                         name_flage=name_flage + '_TB1_')
    addx = add([x, x1], name=name_flage + 'ADD')
    if pooling:
        out = AveragePooling3D(strides=(2, 2, 2))(addx)
        return addx, out
    return addx
Example No. 6
def Gen_Dis_model(trable_flage=True, reduction=0.5, dropout_rate=0.3, weight_decay=5e-4):


    if K.image_data_format() == 'channels_last':
        img_input = Input(shape=(img_rows, img_cols, chan, 1))
        concat_axis = -1
    else:
        img_input = Input(shape=(1, chan, img_rows, img_cols))
        concat_axis = 1


    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'

    # compute compression factor
    compression = 1.0 - reduction
    nb_layers=[4,8,16,8,4,2]
    growth_rate=32
    # Initial convolution

    #stage1
    x1 = Conv3D(64, (3,3,3),strides=(1,1,1),kernel_initializer='he_normal',padding='same',
               use_bias=False, kernel_regularizer=l2(weight_decay),trainable=True,name='x1')(img_input)
    x = BatchNormalization(axis=concat_axis,trainable=True,name='x1_BN')(x1)
    x = Activation('relu')(x)
    x = AveragePooling3D((2,2,2))(x)

    #stage1
    s1_x0,s1_x = down_stage(x,nb_layers[0],0,growth_rate,dropout_rate,weight_decay,compression,train_flage=True,name_flage='s1')
  
    #stage2
    s2_x0,s2_x = down_stage(s1_x,nb_layers[1],0,growth_rate,dropout_rate,weight_decay,compression,train_flage=True,name_flage='s2')    

    #stage3 
    s3_x0 = down_stage(s2_x,nb_layers[2],0,growth_rate,dropout_rate,weight_decay,compression,train_flage=True,name_flage='s3',pooling=False)

    #stage4
    D1 = Decon_stage(s3_x0,256,kernel_size=(3,3,3),strides=(2,2,2),weight_decay=weight_decay,train_flage=True,name_flage='D1')
    con1 = add([D1,s2_x0])
    s4_x = up_stage(con1,nb_layers[3],0,growth_rate,dropout_rate,weight_decay,compression,train_flage=True,name_flage='s4')
 
    #stage5    
    D2 = Decon_stage(s4_x,128,kernel_size=(3,3,3),strides=(2,2,2),weight_decay=weight_decay,train_flage=True,name_flage='D2')
    con2 =add([D2,s1_x0])   
    s5_x = up_stage(con2,nb_layers[4],0,growth_rate,dropout_rate,weight_decay,compression,train_flage=True,name_flage='s5')    
   
    #stage6  
    D3 = Decon_stage(s5_x,64,kernel_size=(3,3,3),strides=(2,2,2),weight_decay=weight_decay,train_flage=True,name_flage='D3')

    con3 = add([D3,x1])
    s6_x = up_stage(con3,nb_layers[5],0,growth_rate,dropout_rate,weight_decay,compression,train_flage=True,name_flage='s6')       
    main_out = output(s6_x,weight_decay,train_flage=True,name_flage='out')  

    Discriminator_model.trainable=False
    GAN_loss = Discriminator_model()([s4_x,s5_x,s6_x])    
    model = Model(img_input, [main_out,main_out,GAN_loss]) 
    
    sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss=[Dist_Loss, 'binary_crossentropy', 'binary_crossentropy'], loss_weights=[0.1, 1.0, 1.0], metrics=[dice_coef])
    return model 
Example No. 7
def transition_block(input, nb_filter, dropout_rate=None, weight_decay=1E-4):
    ''' Apply a 1x1x1 Conv3D, optional dropout, 2x2x2 average pooling and BatchNorm
    Args:
        input: keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor, after applying conv, dropout, avgpool and batch_norm
    '''

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Convolution3D(nb_filter, (1, 1, 1),
                      kernel_initializer="he_uniform",
                      padding="same",
                      use_bias=False,
                      kernel_regularizer=l2(weight_decay))(input)
    if dropout_rate is not None:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling3D((2, 2, 2), strides=(2, 2, 2))(x)

    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)

    return x
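A quick shape check of the block (channels-last input and the usual Keras imports for the layers above assumed):

from keras.layers import Input
from keras.models import Model

inp = Input((16, 16, 16, 64))
out = transition_block(inp, nb_filter=32, dropout_rate=0.2)
Model(inp, out).summary()   # spatial dims halve: (16, 16, 16) -> (8, 8, 8)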
Example No. 8
def ASPP(x, filters):
    shape = x.shape

    y1 = AveragePooling3D(pool_size=(shape[1], shape[2], shape[3]))(x)
    y1 = Conv3D(filters // 2, 1, padding="same")(y1)
    y1 = BatchNormalization()(y1)
    y1 = Activation("relu")(y1)
    y1 = UpSampling3D((shape[1], shape[2], shape[3]))(y1)

    y2 = Conv3D(filters // 2, 1, dilation_rate=1, padding="same", use_bias=False)(x)
    y2 = BatchNormalization()(y2)
    y2 = Activation("relu")(y2)

    y3 = Conv3D(filters // 2, 3, dilation_rate=2, padding="same", use_bias=False)(x)
    y3 = BatchNormalization()(y3)
    y3 = Activation("relu")(y3)

    y4 = Conv3D(filters // 2, 3, dilation_rate=4, padding="same", use_bias=False)(x)
    y4 = BatchNormalization()(y4)
    y4 = Activation("relu")(y4)

    y5 = Conv3D(filters // 2, 3, dilation_rate=8, padding="same", use_bias=False)(x)
    y5 = BatchNormalization()(y5)
    y5 = Activation("relu")(y5)

    y = Concatenate()([y1, y2, y3, y4, y5])

    y = Conv3D(filters, 1, dilation_rate=1, padding="same", use_bias=False)(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)

    return y
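A minimal wiring sketch (tensorflow.keras imports are an assumption; the pool over the full spatial extent requires a static input shape so that shape[1:4] are plain integers):

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inp = Input((32, 32, 32, 64))   # static (D, H, W, C)
out = ASPP(inp, filters=64)
Model(inp, out).summary()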
Example No. 9
def CFF(input_list, input_size, filters, i):
    out_shape = input_size/pow(2,i)

    y = tf.zeros_like(input_list[i-1])
    for j,x in enumerate(input_list):
        if j < i-1:
            down_factor = int((input_size/pow(2,j+1)) / out_shape)
            x = AveragePooling3D((down_factor, down_factor, down_factor))(x)
            x = Conv3D(filters, (1, 1, 1), padding='same')(x)
            sigm = Activation('sigmoid')(x)
            x = Multiply()([x, sigm])
            y = Add()([y, x])
        if j > i-1:
            up_factor = int(out_shape / (input_size/pow(2,j+1)))
            x = Conv3D(filters, (1, 1, 1), padding='same')(x)
            x = UpSampling3D((up_factor, up_factor, up_factor))(x)
            sigm = Activation('sigmoid')(x)
            x = Multiply()([x, sigm])
            y = Add()([y,x])

    x_i = input_list[i-1]
    x_i_sigm = Activation('sigmoid')(x_i)
    x_i_sigm = -1 * x_i_sigm + 1
    out = Multiply()([x_i_sigm, y])
    out = Add()([out, x_i])
    return out
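A call sketch with a hypothetical three-level feature pyramid of a 64-cube volume, where level j sits at stride 2^(j+1); i=2 fuses everything into the stride-4 level:

from tensorflow.keras.layers import Input

feats = [Input((32, 32, 32, 16)),   # stride 2
         Input((16, 16, 16, 16)),   # stride 4
         Input((8, 8, 8, 16))]      # stride 8
fused = CFF(feats, input_size=64, filters=16, i=2)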
Example No. 10
def build_model(input_shape):

    xin = Input(input_shape)

    x1 = conv_block(xin,8,activation='crelu')
    x1_ident = AveragePooling3D()(xin)
    x1_merged = concatenate([x1, x1_ident], axis=1)
    
    x2_1 = conv_block(x1_merged,24,activation='crelu',init='orthogonal') 
    x2_ident = AveragePooling3D()(x1_ident)
    x2_merged = concatenate([x2_1,x2_ident], axis=1)
    
    #by branching we reduce the #params
    x3_1 = conv_block(x2_merged,36,activation='crelu',init='orthogonal') 
    x3_ident = AveragePooling3D()(x2_ident)
    x3_merged = concatenate([x3_1,x3_ident], axis=1)

    x4_1 = conv_block(x3_merged,36,activation='crelu',init='orthogonal') 
    x4_ident = AveragePooling3D()(x3_ident)
    x4_merged = concatenate([x4_1,x4_ident], axis=1)
    
    x5_1 = conv_block(x4_merged,64,pool=False,init='orthogonal') 
    
    xpool = BatchNormalization()(GlobalMaxPooling3D()(x5_1))
    
    xout = dense_branch(xpool,outsize=1,activation='sigmoid')
    
    
    model = Model(input=xin,output=xout)
    
    if input_shape[1] == 32:
        lr_start = 1e-5
    elif input_shape[1] == 64:
        lr_start = 1e-5
    elif input_shape[1] == 128:
        lr_start = 1e-4
    elif input_shape[1] == 96:
        lr_start = 5e-4
    elif input_shape[1] == 16:
        lr_start = 1e-6
        
    opt = Nadam(lr_start,clipvalue=1.0)
    print('compiling model')

    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
    return model
Example No. 11
def SqueezeModel2(input_tensor):
    droprate = 0.2
    x = Conv3D(filters=96,
               kernel_size=(1, 1, 1),
               strides = (1, 1, 1),
               padding='same',
               activation='relu',
               kernel_initializer=glorot_uniform(seed=1),
               bias_initializer='zeros', name="conv1")(input_tensor)

    x = Conv3D(filters=16, kernel_size=(1, 1, 1), kernel_initializer=glorot_uniform(seed=1), activation='relu', name="fire2_squeeze")(x)
    x = BatchNormalization()(x)
    x = Dropout(droprate)(x)
    expand1 = Conv3D(64, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire2_expand1")(x)
    expand2 = Conv3D(64, (3, 3, 3), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire2_expand2")(x)
    merge1 = concatenate([expand1, expand2], axis=4, name="merge_1")
    x = Conv3D(16, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire3_squeeze")(merge1)
    x = BatchNormalization()(x)
    x = Dropout(droprate)(x)
    expand1 = Conv3D(64, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire3_expand1")(x)
    expand2 = Conv3D(64, (3, 3, 3), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire3_expand2")(x)
    merge2 = concatenate([expand1, expand2], axis=4, name="merge_2")
    x = Conv3D(32, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire4_squeeze")(merge2)
    x = BatchNormalization()(x)
    x = Dropout(droprate)(x)
    expand1 = Conv3D(128, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire4_expand1")(x)
    expand2 = Conv3D(128, (3, 3, 3), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire4_expand2")(x)
    merge3 = concatenate([expand1, expand2], axis=4, name="merge_3")
    maxpool4 = MaxPooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2), name="maxpool_4")(merge3)
    x = Conv3D(filters=32, kernel_size=(1, 1, 1), activation='relu', name="fire5_squeeze")(maxpool4)
    x = BatchNormalization()(x)
    x = Dropout(droprate)(x)
    expand1 = Conv3D(128, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire5_expand1")(x)
    expand2 = Conv3D(128, (3, 3, 3), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire5_expand2")(x)
    merge4 = concatenate([expand1, expand2], axis=4, name="merge_4")
    x = Conv3D(48, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire6_squeeze")(merge4)
    x = BatchNormalization()(x)
    x = Dropout(droprate)(x)
    expand1 = Conv3D(192, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire6_expand1")(x)
    expand2 = Conv3D(192, (3, 3, 3), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire6_expand2")(x)
    merge5 = concatenate([expand1, expand2], axis=4, name="merge_5")
    x = Conv3D(48, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire7_squeeze")(merge5)
    x = BatchNormalization()(x)
    x = Dropout(droprate)(x)
    expand1 = Conv3D(192, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire7_expand1")(x)
    expand2 = Conv3D(192, (3, 3, 3), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire7_expand2")(x)
    merge6 = concatenate([expand1, expand2], axis=4, name="merge_6")
    x = Conv3D(64, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire8_squeeze")(merge6)
    x = BatchNormalization()(x)
    x = Dropout(droprate)(x)
    expand1 = Conv3D(256, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire8_expand1")(x)
    expand2 = Conv3D(256, (3, 3, 3), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire8_expand2")(x)
    merge7 = concatenate([expand1, expand2], axis=4, name="merge_7")
    avgpool = AveragePooling3D(pool_size=(3, 3, 3), padding='same', name="avg8")(merge7)
    flatten = Flatten(name="flatten")(avgpool)
    output = Dense(1, activation='linear', kernel_initializer=glorot_uniform(seed=1))(flatten)

    return output
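A usage sketch (hypothetical single-channel 32-cube input; assumes the usual tensorflow.keras imports for the layers above):

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inp = Input((32, 32, 32, 1))
model = Model(inp, SqueezeModel2(inp))
model.summary()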
Example No. 12
def PEE(x, filters):
    if filters > 30:
        pool_size_1 = (3, 3, 3)
        pool_size_2 = (5, 5, 5)
    else:
        pool_size_1 = (5, 5, 5)
        pool_size_2 = (7, 7, 7)

    x = Conv3D(filters // 2, (1, 1, 1), padding='same')(x)
    x_1 = AveragePooling3D(pool_size=pool_size_1, strides = (1, 1, 1), padding='same')(x)
    x_2 = AveragePooling3D(pool_size=pool_size_2, strides = (1, 1, 1), padding='same')(x)

    x_11 = Subtract()([x, x_1])
    x_22 = Subtract()([x, x_2])

    x = Concatenate()([x, x_11, x_22])
    x = Conv3D(filters, (1, 1, 1), padding='same')(x)
    return x
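An illustrative call; the pool-size pair is picked from the filter count (filters above 30 selects the 3-cube and 5-cube windows):

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inp = Input((24, 24, 24, 32))
out = PEE(inp, filters=32)
Model(inp, out).summary()   # output keeps the input resolution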
Example No. 13
def Attention_block(input_tensor,
                    spatial_attention=True,
                    temporal_attention=True):
    tem = input_tensor
    x = Lambda(channel_wise_mean)(input_tensor)
    x = keras.layers.Reshape([
        K.int_shape(input_tensor)[1],
        K.int_shape(input_tensor)[2],
        K.int_shape(input_tensor)[3], 1
    ])(x)

    nbSpatial = K.int_shape(input_tensor)[1] * K.int_shape(input_tensor)[2]
    nbTemporal = K.int_shape(input_tensor)[-2]

    if spatial_attention:
        spatial = AveragePooling3D(
            pool_size=[1, 1, K.int_shape(input_tensor)[-2]])(x)
        spatial = keras.layers.Flatten()(spatial)
        spatial = Dense(nbSpatial)(spatial)
        spatial = Activation('sigmoid')(spatial)
        spatial = keras.layers.Reshape(
            [K.int_shape(input_tensor)[1],
             K.int_shape(input_tensor)[2], 1, 1])(spatial)

        tem = keras.layers.multiply([input_tensor, spatial])

    if temporal_attention:
        temporal = AveragePooling3D(pool_size=[
            K.int_shape(input_tensor)[1],
            K.int_shape(input_tensor)[2], 1
        ])(x)
        temporal = keras.layers.Flatten()(temporal)
        temporal = Dense(nbTemporal)(temporal)
        temporal = Activation('sigmoid')(temporal)
        temporal = keras.layers.Reshape(
            [1, 1, K.int_shape(input_tensor)[-2], 1])(temporal)

        tem = keras.layers.multiply([temporal, tem])

    return tem
Example No. 14
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv3D(int(nb_filter * compression), (1, 1, 1),
               kernel_initializer='he_normal',
               padding='same',
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling3D((2, 2, 2), strides=(2, 2, 2))(x)

    return x
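A shape sketch; compression=0.5 halves the channels while the 2x2x2 pooling halves each spatial dimension (usual Keras imports for the layers above assumed):

from keras.layers import Input
from keras.models import Model

inp = Input((16, 16, 16, 96))
out = __transition_block(inp, nb_filter=96, compression=0.5)   # -> (8, 8, 8, 48)
Model(inp, out).summary()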
Example No. 15
def DenseNetTransit(x, rate=1, name=None):
    if rate != 1:
        out_features = int(x.get_shape().as_list()[-1] * rate)
        x = BatchNormalization(center=True, scale=True, name=name + '_bn')(x)
        x = Activation('relu', name=name + '_relu')(x)
        x = Conv3D(filters=out_features,
                   kernel_size=1,
                   strides=1,
                   padding='same',
                   kernel_initializer='he_normal',
                   use_bias=False,
                   name=name + '_conv')(x)
    x = AveragePooling3D(pool_size=2, strides=2, padding='same')(x)
    return x
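A usage sketch; with rate != 1 the 1x1x1 convolution rescales the channel count before the stride-2 pooling:

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inp = Input((16, 16, 16, 64))
out = DenseNetTransit(inp, rate=0.5, name='transit1')   # -> (8, 8, 8, 32)
Model(inp, out).summary()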
Example No. 16
def transition_block(x,
                     stage,
                     nb_filter,
                     compression=1.0,
                     dropout_rate=None,
                     weight_decay=1E-4):
    ''' Apply BatchNorm, 1x1 Convolution, averagePooling, optional compression, dropout 
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_filter: number of filters
            compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
            dropout_rate: dropout rate
            weight_decay: weight decay factor
    '''

    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_blk'
    relu_name_base = 'relu' + str(stage) + '_blk'
    pool_name_base = 'pool' + str(stage)

    ## 1x1x1 convolution
    x = BatchNormalization(epsilon=eps,
                           axis=concat_axis,
                           name=conv_name_base + '_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base + '_scale')(x)
    x = Activation('relu', name=relu_name_base)(x)
    x = Convolution3D(int(nb_filter * compression),
                      1,
                      1,
                      1,
                      name=conv_name_base,
                      bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    ## 2x2x2 avg pooling
    x = AveragePooling3D((2, 2, 2), strides=(2, 2, 2), name=pool_name_base)(x)

    return x
Example No. 17
        def conv3d(layer_input, filters, f_size=3, bn=True):
            """Layers used during downsampling"""
            if self.settings['POOLING'] == "MAX":
                d = Conv3D(filters, kernel_size=f_size, strides=1, padding='same')(layer_input)
                d = MaxPooling3D(padding='same', data_format="channels_last")(d)
            elif self.settings['POOLING'] == "AVERAGE":
                d = Conv3D(filters, kernel_size=f_size, strides=1, padding='same')(layer_input)
                d = AveragePooling3D(padding='same', data_format="channels_last")(d)
            elif self.settings['POOLING'] == "NONE":
                d = Conv3D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)

            # GANHACKS: add dropout with specified value
            if self.settings['GANHACKS']:
                d = Dropout(rate=self.settings['DROPOUT'])(d)
            if bn and self.settings['BATCH_NORMALIZATION']:
                d = BatchNormalization(momentum=0.8)(d)
            # GANHACKS: adding gaussian noise to every layer of G (Zhao et. al. EBGAN)
            if self.settings['GANHACKS']:
                d = GaussianNoise(stddev=self.settings['GAUSSIAN_NOISE_TO_G'])(d)
            print('downsampling:\t\t\t', d.shape)
            return d
Example No. 18
def dense_model():
    model = Sequential()
    model.add(Reshape((100, 100, 4, 1), input_shape=(100, 100, 4)))
    model.add(
        Lambda(
            lambda x: 2 * x - 1.,
            batch_input_shape=(1, 100, 100, 4),  # 100by100by2
            output_shape=(100, 100, 4, 1)))  # 100by100by2
    model.add(
        Convolution3D(16,
                      kernel_size=(8, 8, 2),
                      subsample=(4, 4, 1),
                      border_mode="valid",
                      bias_initializer="random_uniform"))
    model.add(ELU())
    model.add(AveragePooling3D(pool_size=(2, 2, 1)))
    model.add(
        Convolution3D(16,
                      kernel_size=(4, 4, 2),
                      subsample=(2, 2, 1),
                      border_mode="valid",
                      bias_initializer="random_uniform"))
    model.add(ELU())
    model.add(
        Convolution3D(24,
                      kernel_size=(3, 3, 2),
                      subsample=(1, 1, 1),
                      border_mode="valid",
                      bias_initializer="random_uniform"))
    model.add(ELU())
    model.add(Flatten())
    model.add(Dense(256, bias_initializer="random_uniform"))
    model.add(ELU())
    model.add(Dense(32, activation="relu", bias_initializer="random_uniform"))
    model.add(Dense(2, bias_initializer="random_uniform"))

    sgd = keras.optimizers.Adam(lr=1e-4, decay=1e-8)
    model.compile(optimizer=sgd, loss="mse")
    return model
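The Convolution3D calls above mix Keras 1 keywords (subsample, border_mode) with Keras 2 ones, so they likely only run on a transitional Keras that still accepted the legacy names; on a current Keras the first convolution would be spelled roughly as below (shown for the first layer only):

from tensorflow.keras.layers import Conv3D

conv1 = Conv3D(16, kernel_size=(8, 8, 2), strides=(4, 4, 1),
               padding='valid', bias_initializer='random_uniform')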
Example No. 19
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    """

    @param ip: keras tensor
    @param nb_filter: number of filters
    @param compression:
        compression below 1 gives DenseNet-C; below 1 with a bottleneck gives DenseNet-BC.
        Calculated as 1 - reduction; reduces the number of feature maps in the transition block.
    @param weight_decay: weight decay factor
    @return x: keras tensor, after applying batch_norm, relu-conv and average pooling
    """
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv3D(int(nb_filter * compression), (1, 1, 1),
               kernel_initializer='he_normal',
               padding='same',
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling3D((2, 2, 2), strides=(2, 2, 2))(x)

    return x
Example No. 20
def __transition_block(ip,
                       nb_filter,
                       compression=1.0,
                       dropout_rate=None,
                       weight_decay=1E-4):
    ''' Apply BatchNorm, ReLU, 1x1x1 Conv3D, optional compression, dropout and 2x2x2 average pooling
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
                    in the transition block.
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu-conv, dropout and avg pooling
    '''

    concat_axis = 1 if K.image_dim_ordering() == "th" else -1

    x = BatchNormalization(mode=0,
                           axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(ip)
    x = Activation('relu')(x)
    x = Convolution3D(int(nb_filter * compression),
                      1,
                      1,
                      1,
                      init="he_uniform",
                      border_mode="same",
                      bias=False,
                      W_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling3D((2, 2, 2), strides=(2, 2, 2))(x)

    return x
Example No. 21

def build_model(input_shape):

    xin = Input(input_shape)

    #shift the below down by one
    x1 = conv_block(xin, 8, activation='crelu')  #outputs 13 ch
    x1_ident = AveragePooling3D()(xin)
    x1_merged = merge([x1, x1_ident], mode='concat', concat_axis=1)

    x2_1 = conv_block(x1_merged,
                      24,
                      activation='crelu',
                      init=looks_linear_init)  #outputs 37 ch
    x2_ident = AveragePooling3D()(x1_ident)
    x2_merged = merge([x2_1, x2_ident], mode='concat', concat_axis=1)

    #by branching we reduce the #params
    x3_ident = AveragePooling3D()(x2_ident)

    x3_diam = conv_block(x2_merged,
                         36,
                         activation='crelu',
                         init=looks_linear_init)  #outputs 25 + 16 ch = 41
    x3_lob = conv_block(x2_merged,
                        36,
                        activation='crelu',
                        init=looks_linear_init)  #outputs 25 + 16 ch = 41
    x3_spic = conv_block(x2_merged,
                         36,
                         activation='crelu',
                         init=looks_linear_init)  #outputs 25 + 16 ch = 41
    x3_malig = conv_block(x2_merged,
                          36,
                          activation='crelu',
                          init=looks_linear_init)  #outputs 25 + 16 ch = 41

    x3_diam_merged = merge([x3_diam, x3_ident], mode='concat', concat_axis=1)
    x3_lob_merged = merge([x3_lob, x3_ident], mode='concat', concat_axis=1)
    x3_spic_merged = merge([x3_spic, x3_ident], mode='concat', concat_axis=1)
    x3_malig_merged = merge([x3_malig, x3_ident], mode='concat', concat_axis=1)

    x4_ident = AveragePooling3D()(x3_ident)
    x4_diam = conv_block(x3_diam_merged,
                         36,
                         activation='crelu',
                         init=looks_linear_init)  #outputs 25 + 16 ch = 41
    x4_lob = conv_block(x3_lob_merged,
                        36,
                        activation='crelu',
                        init=looks_linear_init)  #outputs 25 + 16 ch = 41
    x4_spic = conv_block(x3_spic_merged,
                         36,
                         activation='crelu',
                         init=looks_linear_init)  #outputs 25 + 16 ch = 41
    x4_malig = conv_block(x3_malig_merged,
                          36,
                          activation='crelu',
                          init=looks_linear_init)  #outputs 25 + 16 ch = 41

    x4_diam_merged = merge([x4_diam, x4_ident], mode='concat', concat_axis=1)
    x4_lob_merged = merge([x4_lob, x4_ident], mode='concat', concat_axis=1)
    x4_spic_merged = merge([x4_spic, x4_ident], mode='concat', concat_axis=1)
    x4_malig_merged = merge([x4_malig, x4_ident], mode='concat', concat_axis=1)

    x5_diam = conv_block(x4_diam_merged,
                         64,
                         pool=False,
                         init=looks_linear_init)  #outputs 25 + 16 ch = 41
    x5_lob = conv_block(x4_lob_merged, 64, pool=False,
                        init=looks_linear_init)  #outputs 25 + 16 ch = 41
    x5_spic = conv_block(x4_spic_merged,
                         64,
                         pool=False,
                         init=looks_linear_init)  #outputs 25 + 16 ch = 41
    x5_malig = conv_block(x4_malig_merged,
                          64,
                          pool=False,
                          init=looks_linear_init)  #outputs 25 + 16 ch = 41

    xpool_diam = BatchNormalization()(GlobalMaxPooling3D()(x5_diam))
    xpool_lob = BatchNormalization()(GlobalMaxPooling3D()(x5_lob))
    xpool_spic = BatchNormalization()(GlobalMaxPooling3D()(x5_spic))
    xpool_malig = BatchNormalization()(GlobalMaxPooling3D()(x5_malig))

    #from here let's branch and predict different things
    xout_diam = dense_branch(xpool_diam,
                             name='o_d',
                             outsize=1,
                             activation='relu')
    xout_lob = dense_branch(xpool_lob,
                            name='o_lob',
                            outsize=1,
                            activation='sigmoid')
    xout_spic = dense_branch(xpool_spic,
                             name='o_spic',
                             outsize=1,
                             activation='sigmoid')
    xout_malig = dense_branch(xpool_malig,
                              name='o_mal',
                              outsize=1,
                              activation='sigmoid')

    #sphericity
    # xout_spher= dense_branch(xpool_norm,name='o_spher',outsize=4,activation='softmax')

    # xout_text = dense_branch(xpool_norm,name='o_t',outsize=4,activation='softmax')

    #calcification
    # xout_calc = dense_branch(xpool_norm,name='o_c',outsize=7,activation='softmax')

    model = Model(input=xin,
                  output=[xout_diam, xout_lob, xout_spic, xout_malig])

    if input_shape[1] == 32:
        lr_start = .003
    elif input_shape[1] == 64:
        lr_start = .001
    elif input_shape[1] == 128:
        lr_start = 1e-4
    elif input_shape[1] == 96:
        lr_start = 5e-4

    opt = Nadam(lr_start, clipvalue=1.0)
    print('compiling model')

    model.compile(optimizer=opt,
                  loss={
                      'o_d': 'mse',
                      'o_lob': 'binary_crossentropy',
                      'o_spic': 'binary_crossentropy',
                      'o_mal': 'binary_crossentropy'
                  },
                  loss_weights={
                      'o_d': 1.0,
                      'o_lob': 5.0,
                      'o_spic': 5.0,
                      'o_mal': 5.0
                  })
    return model
Example No. 22
def build_model(input_shape):

    xin = Input(input_shape)

    #shift the below down by one
    x1 = conv_block(xin, 8, norm=True, drop_rate=0)  #outputs 9 ch
    x1_ident = AveragePooling3D()(xin)
    x1_merged = merge([x1, x1_ident], mode='concat', concat_axis=1)

    x2_1 = conv_block(x1_merged, 24, norm=True,
                      drop_rate=0)  #outputs 16+9 ch  = 25
    x2_ident = AveragePooling3D()(x1_ident)
    x2_merged = merge([x2_1, x2_ident], mode='concat', concat_axis=1)

    #by branching we reduce the #params
    x3_1 = conv_block(x2_merged, 64, norm=True,
                      drop_rate=0)  #outputs 25 + 16 ch = 41
    x3_ident = AveragePooling3D()(x2_ident)
    x3_merged = merge([x3_1, x3_ident], mode='concat', concat_axis=1)

    x4_1 = conv_block(x3_merged, 72, norm=True,
                      drop_rate=0)  #outputs 25 + 16 ch = 41
    x4_ident = AveragePooling3D()(x3_ident)
    x4_merged = merge([x4_1, x4_ident], mode='concat', concat_axis=1)

    x5_1 = conv_block(x4_merged, 72, norm=True, pool=False,
                      drop_rate=0)  #outputs 25 + 16 ch = 41

    xpool = GlobalMaxPooling3D()(x5_1)
    xpool_norm = BatchNormalization()(xpool)
    #xpool_norm = GaussianDropout(.1)(xpool_norm)

    #from here let's branch and predict different things
    xout_diam = dense_branch(xpool_norm,
                             name='o_d',
                             outsize=1,
                             activation='relu')

    #sphericity
    # xout_spher= dense_branch(xpool_norm,name='o_spher',outsize=4,activation='softmax')

    # xout_text = dense_branch(xpool_norm,name='o_t',outsize=4,activation='softmax')

    #calcification
    # xout_calc = dense_branch(xpool_norm,name='o_c',outsize=7,activation='softmax')
    xout_cad_falsepositive = dense_branch(xpool_norm,
                                          name='o_fp',
                                          outsize=3,
                                          activation='softmax')

    # xout_cat = merge([xout_text,xout_spher,xout_calc],name='o_cat',mode='concat', concat_axis=1)

    xout_margin = dense_branch(xpool_norm,
                               name='o_marg',
                               outsize=1,
                               activation='sigmoid')
    xout_lob = dense_branch(xpool_norm,
                            name='o_lob',
                            outsize=1,
                            activation='sigmoid')
    xout_spic = dense_branch(xpool_norm,
                             name='o_spic',
                             outsize=1,
                             activation='sigmoid')
    xout_malig = dense_branch(xpool_norm,
                              name='o_mal',
                              outsize=1,
                              activation='sigmoid')

    # xout_numeric = merge([xout_margin, xout_lob, xout_spic, xout_malig],name='o_num',mode='concat',concat_axis=1)

    model = Model(input=xin,
                  output=[
                      xout_diam, xout_lob, xout_spic, xout_malig,
                      xout_cad_falsepositive
                  ])

    if input_shape[1] == 32:
        lr_start = .005
    elif input_shape[1] == 64:
        lr_start = .001
    elif input_shape[1] == 128:
        lr_start = 1e-4
    elif input_shape[1] == 96:
        lr_start = 5e-4

    opt = Nadam(lr_start, clipvalue=1.0)
    print('compiling model')

    model.compile(optimizer=opt,
                  loss={
                      'o_d': 'mse',
                      'o_lob': 'binary_crossentropy',
                      'o_spic': 'binary_crossentropy',
                      'o_mal': 'binary_crossentropy',
                      'o_fp': 'categorical_crossentropy'
                  },
                  loss_weights={
                      'o_d': 1.0,
                      'o_lob': 5.0,
                      'o_spic': 5.0,
                      'o_mal': 5.0,
                      'o_fp': 5.0
                  })
    return model
Example No. 23
def Dense_net(growth_rate=16,
              reduction=0.5,
              dropout_rate=0.3,
              weight_decay=5e-4,
              upsampling_type='upsampling',
              init_conv_filters=32):
    ''' Build the DenseNet model
    Args:
        nb_classes: number of classes
        img_input: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        include_top: flag to include the final Dense layer
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        reduction: reduction factor of transition blocks. Note : reduction value is inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay
        nb_layers_per_block: number of layers in each dense block.
            Can be a positive integer or a list.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be (nb_dense_block + 1)
        nb_upsampling_conv: number of convolutional layers in upsampling via subpixel convolution
        upsampling_type: Can be one of 'upsampling', 'deconv' and 'subpixel'. Defines
            type of upsampling algorithm used.
        input_shape: Only used for shape inference in fully convolutional networks.
        activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
                    Note that if sigmoid is used, classes must be 1.
    Returns: keras tensor with nb_layers of conv_block appended
    '''

    if K.image_data_format() == 'channels_last':
        img_input = Input(shape=(img_rows, img_cols, chan, 1))
        concat_axis = -1
    else:
        img_input = Input(shape=(1, chan, img_rows, img_cols))
        concat_axis = 1

    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'

    # compute compression factor
    compression = 1.0 - reduction
    nb_layers = [4, 8, 16, 8, 4, 2]
    growth_rate = 32
    # Initial convolution

    x1 = Conv3D(64, (3, 3, 3),
                strides=(1, 1, 1),
                kernel_initializer='he_normal',
                padding='same',
                use_bias=False,
                kernel_regularizer=l2(weight_decay))(img_input)
    x = BatchNormalization(axis=concat_axis)(x1)
    x = Activation('relu')(x)
    x = AveragePooling3D((2, 2, 2))(x)
    print('x:', K.int_shape(x))

    # Add dense blocks and transition down block
    #stage1
    #  nb_layers[0]=int((nb_filter[0]-32)/growth_rate)
    print('nb_layers:', (nb_layers[0]))
    s1_x0, s1_x = down_stage(x, nb_layers[0], 0, growth_rate, dropout_rate,
                             weight_decay, compression)
    print('s10,s11:', K.int_shape(s1_x0), K.int_shape(s1_x))

    #stage2
    #   nb_layers[1]=int((nb_filter[1]-nb_layers[0])/growth_rate)
    print('nb_layers:', (nb_layers[1]))
    s2_x0, s2_x = down_stage(s1_x, nb_layers[1], 0, growth_rate, dropout_rate,
                             weight_decay, compression)
    print('s20,s21:', K.int_shape(s2_x0), K.int_shape(s2_x))

    #stage3
    #   nb_layers[2]=int((nb_filter[2]-nb_layers[1])/growth_rate)
    print('nb_layers:', (nb_layers[2]))
    s3_x0 = down_stage(s2_x,
                       nb_layers[2],
                       0,
                       growth_rate,
                       dropout_rate,
                       weight_decay,
                       compression,
                       pooling=False)
    print('s3:', K.int_shape(s3_x0))

    #stage4
    D1 = Decon_stage(s3_x0,
                     256,
                     kernel_size=(3, 3, 3),
                     strides=(2, 2, 2),
                     weight_decay=weight_decay)
    print('D1:', K.int_shape(D1))

    con1 = Apply_Attention(D1, s2_x0)  #add([D1,s2_x0])
    #   nb_layers[3]=int((nb_filter[3]-nb_layers[2])/growth_rate)
    print('nb_layers:', (nb_layers[3]))
    s4_x = up_stage(con1, nb_layers[3], 0, growth_rate, dropout_rate,
                    weight_decay, compression)
    print('s4:', K.int_shape(s4_x))

    #stage5
    D2 = Decon_stage(s4_x,
                     128,
                     kernel_size=(3, 3, 3),
                     strides=(2, 2, 2),
                     weight_decay=weight_decay)
    print('D2:', K.int_shape(D2))
    con2 = Apply_Attention(D2, s1_x0)  #add([D2,s1_x0])
    #    nb_layers[4]=int((nb_filter[4]-nb_layers[3])/growth_rate)
    print('nb_layers:', (nb_layers[4]))
    s5_x = up_stage(con2, nb_layers[4], 0, growth_rate, dropout_rate,
                    weight_decay, compression)
    print('s5:', K.int_shape(s5_x))

    #stage6
    D3 = Decon_stage(s5_x,
                     64,
                     kernel_size=(3, 3, 3),
                     strides=(2, 2, 2),
                     weight_decay=weight_decay)
    print('D3:', K.int_shape(D3))
    con3 = Apply_Attention(D3, x1)  #add([D3,x1])
    print('con3:', K.int_shape(con3))
    s6_x = up_stage(con3, nb_layers[5], 0, growth_rate, dropout_rate,
                    weight_decay, compression)
    print('s6:', K.int_shape(s6_x))
    #########################################################################

    #    output1=Deconv3D(1,(8,8,8),strides=(4,4,4),padding='same')(D1)
    #    output1=Activation('relu')(output1)
    output1 = side_out(D1, 4, weight_decay)  #,output_shape=(None,1,16,64,64)
    output2 = side_out(D2, 2, weight_decay)
    output3 = output(D3, weight_decay)  #,output_shape=(None,1,16,64,64)
    main_out = output(s6_x, weight_decay)
    print(K.int_shape(output1))
    print(K.int_shape(output2))
    print(K.int_shape(output3))
    print(K.int_shape(main_out))
    model = Model(img_input, [output1, output2, output3, main_out])
    #  plot_model(model, to_file='Vnet.png',show_shapes=True)
    sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='binary_crossentropy',
                  loss_weights=[0.1, 0.15, 0.25, 0.5],
                  metrics=[dice_coef])  # alternative: loss_weights=[0.3, 0.6, 1.0]
    return model
Example No. 24
def sofiamodel(Inputs, nclasses, nregressions, dropoutRate=0.05, momentum=0.6):
    #image = Input(shape=(25, 25, 25, 1))

    x = Inputs[1]
    globals = Inputs[0]
    totalrecenergy = Inputs[2]
    totalrecenergy = Dense(1, kernel_initializer='zeros',
                           trainable=False)(totalrecenergy)

    x = Convolution3D(32,
                      kernel_size=(1, 1, 1),
                      strides=(1, 1, 1),
                      activation='relu',
                      kernel_initializer='lecun_uniform',
                      kernel_regularizer=l2(l2_lambda))(x)
    x = Dropout(dropoutRate)(x)
    x = Convolution3D(12,
                      kernel_size=(1, 1, 1),
                      strides=(1, 1, 1),
                      activation='relu',
                      kernel_initializer='lecun_uniform')(x)
    x = BatchNormalization(momentum=momentum)(x)
    preprocessed = Dropout(dropoutRate)(x)

    x = Convolution3D(32, (5, 5, 5),
                      border_mode='same',
                      kernel_regularizer=l2(l2_lambda))(preprocessed)
    x = LeakyReLU()(x)
    x = Dropout(dropoutRate)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = Convolution3D(8, (5, 5, 5),
                      border_mode='valid',
                      kernel_regularizer=l2(l2_lambda))(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(dropoutRate)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = Convolution3D(8, (5, 5, 5),
                      border_mode='valid',
                      kernel_regularizer=l2(l2_lambda))(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(dropoutRate)(x)

    x = ZeroPadding3D((1, 1, 1))(x)
    x = Convolution3D(
        8,
        (5, 5, 5),
        border_mode='valid',
        kernel_regularizer=l2(l2_lambda),
    )(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(dropoutRate)(x)

    x = AveragePooling3D((2, 2, 6))(x)
    x = Flatten()(x)

    smallshower = Cropping3D(cropping=((4, 4), (4, 4), (0, 0)),
                             name='muoncrop')(preprocessed)
    smallshower = Convolution3D(
        32,
        (3, 3, 5),
        strides=(2, 2, 3),
        border_mode='same',
        name='muon0',
        kernel_regularizer=l2(l2_lambda),
    )(smallshower)
    smallshower = LeakyReLU()(smallshower)
    smallshower = BatchNormalization()(smallshower)
    smallshower = Dropout(dropoutRate)(smallshower)
    smallshower = Convolution3D(
        16,
        (1, 1, 5),
        strides=(1, 1, 3),
        border_mode='same',
        name='muon1',
        kernel_regularizer=l2(l2_lambda),
    )(smallshower)
    smallshower = LeakyReLU()(smallshower)
    smallshower = BatchNormalization()(smallshower)
    smallshower = Dropout(dropoutRate)(smallshower)
    smallshower = Convolution3D(
        4,
        (1, 1, 5),
        strides=(1, 1, 3),
        border_mode='same',
        name='muon2',
        kernel_regularizer=l2(l2_lambda),
    )(smallshower)
    smallshower = LeakyReLU()(smallshower)
    smallshower = BatchNormalization()(smallshower)
    smallshower = Dropout(dropoutRate)(smallshower)
    flattenedsmall = Flatten()(smallshower)

    merged = Concatenate()([
        globals, x, totalrecenergy, flattenedsmall
    ])  #add the inputs again in case some don't like the multiplications

    x = Dense(32,
              activation='relu',
              kernel_initializer='lecun_uniform',
              name='firstDense')(merged)
    x = Dense(1,
              activation='linear',
              kernel_initializer='lecun_uniform',
              use_bias=False)(x)
    predictE = Dense(1,
                     activation='linear',
                     kernel_initializer='lecun_uniform',
                     name='pred_E_corr')(x)

    predictID = Dense(nclasses,
                      activation='softmax',
                      kernel_initializer='lecun_uniform',
                      name='ID_pred')(x)

    predictions = [predictID, predictE]

    model = Model(inputs=Inputs, outputs=predictions)
    return model
Example No. 25

    # Initialize model and set model-specific variables
    output_units = train_gen.get_output_shape()[-1]
    sp_block_filters = [128, 256, 512, 768]
    downsize_values = [(6,6), (5,5), (4,4)]
    
    pcn_cell = pcn.PCN_Cell(output_units,
                                stem_plus_block_filters=sp_block_filters,
                                time_steps_p_block=[1, 2, 3],
                                downsize_block_indices=[0, 1, 2],
                                downsize_values=downsize_values,
                                name='pcn_cell')
    
    
    # Use AveragePooling3D with pool_size=(1, X, Y) to reduce size of input frames
    input = Input(train_gen.get_input_shape()[1:])
    input_aver = AveragePooling3D(pool_size=(3, 2, 2))(input)
    
    final_layer = pcn_cell(input_aver)    
    if pcn_cell.total_states > 0:
        final_layer = final_layer[0]
    
    final_preds = Activation('softmax')(final_layer)    
    model = Model(input, final_preds)
    
    
    # Print model summary
    model_pcn_layer = model.get_layer('pcn_cell')
    gen_utils.print_internal_RNN_layers(model_pcn_layer)
    model.summary()

    
Example No. 26
def DenseNet(nb_dense_block=3,
             growth_rate=24,
             nb_filter=64,
             reduction=0.0,
             dropout_rate=0.0,
             weight_decay=1e-4,
             classes=1000,
             weights_path=None):
    '''Instantiate the DenseNet 121 architecture,
        # Arguments
            nb_dense_block: number of dense blocks to add to end
            growth_rate: number of filters to add per dense block
            nb_filter: initial number of filters
            reduction: reduction factor of transition blocks.
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            classes: optional number of classes to classify images
            weights_path: path to pre-trained weights
        # Returns
            A Keras model instance.
    '''
    ## nb_dense_block = 3 from paper
    ## k=24 from paper
    ## I'm not sure what nb_filter should be
    eps = 1.1e-5

    # compute compression factor
    compression = 1.0 - reduction

    ## Change the size of images here. Note, I think the 3 represents RGB layers? So a 3d spatio-temporal densenet should be (width, height, num_frames)
    ## Paper has images scaled to 100Hx100W, 16 frames
    # Handle Dimension Ordering for different backends
    global concat_axis
    concat_axis = 3
    img_input = Input(shape=(100, 100, 3, 16), name='data')

    # From architecture for ImageNet (Table 1 in the paper)
    nb_filter = 64
    ## The paper says x4 for each layer, 3 total
    nb_layers = [4, 4, 4]

    ## Note: subsample = strides
    ## Convolution-3D: 7x7x7 conv, stride=2
    x = ZeroPadding3D((3, 3, 3), name='conv1_zeropadding')(img_input)
    x = Convolution3D(nb_filter,
                      7,
                      7,
                      7,
                      subsample=(2, 2, 2),
                      name='conv1',
                      bias=False)(x)

    ## Pooling-3D: 3x3x3 avg pool, stride=2
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn')(x)
    x = Scale(axis=concat_axis, name='conv1_scale')(x)
    x = Activation('relu', name='relu1')(x)
    x = ZeroPadding3D((1, 1, 1), name='pool1_zeropadding')(x)
    x = MaxPooling3D((3, 3, 3), strides=(2, 2, 2), name='pool1')(x)

    ## Add dense blocks 1, 2
    for block_idx in range(nb_dense_block - 1):
        stage = block_idx + 2
        x, nb_filter = dense_block(x,
                                   stage,
                                   nb_layers[block_idx],
                                   nb_filter,
                                   growth_rate,
                                   dropout_rate=dropout_rate,
                                   weight_decay=weight_decay)

        ## Add transition_block
        x = transition_block(x,
                             stage,
                             nb_filter,
                             compression=compression,
                             dropout_rate=dropout_rate,
                             weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    ## Add last dense block: 3 (since we don't have another transition after)
    final_stage = stage + 1
    x, nb_filter = dense_block(x,
                               final_stage,
                               nb_layers[-1],
                               nb_filter,
                               growth_rate,
                               dropout_rate=dropout_rate,
                               weight_decay=weight_decay)

    ## Classification Layer
    x = BatchNormalization(epsilon=eps,
                           axis=concat_axis,
                           name='conv' + str(final_stage) + '_blk_bn')(x)
    x = Scale(axis=concat_axis,
              name='conv' + str(final_stage) + '_blk_scale')(x)
    x = Activation('relu', name='relu' + str(final_stage) + '_blk')(x)

    ## For our paper, we want a 7x7x4 avg pool instead of a global average pool, assumed to be strides = 1 b/c not specified
    ## x = GlobalAveragePooling2D(name='pool'+str(final_stage))(x)
    x = AveragePooling3D((7, 7, 4), strides=(1, 1, 1), name='pool' + str(final_stage))(x)

    ## Fully connected (dense) 2D softmax
    ## Note: the original 2d densenet paper does a 1000D softmax, but I think this should be 2d since the pooling is no longer global.
    x = Dense(classes, name='fc6')(x)
    x = Activation('softmax', name='prob')(x)

    model = Model(img_input, x, name='densenet')

    if weights_path is not None:
        model.load_weights(weights_path)

    return model