def build_model(input_shape):
    """Build and compile a 4-head 3D CNN regressor.

    The trunk interleaves conv blocks with an average-pooled identity path,
    concatenated on the channel axis (channels-first, axis 1). Four heads —
    diameter ('o_diam'), lobulation ('o_lob'), spiculation ('o_spic') and
    malignancy ('o_mal') — each get their own conv block, global max pool,
    batch norm and a relu Dense(1) output. Compiled with MSE on every head.

    Raises ValueError when input_shape[1] has no tuned learning rate.
    """
    xin = Input(input_shape)

    # Shared trunk.
    x1 = conv_block(xin, 8, activation='relu')
    x1_ident = AveragePooling3D()(xin)
    x1_merged = merge([x1, x1_ident], mode='concat', concat_axis=1)

    x2_1 = conv_block(x1_merged, 24, activation='relu')
    x2_ident = AveragePooling3D()(x1_ident)
    x2_merged = merge([x2_1, x2_ident], mode='concat', concat_axis=1)

    # By branching the identity path separately we reduce the #params.
    x3_ident = AveragePooling3D()(x2_ident)
    x3_malig = conv_block(x2_merged, 48, activation='relu')
    x3_malig_merged = merge([x3_malig, x3_ident], mode='concat', concat_axis=1)

    x4_ident = AveragePooling3D()(x3_ident)
    x4_malig = conv_block(x3_malig_merged, 64, activation='relu')
    x4_merged = merge([x4_malig, x4_ident], mode='concat', concat_axis=1)

    # Per-task heads: conv block -> global max pool -> BN -> Dense(1, relu).
    x5_malig = conv_block(x4_merged, 64)
    xpool_malig = BatchNormalization(momentum=0.995)(GlobalMaxPooling3D()(x5_malig))
    xout_malig = Dense(1, name='o_mal', activation='relu')(xpool_malig)

    x5_diam = conv_block(x4_merged, 64)
    xpool_diam = BatchNormalization(momentum=0.995)(GlobalMaxPooling3D()(x5_diam))
    xout_diam = Dense(1, name='o_diam', activation='relu')(xpool_diam)

    x5_lob = conv_block(x4_merged, 64)
    xpool_lob = BatchNormalization(momentum=0.995)(GlobalMaxPooling3D()(x5_lob))
    xout_lob = Dense(1, name='o_lob', activation='relu')(xpool_lob)

    x5_spic = conv_block(x4_merged, 64)
    xpool_spic = BatchNormalization(momentum=0.995)(GlobalMaxPooling3D()(x5_spic))
    xout_spic = Dense(1, name='o_spic', activation='relu')(xpool_spic)

    model = Model(input=xin, output=[xout_diam, xout_lob, xout_spic, xout_malig])

    # Learning rate is tuned per input resolution.
    if input_shape[1] == 32:
        lr_start = .01
    elif input_shape[1] == 64:
        lr_start = .003
    elif input_shape[1] == 128:
        lr_start = .002
    else:
        # FIX: previously fell through and later raised a confusing NameError
        # on lr_start; fail fast with a clear message instead.
        raise ValueError('no learning rate configured for input shape %r'
                         % (input_shape,))

    opt = Nadam(lr_start, clipvalue=1.0)
    print('compiling model')  # FIX: print() — consistent with the other builders
    model.compile(optimizer=opt, loss='mse',
                  loss_weights={'o_diam': 0.06, 'o_lob': 0.5,
                                'o_spic': 0.5, 'o_mal': 1.0})
    return model
def up_conv_block_seunet(x, x2, f, dropout=False): x = UpSampling3D(size=(2, 2, 2))(x) channels_nb = K.int_shape(x2)[-1] if channels_nb==16: channels_nb_bottleneck = channels_nb // 16 else: channels_nb_bottleneck = channels_nb // 32 x3=GlobalMaxPooling3D()(x2) x3 = Dense(channels_nb_bottleneck, activation='relu')(x3) x3 = Dense(channels_nb, activation='sigmoid')(x3) y = Lambda(lambda x: attetion(x))([x2, x3]) x = Concatenate(axis=-1)([x, y]) f_new = f + channels_nb x = Conv3D(f_new, (3, 3, 3), padding="same")(x) x = Conv3D(f_new, (3, 3, 3), padding="same")(x) x = BatchNormalization(axis=-1)(x) if dropout: x = Dropout(0.5)(x) x = Activation("relu")(x) return x
def build_model(input_shape): xin = Input(input_shape) #shift the below down by one x1 = conv_block(xin, 8, activation='relu') x1_ident = AveragePooling3D()(xin) x1_merged = merge([x1, x1_ident], mode='concat', concat_axis=1) x2_1 = conv_block(x1_merged, 24, activation='relu') #outputs 37 ch x2_ident = AveragePooling3D()(x1_ident) x2_merged = merge([x2_1, x2_ident], mode='concat', concat_axis=1) #by branching we reduce the #params x3_ident = AveragePooling3D()(x2_ident) x3_malig = conv_block(x2_merged, 36, activation='relu') #outputs 25 + 16 ch = 41 x3_malig_merged = merge([x3_malig, x3_ident], mode='concat', concat_axis=1) x4_ident = AveragePooling3D()(x3_ident) x4_malig = conv_block(x3_malig_merged, 48, activation='relu') #outputs 25 + 16 ch = 41 x4_malig_merged = merge([x4_malig, x4_ident], mode='concat', concat_axis=1) x5_malig = conv_block(x4_malig_merged, 64) #outputs 25 + 16 ch = 41 xpool_malig = BatchNormalization(momentum=0.995)( GlobalMaxPooling3D()(x5_malig)) xout_malig = Dense(1, name='o_mal', activation='sigmoid')(xpool_malig) #sigmoid output model = Model(input=xin, output=xout_malig) if input_shape[1] == 32: lr_start = .01 elif input_shape[1] == 64: lr_start = .003 elif input_shape[1] == 128: lr_start = .002 # elif input_shape[1] == 96: # lr_start = 5e-4 opt = Nadam(lr_start, clipvalue=1.0) print 'compiling model' model.compile(optimizer=opt, loss='mae') return model
def build_model(input_shape): xin = Input(input_shape) x1 = conv_block(xin,8,activation='crelu') x1_ident = AveragePooling3D()(xin) x1_merged = concatenate([x1, x1_ident], axis=1) x2_1 = conv_block(x1_merged,24,activation='crelu',init='orthogonal') x2_ident = AveragePooling3D()(x1_ident) x2_merged = concatenate([x2_1,x2_ident], axis=1) #by branching we reduce the #params x3_1 = conv_block(x2_merged,36,activation='crelu',init='orthogonal') x3_ident = AveragePooling3D()(x2_ident) x3_merged = concatenate([x3_1,x3_ident], axis=1) x4_1 = conv_block(x3_merged,36,activation='crelu',init='orthogonal') x4_ident = AveragePooling3D()(x3_ident) x4_merged = concatenate([x4_1,x4_ident], axis=1) x5_1 = conv_block(x4_merged,64,pool=False,init='orthogonal') xpool = BatchNormalization()(GlobalMaxPooling3D()(x5_1)) xout = dense_branch(xpool,outsize=1,activation='sigmoid') model = Model(input=xin,output=xout) if input_shape[1] == 32 : lr_start = 1e-5 elif input_shape[1] == 64: lr_start = 1e-5 elif input_shape[1] == 128: lr_start = 1e-4 elif input_shape[1] == 96: lr_start = 5e-4 elif input_shape[1] == 16: lr_start = 1e-6 opt = Nadam(lr_start,clipvalue=1.0) print('compiling model') model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy']) return model
def build_model(input_shape):
    """Build and compile a 5-head multi-task 3D CNN.

    A shared trunk (conv blocks + average-pooled identity path, concatenated
    on channel axis 1) feeds a pooled, batch-normed feature vector from which
    five heads branch: diameter regression ('o_d', relu), lobulation
    ('o_lob'), spiculation ('o_spic') and malignancy ('o_mal') — sigmoid
    scalars — plus a 3-way CAD false-positive classifier ('o_fp', softmax).

    Raises ValueError when input_shape[1] has no tuned learning rate.
    """
    xin = Input(input_shape)

    # Shared trunk.
    x1 = conv_block(xin, 8, norm=True, drop_rate=0)
    x1_ident = AveragePooling3D()(xin)
    x1_merged = merge([x1, x1_ident], mode='concat', concat_axis=1)

    x2_1 = conv_block(x1_merged, 24, norm=True, drop_rate=0)
    x2_ident = AveragePooling3D()(x1_ident)
    x2_merged = merge([x2_1, x2_ident], mode='concat', concat_axis=1)

    # By branching the identity path separately we reduce the #params.
    x3_1 = conv_block(x2_merged, 64, norm=True, drop_rate=0)
    x3_ident = AveragePooling3D()(x2_ident)
    x3_merged = merge([x3_1, x3_ident], mode='concat', concat_axis=1)

    x4_1 = conv_block(x3_merged, 72, norm=True, drop_rate=0)
    x4_ident = AveragePooling3D()(x3_ident)
    x4_merged = merge([x4_1, x4_ident], mode='concat', concat_axis=1)

    x5_1 = conv_block(x4_merged, 72, norm=True, pool=False, drop_rate=0)
    xpool = GlobalMaxPooling3D()(x5_1)
    xpool_norm = BatchNormalization()(xpool)

    # Task heads off the shared pooled features.
    xout_diam = dense_branch(xpool_norm, name='o_d', outsize=1,
                             activation='relu')
    xout_cad_falsepositive = dense_branch(xpool_norm, name='o_fp', outsize=3,
                                          activation='softmax')
    xout_lob = dense_branch(xpool_norm, name='o_lob', outsize=1,
                            activation='sigmoid')
    xout_spic = dense_branch(xpool_norm, name='o_spic', outsize=1,
                             activation='sigmoid')
    xout_malig = dense_branch(xpool_norm, name='o_mal', outsize=1,
                              activation='sigmoid')
    # FIX: removed the unused 'o_marg' branch and the dead 'o_num' merge node;
    # neither was connected to the compiled model's outputs.

    model = Model(input=xin,
                  output=[xout_diam, xout_lob, xout_spic, xout_malig,
                          xout_cad_falsepositive])

    # Learning rate tuned per input resolution.
    if input_shape[1] == 32:
        lr_start = .005
    elif input_shape[1] == 64:
        lr_start = .001
    elif input_shape[1] == 128:
        lr_start = 1e-4
    elif input_shape[1] == 96:
        lr_start = 5e-4
    else:
        # FIX: previously fell through and later raised a confusing NameError
        # on lr_start; fail fast with a clear message instead.
        raise ValueError('no learning rate configured for input shape %r'
                         % (input_shape,))

    opt = Nadam(lr_start, clipvalue=1.0)
    print('compiling model')  # FIX: print() — consistent with the other builders
    model.compile(optimizer=opt,
                  loss={'o_d': 'mse',
                        'o_lob': 'binary_crossentropy',
                        'o_spic': 'binary_crossentropy',
                        'o_mal': 'binary_crossentropy',
                        'o_fp': 'categorical_crossentropy'},
                  loss_weights={'o_d': 1.0, 'o_lob': 5.0, 'o_spic': 5.0,
                                'o_mal': 5.0, 'o_fp': 5.0})
    return model
def build_model(input_shape):
    """Build and compile a 4-head 3D CNN with fully branched task towers.

    A two-stage shared trunk (crelu conv blocks + average-pooled identity
    path, merged on channel axis 1) splits into four independent towers —
    diameter ('o_d', relu), lobulation ('o_lob'), spiculation ('o_spic') and
    malignancy ('o_mal'), the latter three sigmoid. Each tower has its own
    conv stages and pooled, batch-normed features. Compiled with MSE on the
    diameter head and binary cross-entropy on the rest.
    """
    xin = Input(input_shape)

    # Shared trunk (two stages).
    x1 = conv_block(xin, 8, activation='crelu')
    x1_ident = AveragePooling3D()(xin)
    x1_merged = merge([x1, x1_ident], mode='concat', concat_axis=1)

    x2 = conv_block(x1_merged, 24, activation='crelu', init=looks_linear_init)
    x2_ident = AveragePooling3D()(x1_ident)
    x2_merged = merge([x2, x2_ident], mode='concat', concat_axis=1)

    # Identity path shared by all towers (branching reduces the #params).
    x3_ident = AveragePooling3D()(x2_ident)
    x4_ident = AveragePooling3D()(x3_ident)

    def tower(shared):
        # One independently weighted task tower: two conv/identity stages,
        # a pool-free conv block, then pooled + batch-normed features.
        t = conv_block(shared, 36, activation='crelu', init=looks_linear_init)
        t = merge([t, x3_ident], mode='concat', concat_axis=1)
        t = conv_block(t, 36, activation='crelu', init=looks_linear_init)
        t = merge([t, x4_ident], mode='concat', concat_axis=1)
        t = conv_block(t, 64, pool=False, init=looks_linear_init)
        return BatchNormalization()(GlobalMaxPooling3D()(t))

    xout_diam = dense_branch(tower(x2_merged), name='o_d', outsize=1,
                             activation='relu')
    xout_lob = dense_branch(tower(x2_merged), name='o_lob', outsize=1,
                            activation='sigmoid')
    xout_spic = dense_branch(tower(x2_merged), name='o_spic', outsize=1,
                             activation='sigmoid')
    xout_malig = dense_branch(tower(x2_merged), name='o_mal', outsize=1,
                              activation='sigmoid')

    model = Model(input=xin,
                  output=[xout_diam, xout_lob, xout_spic, xout_malig])

    # Learning rate tuned per input resolution.
    if input_shape[1] == 32:
        lr_start = .003
    elif input_shape[1] == 64:
        lr_start = .001
    elif input_shape[1] == 128:
        lr_start = 1e-4
    elif input_shape[1] == 96:
        lr_start = 5e-4

    opt = Nadam(lr_start, clipvalue=1.0)
    print('compiling model')
    model.compile(optimizer=opt,
                  loss={'o_d': 'mse',
                        'o_lob': 'binary_crossentropy',
                        'o_spic': 'binary_crossentropy',
                        'o_mal': 'binary_crossentropy'},
                  loss_weights={'o_d': 1.0, 'o_lob': 5.0,
                                'o_spic': 5.0, 'o_mal': 5.0})
    return model