# Shared imports for the training/evaluation utilities in this file.
import time

import numpy as np
import keras
from keras.models import Sequential, Model
from keras.layers import (Dense, Dropout, Activation, Flatten, Input,
                          Conv3D, MaxPooling3D, concatenate)
from keras.callbacks import EarlyStopping
from keras.utils import multi_gpu_model
from sklearn.metrics import roc_auc_score


def train_and_evaluate_MLP(training_data, test_data, params,
                           num_training_runs=100):
    """Train the clinical-data MLP num_training_runs times and collect
    the train/test AUCs of every run."""
    X_tr, y_tr = training_data
    X_te, y_te = test_data
    AUC_trs = []
    AUC_tes = []
    for i in range(num_training_runs):
        # Stop training once validation loss stops improving.
        e_stop = EarlyStopping(monitor='val_loss', min_delta=0.01,
                               patience=5, mode='min')
        callbacks = [e_stop]
        optimizer = keras.optimizers.Adam(lr=params['learning_rate'])

        # One hidden layer on the 7 clinical features, with L2
        # regularization and dropout; 2-unit softmax head.
        model = Sequential()
        model.add(Dense(params['num_neurons'],
                        input_dim=7,
                        kernel_initializer='glorot_uniform',
                        activation='relu',
                        kernel_regularizer=keras.regularizers.l2(
                            params['l2_ratio'])))
        model.add(Dropout(params['dropout_rate']))
        model.add(Dense(2,
                        kernel_initializer='glorot_uniform',
                        activation='softmax',
                        kernel_regularizer=keras.regularizers.l2(
                            params['l2_ratio'])))
        model.compile(loss='binary_crossentropy', optimizer=optimizer)

        history = model.fit(X_tr, y_tr,
                            callbacks=callbacks,
                            validation_data=(X_te, y_te),
                            epochs=100,
                            batch_size=params['batch_size'],
                            verbose=0)

        probs_tr = model.predict(X_tr, batch_size=8)
        probs_te = model.predict(X_te, batch_size=8)
        score_tr = roc_auc_score(y_tr, probs_tr)
        score_te = roc_auc_score(y_te, probs_te)
        AUC_trs.append(score_tr)
        AUC_tes.append(score_te)

        # Release the graph between runs to avoid memory growth.
        keras.backend.clear_session()
    return AUC_trs, AUC_tes
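# A minimal usage sketch for train_and_evaluate_MLP: only the dictionary
# keys come from the function body; the synthetic data and every
# hyperparameter value below are illustrative assumptions. Labels are
# one-hot encoded because the model ends in a 2-unit softmax trained
# with binary cross-entropy.
from keras.utils import to_categorical

_rng = np.random.RandomState(0)
_mlp_params = {'learning_rate': 1e-3,  # assumed value
               'num_neurons': 32,      # assumed value
               'l2_ratio': 1e-4,       # assumed value
               'dropout_rate': 0.5,    # assumed value
               'batch_size': 8}        # assumed value
_X_tr, _X_te = _rng.rand(64, 7), _rng.rand(32, 7)  # 7 clinical features
_y_tr = to_categorical(_rng.randint(2, size=64), 2)
_y_te = to_categorical(_rng.randint(2, size=32), 2)
_AUC_trs, _AUC_tes = train_and_evaluate_MLP((_X_tr, _y_tr), (_X_te, _y_te),
                                            _mlp_params,
                                            num_training_runs=2)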
# Fragment: the tail of a hyperparameter-search loop for the fusion model.
# The inputs (img_input, clin_input), the modality embeddings (dense1,
# dense2), the tune/def_params dictionaries, and the i/best_AUC counters
# are defined earlier in the loop.
x = concatenate([dense1, dense2])
x = Dense(tune['num_neurons_final'],
          kernel_initializer=def_params['weight_init'],
          activation=def_params['hidden_activation'],
          kernel_regularizer=keras.regularizers.l2(tune['l2_ratio']))(x)
x = Dropout(tune['dropout_rate'])(x)

# Two-unit head for softmax output, one-unit head otherwise.
if def_params['out_activation'] == 'softmax':
    output = Dense(2,
                   kernel_initializer=def_params['weight_init'],
                   activation=def_params['out_activation'],
                   kernel_regularizer=keras.regularizers.l2(
                       tune['l2_ratio']))(x)
else:
    output = Dense(1,
                   kernel_initializer=def_params['weight_init'],
                   activation=def_params['out_activation'],
                   kernel_regularizer=keras.regularizers.l2(
                       tune['l2_ratio']))(x)

optimizer = keras.optimizers.Adam(lr=tune['learning_rate'])
model = Model(inputs=[img_input, clin_input], outputs=[output])
model.compile(loss=def_params['loss_func'], optimizer=optimizer)

e_stop = EarlyStopping(monitor='val_loss',
                       min_delta=def_params['min_delta'],
                       patience=def_params['iter_patience'],
                       mode='auto')
callbacks = [e_stop]
history = model.fit({'image_input': img_X_tr, 'clinical_input': clin_X_tr},
                    y_tr,
                    callbacks=callbacks,
                    validation_data=([img_X_val, clin_X_val], y_val),
                    epochs=def_params['epochs'],
                    batch_size=tune['batch_size'],
                    verbose=0)

probs_val = model.predict([img_X_val, clin_X_val], batch_size=8)
score_val = roc_auc_score(y_val, probs_val)

i += 1
if i % 10 == 0:
    print(i)  # progress marker every 10 sampled configurations
if score_val > best_AUC:
    best_AUC = score_val
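# A minimal sketch of the random-search wrapper this fragment presupposes,
# inferred only from the keys it reads out of `tune` and `def_params`; the
# fixed values, the sampling ranges, and the num_search_iters name are all
# illustrative assumptions, not the original search configuration.
def_params = {'weight_init': 'glorot_uniform',     # assumed
              'hidden_activation': 'relu',         # assumed
              'out_activation': 'softmax',         # assumed
              'loss_func': 'binary_crossentropy',  # assumed
              'min_delta': 0.01,                   # assumed
              'iter_patience': 5,                  # assumed
              'epochs': 100}                       # assumed
best_AUC, i = 0.0, 0
num_search_iters = 100                             # assumed
for _ in range(num_search_iters):
    tune = {'num_neurons_final': int(np.random.choice([16, 32, 64])),
            'l2_ratio': 10 ** np.random.uniform(-5, -2),
            'dropout_rate': np.random.uniform(0.2, 0.6),
            'learning_rate': 10 ** np.random.uniform(-4, -2),
            'batch_size': int(np.random.choice([8, 16, 32]))}
    # ... build img_input/clin_input and the dense1/dense2 embeddings,
    # then run the fragment above to fit the model, score it on the
    # validation split, and keep the best AUC and configuration ...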
def train_and_evaluate_CNN(training_data, test_data, params,
                           num_training_runs=100):
    """Train the 3D CNN on the MR volumes num_training_runs times and
    collect the train/test AUCs of every run."""
    X_tr, y_tr = training_data
    X_te, y_te = test_data
    AUC_trs = []
    AUC_tes = []
    for i in range(num_training_runs):
        # Three Conv3D/ReLU/MaxPool blocks over 156 x 192 x 64 volumes.
        model = Sequential()
        model.add(Conv3D(params['num_filters'][0],
                         params['arc_params']['filter_size'],
                         strides=params['arc_params']['filter_stride'],
                         padding="same",
                         kernel_regularizer=keras.regularizers.l2(
                             params['l2_reg']),
                         input_shape=(156, 192, 64, 1)))
        model.add(Activation('relu'))
        model.add(MaxPooling3D(pool_size=params['arc_params']['pool_size']))
        model.add(Conv3D(params['num_filters'][1],
                         params['arc_params']['filter_size'],
                         strides=params['arc_params']['filter_stride'],
                         padding="same",
                         kernel_regularizer=keras.regularizers.l2(
                             params['l2_reg'])))
        model.add(Activation('relu'))
        model.add(MaxPooling3D(pool_size=params['arc_params']['pool_size']))
        model.add(Conv3D(params['num_filters'][2],
                         params['arc_params']['filter_size'],
                         strides=params['arc_params']['filter_stride'],
                         padding="same",
                         kernel_regularizer=keras.regularizers.l2(
                             params['l2_reg'])))
        model.add(Activation('relu'))
        model.add(MaxPooling3D(pool_size=params['arc_params']['pool_size']))
        model.add(Flatten())
        model.add(Dense(params['num_neurons_in_powers']
                        * params['num_filters'][2],
                        activation='relu',
                        kernel_regularizer=keras.regularizers.l2(
                            params['l2_reg'])))
        model.add(Dropout(params['dropout']))
        model.add(Dense(2,
                        activation='softmax',
                        kernel_regularizer=keras.regularizers.l2(
                            params['l2_reg'])))
        optimizer = keras.optimizers.Adam(lr=params['learning_rate'])
        model.compile(loss='binary_crossentropy', optimizer=optimizer)

        # Fit a 2-GPU replica, then copy the learned weights back into
        # the single-GPU template model for prediction.
        parallel_model = multi_gpu_model(model, 2)
        parallel_model.compile(loss='binary_crossentropy',
                               optimizer=optimizer)

        e_stop = EarlyStopping(monitor='val_loss', min_delta=0.02,
                               patience=2, mode='auto')
        callbacks = [e_stop]
        start = time.time()
        history = parallel_model.fit(X_tr, y_tr,
                                     callbacks=callbacks,
                                     validation_data=(X_te, y_te),
                                     batch_size=params['batch_size'],
                                     epochs=20,
                                     verbose=0)
        end = time.time()
        model.set_weights(parallel_model.get_weights())

        probs_tr = model.predict(X_tr, batch_size=8)
        probs_te = model.predict(X_te, batch_size=8)
        score_tr = roc_auc_score(y_tr, probs_tr)
        score_te = roc_auc_score(y_te, probs_te)
        AUC_trs.append(score_tr)
        AUC_tes.append(score_te)
        print('Training time for run %i was around %i minutes'
              % (i, np.floor((end - start) / 60)))
        keras.backend.clear_session()
    return AUC_trs, AUC_tes
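# A hedged sketch of the `params` dictionary train_and_evaluate_CNN
# expects: the keys mirror the function body, but every value is an
# illustrative assumption, not a reported hyperparameter. Inputs are
# volumes of shape (n, 156, 192, 64, 1) with one-hot labels.
cnn_params = {'num_filters': [8, 16, 32],                  # assumed
              'arc_params': {'filter_size': (3, 3, 3),     # assumed
                             'filter_stride': (1, 1, 1),   # assumed
                             'pool_size': (3, 3, 3)},      # assumed
              'l2_reg': 1e-4,                              # assumed
              'num_neurons_in_powers': 2,  # dense width = this * last filter count
              'dropout': 0.5,                              # assumed
              'learning_rate': 1e-4,                       # assumed
              'batch_size': 4}                             # assumed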
def train_and_evaluate_feat_extract(img_X_tr, clin_X_tr, y_tr,
                                    img_X_te, clin_X_te, y_te,
                                    params, num_training_runs=100):
    """Train the two-branch fusion model on pre-extracted image features
    plus clinical features, num_training_runs times."""
    AUC_trs = []
    AUC_tes = []
    for i in range(num_training_runs):
        img_input = Input(shape=(img_X_tr.shape[1],), name='image_input')
        clin_input = Input(shape=(clin_X_tr.shape[1],),
                           name='clinical_input')

        # Separate embedding layer for each modality.
        dense1 = Dense(params['num_neurons_embedding'][0],
                       activation='relu',
                       kernel_regularizer=keras.regularizers.l2(
                           params['l2_ratio']))(clin_input)
        dense2 = Dense(params['num_neurons_embedding'][1],
                       activation='relu',
                       kernel_regularizer=keras.regularizers.l2(
                           params['l2_ratio']))(img_input)

        # Fuse the embeddings and classify.
        x = concatenate([dense1, dense2])
        x = Dense(params['num_neurons_final'],
                  activation='relu',
                  kernel_regularizer=keras.regularizers.l2(
                      params['l2_ratio']))(x)
        x = Dropout(params['dropout_rate'])(x)
        output = Dense(2,
                       activation='softmax',
                       kernel_regularizer=keras.regularizers.l2(
                           params['l2_ratio']))(x)

        optimizer = keras.optimizers.Adam(lr=params['learning_rate'])
        model = Model(inputs=[img_input, clin_input], outputs=[output])
        model.compile(loss='binary_crossentropy', optimizer=optimizer)

        e_stop = EarlyStopping(monitor='val_loss', min_delta=0.01,
                               patience=5, mode='auto')
        callbacks = [e_stop]
        history = model.fit({'image_input': img_X_tr,
                             'clinical_input': clin_X_tr},
                            y_tr,
                            callbacks=callbacks,
                            validation_data=([img_X_te, clin_X_te], y_te),
                            epochs=100,
                            batch_size=params['batch_size'],
                            verbose=0)

        probs_tr = model.predict([img_X_tr, clin_X_tr], batch_size=8)
        probs_te = model.predict([img_X_te, clin_X_te], batch_size=8)
        score_tr = roc_auc_score(y_tr, probs_tr)
        score_te = roc_auc_score(y_te, probs_te)
        AUC_trs.append(score_tr)
        AUC_tes.append(score_te)
        keras.backend.clear_session()
    return AUC_trs, AUC_tes
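# A hedged sketch of the `params` dictionary for
# train_and_evaluate_feat_extract: keys follow the function body, values
# are illustrative assumptions. num_neurons_embedding[0] sizes the
# clinical embedding and num_neurons_embedding[1] the image-feature
# embedding.
feat_extract_params = {'num_neurons_embedding': [8, 16],  # assumed [clinical, image]
                       'num_neurons_final': 16,           # assumed
                       'l2_ratio': 1e-4,                  # assumed
                       'dropout_rate': 0.5,               # assumed
                       'learning_rate': 1e-3,             # assumed
                       'batch_size': 8}                   # assumed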
def train_and_evaluate_end_to_end(img_X_tr, clin_X_tr, y_tr,
                                  img_X_te, clin_X_te, y_te,
                                  params, num_training_runs=100):
    """Train the end-to-end fusion model (3D CNN image branch plus MLP
    clinical branch) num_training_runs times."""
    AUC_trs = []
    AUC_tes = []
    for i in range(num_training_runs):
        img_input = Input(shape=(156, 192, 64, 1), name='image_input')
        clin_input = Input(shape=(clin_X_tr.shape[1],),
                           name='clinical_input')

        # Image branch: three Conv3D/ReLU/MaxPool blocks, then two dense
        # layers that embed the image representation.
        x1 = Conv3D(params['num_filters'][0], (3, 3, 3),
                    strides=(1, 1, 1),
                    padding="same",
                    kernel_regularizer=keras.regularizers.l2(
                        params['l2_ratio']))(img_input)
        x1 = Activation('relu')(x1)
        x1 = MaxPooling3D(pool_size=(3, 3, 3))(x1)
        x1 = Conv3D(params['num_filters'][1], (3, 3, 3),
                    strides=(1, 1, 1),
                    padding="same",
                    kernel_regularizer=keras.regularizers.l2(
                        params['l2_ratio']))(x1)
        x1 = Activation('relu')(x1)
        x1 = MaxPooling3D(pool_size=(3, 3, 3))(x1)
        x1 = Conv3D(params['num_filters'][2], (3, 3, 3),
                    strides=(1, 1, 1),
                    padding="same",
                    kernel_regularizer=keras.regularizers.l2(
                        params['l2_ratio']))(x1)
        x1 = Activation('relu')(x1)
        x1 = MaxPooling3D(pool_size=(3, 3, 3))(x1)
        x1 = Flatten()(x1)
        x1 = Dense(params['num_filters'][2] * 2,
                   activation='relu',
                   kernel_regularizer=keras.regularizers.l2(
                       params['l2_ratio']))(x1)
        x1 = Dropout(params['dropout_rate'])(x1)
        x1 = Dense(params['num_neurons_embedding'][1],
                   activation='relu',
                   kernel_regularizer=keras.regularizers.l2(
                       params['l2_ratio']))(x1)

        # Clinical branch: small MLP embedding.
        x2 = Dense(params['num_neurons_MLP'],
                   activation='relu',
                   kernel_regularizer=keras.regularizers.l2(
                       params['l2_ratio']))(clin_input)
        x2 = Dropout(params['dropout_rate'])(x2)
        x2 = Dense(params['num_neurons_embedding'][0],
                   activation='relu',
                   kernel_regularizer=keras.regularizers.l2(
                       params['l2_ratio']))(x2)

        # Fuse the two embeddings and classify. (The original used
        # keras.regularizers.l1 here with the 'l2_ratio' parameter;
        # corrected to l2 to match the rest of the network.)
        x = concatenate([x1, x2])
        x = Dense(params['num_neurons_final'],
                  activation='relu',
                  kernel_regularizer=keras.regularizers.l2(
                      params['l2_ratio']))(x)
        x = Dropout(params['dropout_rate'])(x)
        output = Dense(2,
                       activation='softmax',
                       kernel_regularizer=keras.regularizers.l2(
                           params['l2_ratio']))(x)

        model = Model(inputs=[img_input, clin_input], outputs=[output])
        optimizer = keras.optimizers.Adam(lr=params['learning_rate'])
        model.compile(loss='binary_crossentropy', optimizer=optimizer)

        e_stop = EarlyStopping(monitor='val_loss', min_delta=0.02,
                               patience=2, mode='auto')
        callbacks = [e_stop]
        start = time.time()
        history = model.fit({'image_input': img_X_tr,       # inputs
                             'clinical_input': clin_X_tr},
                            y_tr,                           # targets
                            callbacks=callbacks,
                            validation_data=([img_X_te, clin_X_te], y_te),
                            epochs=20,
                            batch_size=params['batch_size'],
                            verbose=0)
        end = time.time()

        probs_tr = model.predict([img_X_tr, clin_X_tr], batch_size=8)
        probs_te = model.predict([img_X_te, clin_X_te], batch_size=8)
        score_tr = roc_auc_score(y_tr, probs_tr)
        score_te = roc_auc_score(y_te, probs_te)
        AUC_trs.append(score_tr)
        AUC_tes.append(score_te)
        print('Training time for run %i was around %i minutes'
              % (i, np.floor((end - start) / 60)))
        keras.backend.clear_session()
    return AUC_trs, AUC_tes
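# A hedged sketch of the `params` dictionary for
# train_and_evaluate_end_to_end: keys follow the function body, values
# are illustrative assumptions, not reported hyperparameters.
end_to_end_params = {'num_filters': [8, 16, 32],        # assumed
                     'num_neurons_MLP': 32,             # assumed
                     'num_neurons_embedding': [8, 16],  # assumed [clinical, image]
                     'num_neurons_final': 16,           # assumed
                     'l2_ratio': 1e-4,                  # assumed
                     'dropout_rate': 0.5,               # assumed
                     'learning_rate': 1e-4,             # assumed
                     'batch_size': 4}                   # assumed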