def create_model(X_train, y_train, X_val, y_val, X_test, y_test):
    """Hyperas objective: trains a DenseNet and returns -AUC as the loss.

    The {{choice([...])}} markers are Hyperas template syntax, expanded into
    hyperopt search-space samples when optim.minimize() re-parses this file.
    """
    epochs = 40
    es_patience = 5
    lr_patience = 3
    dropout = None
    depth = 25
    nb_dense_block = 3
    nb_filter = 16
    growth_rate = 18
    bn = True
    reduction_ = 0.5
    bs = 32
    lr = 1E-5  # kept for reference; each optimizer below carries its own lr
    # Search over the optimizer (all at comparable learning rates):
    opt = {{choice([Adam(lr=1E-5), RMSprop(lr=1E-5), Adadelta(), Adamax(lr=1E-5), Nadam()])}}
    weight_file = 'hyperas_dn_lr_optimizer_wt_3Oct_1549.h5'
    nb_classes = 1         # single sigmoid output for binary classification
    img_dim = (2, 96, 96)  # 2-channel 96x96 input (channels_first)
    n_channels = 2

    model = DenseNet(depth=depth, nb_dense_block=nb_dense_block,
                     growth_rate=growth_rate, nb_filter=nb_filter,
                     dropout_rate=dropout, activation='sigmoid',
                     input_shape=img_dim, include_top=True,
                     bottleneck=bn, reduction=reduction_,
                     classes=nb_classes, pooling='avg', weights=None)
    model.summary()
    model.compile(loss=binary_crossentropy, optimizer=opt, metrics=['accuracy'])

    es = EarlyStopping(monitor='val_loss', patience=es_patience, verbose=1)
    checkpointer = ModelCheckpoint(filepath=weight_file, verbose=1,
                                   save_best_only=True)
    lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=np.sqrt(0.1),
                                   cooldown=0, patience=lr_patience,
                                   min_lr=0.5e-6, verbose=1)

    model.fit(X_train, y_train,
              batch_size=bs,
              epochs=epochs,
              callbacks=[lr_reducer, checkpointer, es],
              validation_data=(X_val, y_val),
              verbose=2)

    # Score the final-epoch model on the validation split.
    score, acc = model.evaluate(X_val, y_val)
    print("current val accuracy: %0.3f" % acc)
    pred = model.predict(X_val)
    auc_score = roc_auc_score(y_val, pred)
    print("current auc_score ------------------> %0.3f" % auc_score)

    # Reload the checkpointed (best val_loss) model and score it again.
    model = load_model(weight_file)
    score, acc = model.evaluate(X_val, y_val)
    print("Best saved model val accuracy: %0.3f" % acc)
    pred = model.predict(X_val)
    auc_score = roc_auc_score(y_val, pred)
    print("best saved model auc_score ------------------> %0.3f" % auc_score)

    # Hyperopt minimizes 'loss', so return the negated AUC.
    return {'loss': -auc_score, 'status': STATUS_OK, 'model': model}
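# A minimal sketch of how Hyperas would drive the objective above. The data()
# loader, the load_my_arrays() helper and max_evals are assumptions, not part
# of the original; Hyperas re-parses this module, so every name used inside
# create_model() must be imported at module level like this.
import numpy as np
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice
from keras.optimizers import Adam, RMSprop, Adadelta, Adamax, Nadam
from keras.losses import binary_crossentropy
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.models import load_model
from sklearn.metrics import roc_auc_score
from densenet import DenseNet  # assumed: titu1994/DenseNet Keras implementation


def data():
    # Hypothetical loader; must return arrays matching create_model's signature.
    X_train, y_train, X_val, y_val, X_test, y_test = load_my_arrays()
    return X_train, y_train, X_val, y_val, X_test, y_test


if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=create_model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=10,
                                          trials=Trials())
    print("Best hyperparameter combination:", best_run)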
# Create the model (without loading weights)
model = DenseNet(depth, nb_dense_block, growth_rate, nb_filter,
                 dropout_rate=dropout_rate, input_shape=img_dim, weights=None)
print('Model created')
model.summary()

optimizer = Adam(lr=1e-3)  # Using Adam instead of SGD to speed up training
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
              metrics=['acc'])
print('Finished compiling')

# Load and normalise CIFAR-10, then one-hot encode the labels.
(trainX, trainY), (testX, testY) = cifar10.load_data()
trainX = trainX.astype('float32')
testX = testX.astype('float32')
trainX /= 255.
testX /= 255.
Y_train = np_utils.to_categorical(trainY, nb_classes)
Y_test = np_utils.to_categorical(testY, nb_classes)

# Augmentation pipeline; the call was truncated in the original, and the
# shift/flip arguments below are assumed from the usual CIFAR-10 settings.
generator = ImageDataGenerator(rotation_range=15,
                               width_shift_range=5. / 32,
                               height_shift_range=5. / 32,
                               horizontal_flip=True)
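# A plausible continuation of the script above, following the standard Keras
# generator-training pattern (batch_size, epochs and the evaluation printout
# are placeholders, not values from the original):
batch_size = 64
epochs = 40
model.fit_generator(generator.flow(trainX, Y_train, batch_size=batch_size),
                    steps_per_epoch=len(trainX) // batch_size,
                    epochs=epochs,
                    validation_data=(testX, Y_test),
                    verbose=2)
scores = model.evaluate(testX, Y_test, batch_size=batch_size)
print('Test loss     : %0.5f' % scores[0])
print('Test accuracy : %0.5f' % scores[1])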
import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.losses import binary_crossentropy
from keras.models import load_model
from keras.optimizers import Adam
from sklearn.metrics import accuracy_score, roc_auc_score
from densenet import DenseNet  # assumed: titu1994/DenseNet Keras implementation


def fit_model(X_train, y_train, X_val, y_val):
    """Trains a single DenseNet configuration and returns (val AUC, model)."""
    epochs = 30
    es_patience = 5
    lr_patience = 5
    dropout1 = None
    depth = 13
    nb_dense_block = 3
    nb_filter = 18
    growth_rate = 12
    weight_decay = 1E-4  # unused below; kept from the original
    lr = 3E-4
    weight_file = 'keras_densenet_simple_wt_28Sept.h5'
    nb_classes = 1
    img_dim = (2, 96, 96)  # 2-channel 96x96 input (channels_first)
    n_channels = 2

    model = DenseNet(depth=depth, nb_dense_block=nb_dense_block,
                     growth_rate=growth_rate, nb_filter=nb_filter,
                     dropout_rate=dropout1, activation='sigmoid',
                     input_shape=img_dim, include_top=True,
                     bottleneck=True, reduction=0.5,
                     classes=nb_classes, pooling='avg', weights=None)
    model.summary()

    opt = Adam(lr=lr)
    model.compile(loss=binary_crossentropy, optimizer=opt, metrics=['accuracy'])

    es = EarlyStopping(monitor='val_loss', patience=es_patience, verbose=1)
    checkpointer = ModelCheckpoint(filepath=weight_file, verbose=1,
                                   save_best_only=True)
    lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=np.sqrt(0.1),
                                   cooldown=0, patience=lr_patience,
                                   min_lr=0.5e-6, verbose=1)

    model.fit(X_train, y_train,
              batch_size=64,
              epochs=epochs,
              callbacks=[es, lr_reducer, checkpointer],
              validation_data=(X_val, y_val),
              verbose=2)

    # Score the final-epoch model on the validation split.
    score, acc = model.evaluate(X_val, y_val)
    print('current Test accuracy:', acc)
    pred = model.predict(X_val)
    auc_score = roc_auc_score(y_val, pred)
    print("current auc_score ------------------> ", auc_score)

    # Reload the checkpointed (best val_loss) model and score it again.
    model = load_model(weight_file)
    score, acc = model.evaluate(X_val, y_val)
    print('Best saved model Test accuracy:', acc)
    pred = model.predict(X_val)
    auc_score = roc_auc_score(y_val, pred)
    print("best saved model auc_score ------------------> ", auc_score)

    # Accuracy at a non-default decision threshold of 0.6.
    threshold = 0.6
    pred_scores2 = (pred > threshold).astype(int)
    test_acc2 = accuracy_score(y_val, pred_scores2)
    print('Test accuracy 0.6:', test_acc2)

    return auc_score, model
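# A minimal driver sketch for fit_model(); load_patch_arrays() is a
# hypothetical loader returning 2-channel 96x96 arrays with binary labels.
X_train, y_train, X_val, y_val = load_patch_arrays()
val_auc, best_model = fit_model(X_train, y_train, X_val, y_val)
print("validation AUC: %0.3f" % val_auc)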
def create_model():
    """Builds and compiles the DenseNet; training now happens outside."""
    epochs = 4
    es_patience = 7
    lr_patience = 5
    dropout = None
    depth = 7            # fixed; earlier searched via {{choice([7, 13, 19, 25, 31])}}
    nb_dense_block = 2   # fixed; earlier searched via {{choice([2, 3])}}
    nb_filter = 16
    growth_rate = 10     # fixed; earlier searched via {{choice([6, 10, 14, 18])}}
    bn = True
    reduction_ = 0.5
    bs = 32
    lr = 1E-3
    nb_classes = 1
    img_dim = (2, 96, 96)
    n_channels = 2

    model = DenseNet(depth=depth, nb_dense_block=nb_dense_block,
                     growth_rate=growth_rate, nb_filter=nb_filter,
                     dropout_rate=dropout, activation='sigmoid',
                     input_shape=img_dim, include_top=True,
                     bottleneck=bn, reduction=reduction_,
                     classes=nb_classes, pooling='avg', weights=None)
    model.summary()

    opt = Adam(lr=lr)
    model.compile(loss=binary_crossentropy, optimizer=opt, metrics=['accuracy'])

    # The in-function training/evaluation loop (EarlyStopping, checkpointing,
    # ReduceLROnPlateau, fit, AUC scoring) was disabled in the original; it
    # matches the fit_model() version above and is expected to run outside
    # this function, as sketched below.
    return model
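# Sketch of the external training step this trimmed create_model() implies
# (assumed driver; batch size and epochs mirror the values fixed inside
# the function):
model = create_model()
model.fit(X_train, y_train, batch_size=32, epochs=4,
          validation_data=(X_val, y_val), verbose=2)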
def create_model(X_train, y_train, X_val, y_val, X_test, y_test):
    """Hyperas objective searching depth, block count, growth rate and lr."""
    epochs = 1  # single epoch per trial to keep the search cheap
    es_patience = 5
    lr_patience = 3
    dropout = None
    depth = {{choice([7, 16, 25, 34])}}
    nb_dense_block = {{choice([2, 3, 4])}}
    nb_filter = 16
    growth_rate = {{choice([6, 14, 22, 30])}}
    bn = True
    reduction_ = 0.5
    bs = 32
    lr = {{choice([1E-2, 5E-2, 1E-3, 5E-3, 1E-4, 5E-4, 1E-5, 5E-5])}}
    weight_file = 'keras_densenet_simple_wt_02Oct_1300.h5'
    nb_classes = 1
    img_dim = (2, 96, 96)
    n_channels = 2

    print("Depth: ", depth, " Growth_rate: ", growth_rate,
          " Nb_dense_block: ", nb_dense_block, " lr: ", lr)

    model = DenseNet(depth=depth, nb_dense_block=nb_dense_block,
                     growth_rate=growth_rate, nb_filter=nb_filter,
                     dropout_rate=dropout, activation='sigmoid',
                     input_shape=img_dim, include_top=True,
                     bottleneck=bn, reduction=reduction_,
                     classes=nb_classes, pooling='avg', weights=None)
    model.summary()

    opt = Adam(lr=lr)
    model.compile(loss=binary_crossentropy, optimizer=opt, metrics=['accuracy'])

    es = EarlyStopping(monitor='val_loss', patience=es_patience, verbose=1)
    # Note: the checkpointer is built but not passed to fit() below, so no
    # weights are saved during the search.
    checkpointer = ModelCheckpoint(filepath=weight_file, verbose=1,
                                   save_best_only=True)
    lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=np.sqrt(0.1),
                                   cooldown=0, patience=lr_patience,
                                   min_lr=0.5e-6, verbose=1)

    model.fit(X_train, y_train,
              batch_size=bs,
              epochs=epochs,
              callbacks=[lr_reducer, es],
              validation_data=(X_val, y_val),
              verbose=2)

    score, acc = model.evaluate(X_val, y_val)
    print('current val accuracy:', acc)
    pred = model.predict(X_val)
    auc_score = roc_auc_score(y_val, pred)
    print("current auc_score ------------------> ", auc_score)

    # Reloading the best checkpoint is disabled along with the checkpointer:
    #model = load_model(weight_file)  # This is the best model
    #score, acc = model.evaluate(X_val, y_val)
    #print('Best saved model val accuracy:', acc)
    #pred = model.predict(X_val)
    #auc_score = roc_auc_score(y_val, pred)
    #print("best saved model auc_score ------------------> ", auc_score)

    return {'loss': -auc_score, 'status': STATUS_OK, 'model': model}
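# Sketch: after running the optim.minimize() driver shown earlier against this
# objective, score the winning model on the held-out test split (data() is the
# same assumed loader as before):
X_train, y_train, X_val, y_val, X_test, y_test = data()
print("Held-out test AUC: %0.3f"
      % roc_auc_score(y_test, best_model.predict(X_test)))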