def run(npath, ppath, weightTrue=0.8, n_splits=3):
    """Evaluate the model with stratified k-fold cross validation.

    NOTE(review): this definition is shadowed by a later ``run`` in the same
    module; only the last definition is reachable by name.

    Args:
        npath: path to the first input dataset (forwarded to setup_data).
        ppath: path to the second input dataset (forwarded to setup_data).
        weightTrue: class weight for the positive class (label 1); the
            negative class (label 0) receives 1 - weightTrue.
        n_splits: number of stratified CV folds.
    """
    from sklearn.model_selection import StratifiedKFold
    from sklearn.model_selection import cross_val_score
    from keras.wrappers.scikit_learn import KerasClassifier
    from keras.utils import plot_model

    class_weight = {0: (1 - weightTrue), 1: weightTrue}

    # Setup all data for inputs (populates the module-level `data`)
    setup_data(npath, ppath)
    # Retrieve data for model
    X, Y = get_model_data_full(data)

    # Wrap the Keras model so sklearn's CV machinery can drive it
    model = KerasClassifier(build_fn=create_model, epochs=50, batch_size=32,
                            verbose=1)
    plot_model(create_model(), show_shapes=True)

    # Evaluate using stratified k-fold cross validation.
    # BUGFIX: class_weight was computed but never used; pass it through to
    # each fold's fit() so the positive class is actually up-weighted.
    kfold = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=1543)
    results = cross_val_score(model, X, Y, cv=kfold,
                              fit_params={'class_weight': class_weight})
    print('=' * 30)
    print(results.mean())
def run_grid(npath, ppath, weightTrue=0.75):
    """Grid-search model hyperparameters and report all CV scores.

    BUGFIX: the original body immediately reassigned ``weightTrue = 0.8``,
    silently discarding the caller's argument and the 0.75 default. The
    overwrite is removed so the parameter is honored.

    Args:
        npath: path to the first input dataset (forwarded to setup_data).
        ppath: path to the second input dataset (forwarded to setup_data).
        weightTrue: class weight for the positive class (label 1); the
            negative class (label 0) receives 1 - weightTrue.
    """
    from sklearn.model_selection import GridSearchCV
    from keras.wrappers.scikit_learn import KerasClassifier
    from keras.utils import plot_model

    class_weight = {0: (1 - weightTrue), 1: weightTrue}

    # Setup all data for inputs (populates the module-level `data`)
    setup_data(npath, ppath)
    # Retrieve data for model
    X, Y = get_model_data_full(data)
    # Define parameters to grid search
    param_grid = define_grid()

    # Wrap the Keras model so sklearn's grid search can drive it
    model = KerasClassifier(build_fn=create_model, epochs=50, batch_size=64,
                            verbose=1)
    plot_model(create_model(), show_shapes=True)

    # Setup grid object (n_jobs=1: Keras models are not fork-safe)
    grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)

    # Fit model and find scores
    grid_result = grid.fit(X, Y, validation_split=0.1,
                           class_weight=class_weight)
    print("Best: %f using %s" % (grid_result.best_score_,
                                 grid_result.best_params_))
    means = grid_result.cv_results_['mean_test_score']
    stds = grid_result.cv_results_['std_test_score']
    params = grid_result.cv_results_['params']
    for mean, stdev, param in zip(means, stds, params):
        print("%f (%f) with: %r" % (mean, stdev, param))
def run(npath, ppath, weightTrue=0.8):
    """Train and score the model with stratified 5-fold cross validation.

    For each fold a fresh model is built, compiled with Nadam, trained with
    early stopping and class weighting, then scored on the held-out fold
    via BaseStatistics.

    Fixes vs. original: Python-2-only ``print X`` statements are replaced by
    the parenthesized single-argument form (identical output on Py2/Py3 and
    consistent with the ``print(...)`` calls already in the block);
    commented-out dead code and unused function-local imports are removed.

    Args:
        npath: path to the first input dataset (forwarded to setup_data).
        ppath: path to the second input dataset (forwarded to setup_data).
        weightTrue: class weight for the positive class (label 1); the
            negative class (label 0) receives 1 - weightTrue.
    """
    from sklearn.model_selection import StratifiedKFold
    from keras.callbacks import EarlyStopping
    from ml_statistics import BaseStatistics
    from keras import optimizers

    # Params
    class_weight = {0: (1 - weightTrue), 1: weightTrue}
    earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=1,
                                  mode='auto')
    opt = optimizers.Nadam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08,
                           schedule_decay=0.004)

    # Setup all data for inputs (populates the module-level `data`)
    setup_data(npath, ppath)
    # Retrieve data for model
    X, Y = get_model_data_full(data)
    # NOTE(review): magic rescaling factor — presumably the raw feature
    # values are tiny and this brings them into a trainable range; confirm
    # against the upstream data pipeline.
    X = X * 100000
    print(X)
    print(Y.shape)

    # Evaluate using stratified 5-fold cross validation
    kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=17)
    kfold.get_n_splits(X, Y)
    for train_index, test_index in kfold.split(X, Y):
        # TRAIN DATA
        X_train, y_train = X[train_index, :], Y[train_index]
        # TEST DATA
        X_test, y_test = X[test_index, :], Y[test_index]

        # Build and compile a fresh model for this fold
        model = create_model()
        model.compile(loss='binary_crossentropy', optimizer=opt,
                      metrics=['accuracy'])
        print(model.summary())

        history = model.fit(X_train, y_train, validation_split=0.3,
                            epochs=100, batch_size=128,
                            callbacks=[earlyStopping],
                            class_weight=class_weight, verbose=0)
        print(history.history.keys())

        # Score the held-out fold
        Y_pred = model.predict(X_test, verbose=0)
        stats = BaseStatistics(y_test, Y_pred)
        print(stats)