Example 1
# print(history.history.keys())  # summarize history for accuracy
print("%.2f%% (+/- %.2f%%)" % (numpy.mean(cvscores), numpy.std(cvscores)))

print("evaluate TEST-set (out of training set)")
print("confusion_matrix")
Y_predict = model.predict(X_test) >= 0.5
print(confusion_matrix(Y_test, Y_predict))  # sklearn expects (y_true, y_pred)
print("cohen_kappa_score = ", cohen_kappa_score(Y_test, Y_predict))
print("accuracy_score    = ", accuracy_score(Y_test, Y_predict))

# serialize model to JSON
model_json = model.to_json()
with open(HOME_DIR + '/ML_DATA/GFK/model/lotame_model.json', "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(HOME_DIR + '/ML_DATA/GFK/model/lotame_model_weights.h5')
print("Saved model to disk")
'''
plt.plot(history.history['acc'])
# plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
'''
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='Adam',
                  metrics=['accuracy'])

    return model


model = KerasClassifier(build_fn=create_model, verbose=0)  # pass the function itself, not a built model
batch_size = [32, 64, 100, 200, 300]
epochs = [10, 100, 300]
param_grid = dict(batch_size=batch_size, epochs=epochs)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=skf)
grid_result = grid.fit(X_train, Y_train)
print("==============Grid Search================")
print("Best: %f using %s" %
      (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))

# Persist the best model found by the grid search; the underlying Keras model
# lives on the refit best estimator as .model (the wrapper itself has no to_json()).
best_model = grid_result.best_estimator_.model
model_json = best_model.to_json()
with open(result_path + 'tst.json', "w") as json_file:
    json_file.write(model_json)  # to_json() already returns a JSON string
best_model.save_weights(result_path + 'tst.h5')
print("saved model to disk")

# # evaluate the model
# scores = model.evaluate(X, Y, verbose=0)
# print("%s: %.2f%%" % (model.metrics_names[1], history[1] * 100))

# serialize model to JSON
model_json = model.to_json()
with open("keras-traffic-sign-model.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("keras-traffic-sign-weights.h5")
print("Saved model to disk")

print("Testing")
# load json and create model
with open('keras-traffic-sign-model.json', 'r') as json_file:
    loaded_model_json = json_file.read()
loaded_model = model_from_json(loaded_model_json)

# load weights into new model
loaded_model.load_weights("keras-traffic-sign-weights.h5")
print("Loaded model from disk")

# evaluate loaded model on test data
loaded_model.compile(loss='categorical_crossentropy',
                     optimizer='adam',  # the original line is cut off here; optimizer and metrics are assumed
                     metrics=['accuracy'])
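# A typical continuation (an assumption; the test-array names are placeholders) would
# evaluate the reloaded model, mirroring the commented-out evaluation block above:
#   score = loaded_model.evaluate(X_test, Y_test, verbose=0)
#   print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1] * 100))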
Example 4
                                                    random_state=42)
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)

model = KerasClassifier(build_fn=createmodel, verbose=0)
batch_size = [20, 30, 40]
epochs = [3, 4, 5]
param_grid = dict(batch_size=batch_size, epochs=epochs)
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1)
grid_result = grid.fit(X_train, Y_train)

# summarize results
print("Best: %f using %s" %
      (grid_result.best_score_, grid_result.best_params_))

model = createmodel()
model.fit(X_train, Y_train, epochs=4, batch_size=40, verbose=2)
twt = [
    'A lot of good things are happening. We are respected again throughout the world, and that\'s a great thing'
]

score, acc = model.evaluate(X_test, Y_test, verbose=2, batch_size=40)
print(score)
print(acc)
#save to disk
model1_json = model.to_json()
with open('model1.json', 'w') as json_file:
    json_file.write(model1_json)
model.save_weights('model1.h5')
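# Hypothetical use of the tweet defined above (the Tokenizer that produced X_train sits
# outside this snippet, so 'tokenizer' and the padding length are assumptions):
#   from keras.preprocessing.sequence import pad_sequences
#   seq = pad_sequences(tokenizer.texts_to_sequences(twt), maxlen=X_test.shape[1])
#   print(model.predict(seq))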
Example 5
        Dense(
            units=8,
            activation='relu',
            kernel_initializer='normal',
        ))
    classificador.add(Dropout(0.1))
    classificador.add(Dense(units=3, activation='softmax'))
    classificador.compile(optimizer='adam',
                          loss='categorical_crossentropy',
                          metrics=['categorical_accuracy'])
    return classificador


classificador = KerasClassifier(build_fn=criar_rede,
                                epochs=2000,
                                batch_size=10)

resultado = cross_val_score(estimator=classificador,
                            X=previsores,
                            y=classe,
                            cv=10,
                            scoring='accuracy')
# Save the classifier: cross_val_score only trains clones, so fit the wrapper once on
# the full data to obtain an underlying Keras model that can be exported
classificador.fit(previsores, classe)
classificador_json = classificador.model.to_json()
with open("classificador_iris.json", "w") as json_file:
    json_file.write(classificador_json)
classificador.model.save_weights("classificador_iris.h5")

media = resultado.mean()
desvio = resultado.std()
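# A simple way to report the cross-validation summary computed above:
print('cross-val accuracy: {:.4f} +/- {:.4f}'.format(media, desvio))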
              'optimizer': ['adam', 'rmsprop']
             }
grid_search = GridSearchCV(estimator = classifier,
                           param_grid = parameters,
                           scoring = 'accuracy',
                           cv = 10)
grid_search = grid_search.fit(x_train, y_train)
best_parameters = grid_search.best_params_
best_accuracy = grid_search.best_score_
print('best_parameters={0} , best_accuracy={1}'.format(best_parameters, best_accuracy))
# serialize model to JSON (the wrapper has no to_json(); use the refit best estimator's Keras model)
best_model = grid_search.best_estimator_.model
model_json = best_model.to_json()
with open("best_classifier.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
best_model.save_weights("best_classifier.h5")
print("Saved best_classifier to disk")

from sklearn.feature_extraction.text import CountVectorizer
correct_predictions = 0
false_predictions = 0
with open('pos_testing.txt') as f:
    for i, line in enumerate(f):
        if i >= 250:
            break
        # pos_testing.txt holds positive reviews, so a prediction >= 0.5 counts as correct
        if classifier.predict(vectorizer.transform([line]).toarray()) >= 0.5:
            correct_predictions += 1
        else:
            false_predictions += 1
        print('. {0}'.format(i))
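# Follow-up (not part of the original loop): the share of the 250 positive test lines
# that the classifier labelled positive.
accuracy_on_positives = correct_predictions / (correct_predictions + false_predictions)
print('accuracy on positive reviews: {0:.3f}'.format(accuracy_on_positives))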
Example 7
	grid_result = grid.fit(base_model, outputs)

	# summarize results
	print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
	cv_results = grid_result.cv_results_  # grid_scores_ was removed from scikit-learn; cv_results_ replaces it
	for mean, stdev, params in zip(cv_results['mean_test_score'], cv_results['std_test_score'], cv_results['params']):
	    print("%f (%f) with: %r" % (mean, stdev, params))

    


    early_stopping = EarlyStopping(patience=20)
    checkpointer = ModelCheckpoint('inception_resnet_bottleneck_drug_best.h5', verbose=1, save_best_only=True)

    ImageFile.LOAD_TRUNCATED_IMAGES = True

    model.fit_generator(batches, steps_per_epoch=num_train_steps, epochs=1000, callbacks=[early_stopping, checkpointer], validation_data=val_batches, validation_steps=num_valid_steps)
    model.save_weights('inception_resnet_bottleneck_drug_weights.h5')
    model.save('inception_resnet_bottleneck_drug.h5')

#     for layer in model.layers[-31:]:
#         layer.trainable=True
#     for layer in model.layers[:-31]:
#         layer.trainable=False

#     model.compile(loss='categorical_crossentropy', optimizer=optimizers.SGD(lr=1e-4, momentum=0.9), metrics=['accuracy'])

#     checkpointer = ModelCheckpoint('./resnet50_best_safety.h5', verbose=1, save_best_only=True)

#     model.fit_generator(batches, steps_per_epoch=num_train_steps, epochs=1000, callbacks=[early_stopping, checkpointer], validation_data=val_batches, validation_steps=num_valid_steps)
#     model.save('resnet50_safety.h5')
Example 8
def train(should_train):
    dataset = pd.read_csv('Churn_Modelling.csv')
    x = dataset.iloc[:, 3:13].values
    y = dataset.iloc[:, 13].values

    # preprocessing
    # Encoding categorical data
    # Encoding the Independent Variable
    labelencoder_X_1 = LabelEncoder()
    x[:, 1] = labelencoder_X_1.fit_transform(x[:, 1])
    labelencoder_X_2 = LabelEncoder()
    x[:, 2] = labelencoder_X_2.fit_transform(x[:, 2])

    onehotencoder = OneHotEncoder(categorical_features=[1])  # categorical_features was removed in newer scikit-learn; ColumnTransformer is the modern replacement
    x = onehotencoder.fit_transform(x).toarray()
    x = x[:, 1:]

    x_train, x_test, y_train, y_test = train_test_split(x,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=123,
                                                        stratify=y)
    sc = StandardScaler()
    x_train = sc.fit_transform(x_train)
    x_test = sc.transform(x_test)

    # train network
    if should_train == "True":
        classifier = KerasClassifier(build_fn=build_classifier)
        parameters = {
            'batch_size': [25, 32],  # a list (not a set), as GridSearchCV expects
            'epochs': [100, 500],
            'optimizer': ['adam', 'rmsprop']
        }

        grid_search = GridSearchCV(estimator=classifier,
                                   param_grid=parameters,
                                   scoring='accuracy',
                                   cv=10)
        grid_search = grid_search.fit(x_train, y_train)

        best_parameters = grid_search.best_params_
        best_accuracy = grid_search.best_score_
        # old train
        #accuracies = cross_val_score(estimator=classifier, X=x_train, y=y_train,cv =10,n_jobs=-1)
        # before using k-fold cv classifier.fit(x_train, y_train, batch_size=10, epochs=15)
        #mean = accuracies.mean()
        #variance = accuracies.std()
        #print("mean: " + str(mean) + ", std: "+str(variance))
        # tell me the truth
        best_classifier = grid_search.best_estimator_  # refit on the full training data
        y_pred = best_classifier.predict(x_test)
        y_pred = (y_pred > 0.5)

        cm = confusion_matrix(y_test, y_pred)
        print(*cm)

        model_json = best_classifier.model.to_json()
        with open("/home/bartek/PycharmProjects/ann/model.json",
                  "w") as json_file:
            json_file.write(model_json)
        best_classifier.model.save_weights("/home/bartek/PycharmProjects/ann/model.h5")
        print("Saved model to disk")

    check_this_one_guy(labelencoder_X_1, labelencoder_X_2, onehotencoder, sc)
Example 9
def main():
    print('Using Keras version: ', keras.__version__)

    usage = 'usage: %prog [options]'
    parser = argparse.ArgumentParser(usage)
    parser.add_argument(
        '-t',
        '--train_model',
        dest='train_model',
        help=
        'Option to train model or simply make diagnostic plots (0=False, 1=True)',
        default=1,
        type=int)
    parser.add_argument('-s',
                        '--suff',
                        dest='suffix',
                        help='Option to choose suffix for training',
                        default='',
                        type=str)
    parser.add_argument('-p',
                        '--para',
                        dest='hyp_param_scan',
                        help='Option to run hyper-parameter scan',
                        default=0,
                        type=int)
    parser.add_argument(
        '-i',
        '--inputs_file_path',
        dest='inputs_file_path',
        help=
        'Path to directory containing directories \'Bkgs\' and \'Signal\' which contain background and signal ntuples respectively.',
        default='',
        type=str)
    args = parser.parse_args()
    do_model_fit = args.train_model
    suffix = args.suffix

    # Create instance of the input files directory
    #inputs_file_path = 'HHWWgg_DataSignalMCnTuples/2017/'
    inputs_file_path = '/eos/user/b/bmarzocc/HHWWgg/January_2021_Production/2017/'

    hyp_param_scan = args.hyp_param_scan
    # Set model hyper-parameters
    weights = 'BalanceYields'  # 'BalanceYields' or 'BalanceNonWeighted'
    optimizer = 'Nadam'
    validation_split = 0.1
    # hyper-parameter scan results
    if weights == 'BalanceNonWeighted':
        learn_rate = 0.0005
        epochs = 200
        batch_size = 200
    if weights == 'BalanceYields':
        learn_rate = 0.0001
        epochs = 200
        batch_size = 32
        #epochs = 10
        #batch_size=200

    # Create instance of output directory where all results are saved.
    output_directory = 'HHWWyyDNN_binary_%s_%s/' % (suffix, weights)
    check_dir(output_directory)
    hyperparam_file = os.path.join(output_directory,
                                   'additional_model_hyper_params.txt')
    additional_hyperparams = open(hyperparam_file, 'w')
    additional_hyperparams.write("optimizer: " + optimizer + "\n")
    additional_hyperparams.write("learn_rate: " + str(learn_rate) + "\n")
    additional_hyperparams.write("epochs: " + str(epochs) + "\n")
    additional_hyperparams.write("validation_split: " + str(validation_split) +
                                 "\n")
    additional_hyperparams.write("weights: " + weights + "\n")
    # Create plots subdirectory
    plots_dir = os.path.join(output_directory, 'plots/')
    input_var_jsonFile = open('input_variables.json', 'r')
    selection_criteria = '( (Leading_Photon_pt/CMS_hgg_mass) > 1/3 && (Subleading_Photon_pt/CMS_hgg_mass) > 1/4 )'

    # Load Variables from .json
    variable_list = json.load(input_var_jsonFile).items()  # json.load no longer accepts an encoding argument

    # Create list of headers for dataset .csv
    column_headers = []
    for key, var in variable_list:
        column_headers.append(key)
    column_headers.append('weight')
    column_headers.append('unweighted')
    column_headers.append('target')
    column_headers.append('key')
    column_headers.append('classweight')
    column_headers.append('process_ID')

    # Load ttree into .csv including all variables listed in column_headers
    print('<train-DNN> Input file path: ', inputs_file_path)
    outputdataframe_name = '%s/output_dataframe.csv' % (output_directory)
    if os.path.isfile(outputdataframe_name):
        data = pandas.read_csv(outputdataframe_name)
        print('<train-DNN> Loading data .csv from: %s . . . . ' %
              (outputdataframe_name))
    else:
        print('<train-DNN> Creating new data .csv @: %s . . . . ' %
              (inputs_file_path))
        data = load_data(inputs_file_path, column_headers, selection_criteria)
        # Change sentinel value to speed up training.
        data = data.mask(data < -25., -9.)
        #data = data.replace(to_replace=-99.,value=-9.0)
        data.to_csv(outputdataframe_name, index=False)
        data = pandas.read_csv(outputdataframe_name)

    print('<main> data columns: ', (data.columns.values.tolist()))
    n = len(data)
    nHH = len(data.iloc[data.target.values == 1])
    nbckg = len(data.iloc[data.target.values == 0])
    print("Total (train+validation) length of HH = %i, bckg = %i" %
          (nHH, nbckg))

    # Make instance of plotter tool
    Plotter = plotter()
    # Create statistically independent training/testing data
    traindataset, valdataset = train_test_split(data, test_size=0.1)
    valdataset.to_csv((output_directory + 'valid_dataset.csv'), index=False)

    print('<train-DNN> Training dataset shape: ', traindataset.shape)
    print('<train-DNN> Validation dataset shape: ', valdataset.shape)

    # Event weights
    weights_for_HH = traindataset.loc[traindataset['process_ID'] == 'HH',
                                      'weight']
    weights_for_Hgg = traindataset.loc[traindataset['process_ID'] == 'Hgg',
                                       'weight']
    weights_for_DiPhoton = traindataset.loc[traindataset['process_ID'] ==
                                            'DiPhoton', 'weight']
    weights_for_GJet = traindataset.loc[traindataset['process_ID'] == 'GJet',
                                        'weight']
    weights_for_QCD = traindataset.loc[traindataset['process_ID'] == 'QCD',
                                       'weight']
    weights_for_DY = traindataset.loc[traindataset['process_ID'] == 'DY',
                                      'weight']
    weights_for_TTGsJets = traindataset.loc[traindataset['process_ID'] ==
                                            'TTGsJets', 'weight']
    weights_for_WGsJets = traindataset.loc[traindataset['process_ID'] ==
                                           'WGsJets', 'weight']
    weights_for_WW = traindataset.loc[traindataset['process_ID'] == 'WW',
                                      'weight']

    HHsum_weighted = sum(weights_for_HH)
    Hggsum_weighted = sum(weights_for_Hgg)
    DiPhotonsum_weighted = sum(weights_for_DiPhoton)
    GJetsum_weighted = sum(weights_for_GJet)
    QCDsum_weighted = sum(weights_for_QCD)
    DYsum_weighted = sum(weights_for_DY)
    TTGsJetssum_weighted = sum(weights_for_TTGsJets)
    WGsJetssum_weighted = sum(weights_for_WGsJets)
    WWsum_weighted = sum(weights_for_WW)
    bckgsum_weighted = Hggsum_weighted + DiPhotonsum_weighted + GJetsum_weighted + QCDsum_weighted + DYsum_weighted + TTGsJetssum_weighted + WGsJetssum_weighted + WWsum_weighted
    #bckgsum_weighted = DiPhotonsum_weighted + GJetsum_weighted + QCDsum_weighted + DYsum_weighted + TTGsJetssum_weighted + WGsJetssum_weighted + WWsum_weighted

    nevents_for_HH = traindataset.loc[traindataset['process_ID'] == 'HH',
                                      'unweighted']
    nevents_for_Hgg = traindataset.loc[traindataset['process_ID'] == 'Hgg',
                                       'unweighted']
    nevents_for_DiPhoton = traindataset.loc[traindataset['process_ID'] ==
                                            'DiPhoton', 'unweighted']
    nevents_for_GJet = traindataset.loc[traindataset['process_ID'] == 'GJet',
                                        'unweighted']
    nevents_for_QCD = traindataset.loc[traindataset['process_ID'] == 'QCD',
                                       'unweighted']
    nevents_for_DY = traindataset.loc[traindataset['process_ID'] == 'DY',
                                      'unweighted']
    nevents_for_TTGsJets = traindataset.loc[traindataset['process_ID'] ==
                                            'TTGsJets', 'unweighted']
    nevents_for_WGsJets = traindataset.loc[traindataset['process_ID'] ==
                                           'WGsJets', 'unweighted']
    nevents_for_WW = traindataset.loc[traindataset['process_ID'] == 'WW',
                                      'unweighted']

    HHsum_unweighted = sum(nevents_for_HH)
    Hggsum_unweighted = sum(nevents_for_Hgg)
    DiPhotonsum_unweighted = sum(nevents_for_DiPhoton)
    GJetsum_unweighted = sum(nevents_for_GJet)
    QCDsum_unweighted = sum(nevents_for_QCD)
    DYsum_unweighted = sum(nevents_for_DY)
    TTGsJetssum_unweighted = sum(nevents_for_TTGsJets)
    WGsJetssum_unweighted = sum(nevents_for_WGsJets)
    WWsum_unweighted = sum(nevents_for_WW)
    bckgsum_unweighted = Hggsum_unweighted + DiPhotonsum_unweighted + GJetsum_unweighted + QCDsum_unweighted + DYsum_unweighted + TTGsJetssum_unweighted + WGsJetssum_unweighted + WWsum_unweighted
    #bckgsum_unweighted = DiPhotonsum_unweighted + GJetsum_unweighted + QCDsum_unweighted + DYsum_unweighted + TTGsJetssum_unweighted + WGsJetssum_unweighted + WWsum_unweighted

    HHsum_weighted = 2 * HHsum_weighted
    HHsum_unweighted = 2 * HHsum_unweighted

    if weights == 'BalanceYields':
        print('HHsum_weighted= ', HHsum_weighted)
        print('Hggsum_weighted= ', Hggsum_weighted)
        print('DiPhotonsum_weighted= ', DiPhotonsum_weighted)
        print('GJetsum_weighted= ', GJetsum_weighted)
        print('QCDsum_weighted= ', QCDsum_weighted)
        print('DYsum_weighted= ', DYsum_weighted)
        print('TTGsJetssum_weighted= ', TTGsJetssum_weighted)
        print('WGsJetssum_weighted= ', WGsJetssum_weighted)
        print('WWsum_weighted= ', WWsum_weighted)
        print('bckgsum_weighted= ', bckgsum_weighted)
        traindataset.loc[traindataset['process_ID'] == 'HH',
                         ['classweight']] = HHsum_unweighted / HHsum_weighted
        traindataset.loc[traindataset['process_ID'] == 'Hgg',
                         ['classweight']] = (HHsum_unweighted /
                                             bckgsum_weighted)
        traindataset.loc[traindataset['process_ID'] == 'DiPhoton',
                         ['classweight']] = (HHsum_unweighted /
                                             bckgsum_weighted)
        traindataset.loc[traindataset['process_ID'] == 'GJet',
                         ['classweight']] = (HHsum_unweighted /
                                             bckgsum_weighted)
        traindataset.loc[traindataset['process_ID'] == 'QCD',
                         ['classweight']] = (HHsum_unweighted /
                                             bckgsum_weighted)
        traindataset.loc[traindataset['process_ID'] == 'DY',
                         ['classweight']] = (HHsum_unweighted /
                                             bckgsum_weighted)
        traindataset.loc[traindataset['process_ID'] == 'TTGsJets',
                         ['classweight']] = (HHsum_unweighted /
                                             bckgsum_weighted)
        traindataset.loc[traindataset['process_ID'] == 'WGsJets',
                         ['classweight']] = (HHsum_unweighted /
                                             bckgsum_weighted)
        traindataset.loc[traindataset['process_ID'] == 'WW',
                         ['classweight']] = (HHsum_unweighted /
                                             bckgsum_weighted)

    if weights == 'BalanceNonWeighted':
        print('HHsum_unweighted= ', HHsum_unweighted)
        print('Hggsum_unweighted= ', Hggsum_unweighted)
        print('DiPhotonsum_unweighted= ', DiPhotonsum_unweighted)
        print('GJetsum_unweighted= ', GJetsum_unweighted)
        print('QCDsum_unweighted= ', QCDsum_unweighted)
        print('DYsum_unweighted= ', DYsum_unweighted)
        print('TTGsJetssum_unweighted= ', TTGsJetssum_unweighted)
        print('WGsJetssum_unweighted= ', WGsJetssum_unweighted)
        print('WWsum_unweighted= ', WWsum_unweighted)
        print('bckgsum_unweighted= ', bckgsum_unweighted)
        traindataset.loc[traindataset['process_ID'] == 'HH',
                         ['classweight']] = 1.
        traindataset.loc[traindataset['process_ID'] == 'Hgg',
                         ['classweight']] = (HHsum_unweighted /
                                             bckgsum_unweighted)
        traindataset.loc[traindataset['process_ID'] == 'DiPhoton',
                         ['classweight']] = (HHsum_unweighted /
                                             bckgsum_unweighted)
        traindataset.loc[traindataset['process_ID'] == 'GJet',
                         ['classweight']] = (HHsum_unweighted /
                                             bckgsum_unweighted)
        traindataset.loc[traindataset['process_ID'] == 'QCD',
                         ['classweight']] = (HHsum_unweighted /
                                             bckgsum_unweighted)
        traindataset.loc[traindataset['process_ID'] == 'DY',
                         ['classweight']] = (HHsum_unweighted /
                                             bckgsum_unweighted)
        traindataset.loc[traindataset['process_ID'] == 'TTGsJets',
                         ['classweight']] = (HHsum_unweighted /
                                             bckgsum_unweighted)
        traindataset.loc[traindataset['process_ID'] == 'WGsJets',
                         ['classweight']] = (HHsum_unweighted /
                                             bckgsum_unweighted)
        traindataset.loc[traindataset['process_ID'] == 'WW',
                         ['classweight']] = (HHsum_unweighted /
                                             bckgsum_unweighted)
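    # Net effect of the two schemes: with 'BalanceYields' the per-event training weight applied
    # later is classweight*weight, so the signal and the summed backgrounds each end up carrying
    # roughly HHsum_unweighted of total weight; with 'BalanceNonWeighted' only raw event counts
    # are balanced.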

    # Remove column headers that aren't input variables
    training_columns = column_headers[:-6]
    print('<train-DNN> Training features: ', training_columns)

    column_order_txt = '%s/column_order.txt' % (output_directory)
    with open(column_order_txt, "wb") as column_order_file:
        for tc_i in training_columns:
            line = tc_i + "\n"
            pickle.dump(str(line), column_order_file)
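    # Hypothetical read-back (not in the original script): each column name is pickled as a
    # separate record, so restoring the order means calling pickle.load until EOF, e.g.
    #   with open(column_order_txt, "rb") as f_in:
    #       column_order = []
    #       while True:
    #           try:
    #               column_order.append(pickle.load(f_in).strip())
    #           except EOFError:
    #               break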

    num_variables = len(training_columns)

    # Extract training and testing data
    X_train = traindataset[training_columns].values
    X_test = valdataset[training_columns].values

    # Extract labels data
    Y_train = traindataset['target'].values
    Y_test = valdataset['target'].values

    # Create dataframe containing input features only (for correlation matrix)
    train_df = data.iloc[:traindataset.shape[0]]

    # Event weights if wanted
    train_weights = traindataset['weight'].values
    test_weights = valdataset['weight'].values

    # Weights applied during training.
    if weights == 'BalanceYields':
        trainingweights = traindataset.loc[:, 'classweight'] * traindataset.loc[:, 'weight']
    if weights == 'BalanceNonWeighted':
        trainingweights = traindataset.loc[:, 'classweight']
    trainingweights = np.array(trainingweights)

    ## Input Variable Correlation plot
    correlation_plot_file_name = 'correlation_plot'
    Plotter.correlation_matrix(train_df)
    Plotter.save_plots(dir=plots_dir,
                       filename=correlation_plot_file_name + '.png')
    Plotter.save_plots(dir=plots_dir,
                       filename=correlation_plot_file_name + '.pdf')

    # Fit label encoder to Y_train
    newencoder = LabelEncoder()
    newencoder.fit(Y_train)
    # Transform to encoded array
    encoded_Y = newencoder.transform(Y_train)
    encoded_Y_test = newencoder.transform(Y_test)

    if do_model_fit == 1:
        print('<train-BinaryDNN> Training new model . . . . ')
        histories = []
        labels = []

        if hyp_param_scan == 1:
            print('Begin at local time: ', time.localtime())
            hyp_param_scan_name = 'hyp_param_scan_results.txt'
            hyp_param_scan_results = open(hyp_param_scan_name, 'a')
            time_str = str(time.localtime()) + '\n'
            hyp_param_scan_results.write(time_str)
            hyp_param_scan_results.write(weights)
            learn_rates = [0.00001, 0.0001]
            epochs = [150, 200]
            batch_size = [400, 500]
            param_grid = dict(learn_rate=learn_rates,
                              epochs=epochs,
                              batch_size=batch_size)
            model = KerasClassifier(build_fn=gscv_model, verbose=0)
            grid = GridSearchCV(estimator=model,
                                param_grid=param_grid,
                                n_jobs=-1)
            grid_result = grid.fit(X_train,
                                   Y_train,
                                   shuffle=True,
                                   sample_weight=trainingweights)
            print("Best score: %f , best params: %s" %
                  (grid_result.best_score_, grid_result.best_params_))
            hyp_param_scan_results.write(
                "Best score: %f , best params: %s\n" %
                (grid_result.best_score_, grid_result.best_params_))
            means = grid_result.cv_results_['mean_test_score']
            stds = grid_result.cv_results_['std_test_score']
            params = grid_result.cv_results_['params']
            for mean, stdev, param in zip(means, stds, params):
                print("Mean (stdev) test score: %f (%f) with parameters: %r" %
                      (mean, stdev, param))
                hyp_param_scan_results.write(
                    "Mean (stdev) test score: %f (%f) with parameters: %r\n" %
                    (mean, stdev, param))
            exit()
        else:
            # Define model for analysis
            early_stopping_monitor = EarlyStopping(patience=100,
                                                   monitor='val_loss',
                                                   min_delta=0.01,
                                                   verbose=1)
            #model = baseline_model(num_variables, learn_rate=learn_rate)
            model = new_model(num_variables, learn_rate=learn_rate)

            # Fit the model
            # Batch size = examples before updating weights (larger = faster training)
            # Epoch = One pass over data (useful for periodic logging and evaluation)
            #class_weights = np.array(class_weight.compute_class_weight('balanced',np.unique(Y_train),Y_train))
            history = model.fit(X_train,
                                Y_train,
                                validation_split=validation_split,
                                epochs=epochs,
                                batch_size=batch_size,
                                verbose=1,
                                shuffle=True,
                                sample_weight=trainingweights,
                                callbacks=[early_stopping_monitor])
            histories.append(history)
            labels.append(optimizer)
            # Make plot of loss function evolution
            Plotter.plot_training_progress_acc(histories, labels)
            acc_progress_filename = 'DNN_acc_wrt_epoch'
            Plotter.save_plots(dir=plots_dir,
                               filename=acc_progress_filename + '.png')
            Plotter.save_plots(dir=plots_dir,
                               filename=acc_progress_filename + '.pdf')

            Plotter.history_plot(history, label='loss')
            Plotter.save_plots(dir=plots_dir, filename='history_loss.png')
            Plotter.save_plots(dir=plots_dir, filename='history_loss.pdf')
    else:
        model_name = os.path.join(output_directory, 'model.h5')
        model = load_trained_model(model_name)

    # Node probabilities for training sample events
    result_probs = model.predict(np.array(X_train))
    result_classes = model.predict_classes(np.array(X_train))

    # Node probabilities for testing sample events
    result_probs_test = model.predict(np.array(X_test))
    result_classes_test = model.predict_classes(np.array(X_test))

    # Store model in file
    model_output_name = os.path.join(output_directory, 'model.h5')
    model.save(model_output_name)
    weights_output_name = os.path.join(output_directory, 'model_weights.h5')
    model.save_weights(weights_output_name)
    model_json = model.to_json()
    model_json_name = os.path.join(output_directory, 'model_serialised.json')
    with open(model_json_name, 'w') as json_file:
        json_file.write(model_json)
    model.summary()
    model_schematic_name = os.path.join(output_directory,
                                        'model_schematic.png')
    #plot_model(model, to_file=model_schematic_name, show_shapes=True, show_layer_names=True)

    print('================')
    print('Training event labels: ', len(Y_train))
    print('Training event probs', len(result_probs))
    print('Training event weights: ', len(train_weights))
    print('Testing events: ', len(Y_test))
    print('Testing event probs', len(result_probs_test))
    print('Testing event weights: ', len(test_weights))
    print('================')

    # Initialise output directory.
    Plotter.plots_directory = plots_dir
    Plotter.output_directory = output_directory

    Plotter.ROC(model, X_test, Y_test, X_train, Y_train)
    Plotter.save_plots(dir=plots_dir, filename='ROC.png')
    Plotter.save_plots(dir=plots_dir, filename='ROC.pdf')
Example 10

classificador = KerasClassifier(build_fn=createNetwork)

parametros = {
    'batch_size': [10, 30],
    'epochs': [5, 10],
    'optimizer': ['adam', 'sgd'],
    'loss': [
        'sparse_categorical_crossentropy',
    ],
    'kernel_initializer': ['random_uniform', 'normal'],
    'activation': ['relu', 'tanh'],
    'neurons': [8, 4]
}

grid = GridSearchCV(estimator=classificador, param_grid=parametros, cv=2)

c_teste2 = [np.argmax(t) for t in classe_dummy]
previsoes2 = [np.argmax(t) for t in previssores]

grid = grid.fit(previssores, classe)
melhores_parametros = grid.best_params_
melhor_precissao = grid.best_score_

best_model = grid.best_estimator_.model  # underlying Keras model of the refit best estimator
classificador_json = best_model.to_json()
with open('classificador_iris.json', 'w') as json_file:
    json_file.write(classificador_json)

best_model.save_weights('classificador_iris.h5')
Example 11
def main():
    print('Using Keras version: ', keras.__version__)

    usage = 'usage: %prog [options]'
    parser = argparse.ArgumentParser(usage)
    parser.add_argument('-t', '--train_model', dest='train_model', help='Option to train model or simply make diagnostic plots (0=False, 1=True)', default=1, type=int)
    parser.add_argument('-s', '--suff', dest='suffix', help='Option to choose suffix for training', default='', type=str)
    parser.add_argument('-p', '--para', dest='hyp_param_scan', help='Option to run hyper-parameter scan', default=0, type=int)
    parser.add_argument('-i', '--inputs_file_path', dest='inputs_file_path', help='Path to directory containing directories \'Bkgs\' and \'Signal\' which contain background and signal ntuples respectively.', default='', type=str)
    args = parser.parse_args()
    do_model_fit = args.train_model
    suffix = args.suffix

    # Create instance of the input files directory
    inputs_file_path = 'HHWWgg_DataSignalMCnTuples/2017/'

    hyp_param_scan=args.hyp_param_scan
    # Set model hyper-parameters
    weights='BalanceYields'# 'BalanceYields' or 'BalanceNonWeighted'
    optimizer = 'Nadam'
    validation_split=0.1
    # hyper-parameter scan results
    if weights == 'BalanceNonWeighted':
        learn_rate = 0.0005
        epochs = 200
        batch_size=200
    if weights == 'BalanceYields':
        learn_rate = 0.0001
        epochs = 200
        batch_size=400

    # Create instance of output directory where all results are saved.
    output_directory = 'HHWWyyDNN_binary_%s_%s/' % (suffix,weights)
    check_dir(output_directory)
    hyperparam_file = os.path.join(output_directory,'additional_model_hyper_params.txt')
    additional_hyperparams = open(hyperparam_file,'w')
    additional_hyperparams.write("optimizer: "+optimizer+"\n")
    additional_hyperparams.write("learn_rate: "+str(learn_rate)+"\n")
    additional_hyperparams.write("epochs: "+str(epochs)+"\n")
    additional_hyperparams.write("validation_split: "+str(validation_split)+"\n")
    additional_hyperparams.write("weights: "+weights+"\n")
    # Create plots subdirectory
    plots_dir = os.path.join(output_directory,'plots/')
    input_var_jsonFile = open('input_variables.json','r')
    selection_criteria = '( ((Leading_Photon_pt/CMS_hgg_mass) > 0.35) && ((Subleading_Photon_pt/CMS_hgg_mass) > 0.25) && passbVeto==1 && ExOneLep==1 && N_goodJets>=1)'
    # selection_criteria = '(AtLeast4GoodJets0Lep==1)'
    # selection_criteria = '(passPhotonSels==1 && passbVeto==1 && ExOneLep==1 && goodJets==1)'
    #selection_criteria = '( ((Leading_Photon_pt/CMS_hgg_mass) > 0.35) && ((Subleading_Photon_pt/CMS_hgg_mass) > 0.25) && passbVeto==1 && ExOneLep==1 && N_goodJets>=1)'

    # Load Variables from .json
    variable_list = json.load(input_var_jsonFile).items()  # json.load no longer accepts an encoding argument

    # Create list of headers for dataset .csv
    column_headers = []
    for key,var in variable_list:
        column_headers.append(key)
    column_headers.append('weight')
    column_headers.append('unweighted')
    column_headers.append('target')
    column_headers.append('key')
    column_headers.append('classweight')
    column_headers.append('process_ID')

    # Create instance of the input files directory
    #inputs_file_path = '/afs/cern.ch/work/a/atishelm/public/ForJosh/2017_DataMC_ntuples_moreVars'
    inputs_file_path = '/eos/user/r/rasharma/post_doc_ihep/double-higgs/ntuples/September29/MVANtuples'
    #inputs_file_path = '/eos/user/a/atishelm/ntuples/HHWWgg_DataSignalMCnTuples/PromptPromptApplied/'
    #inputs_file_path = 'PromptPromptApplied/'

    # Load ttree into .csv including all variables listed in column_headers
    print('<train-DNN> Input file path: ', inputs_file_path)
    outputdataframe_name = '%s/output_dataframe.csv' %(output_directory)
    if os.path.isfile(outputdataframe_name):
        data = pandas.read_csv(outputdataframe_name)
        print('<train-DNN> Loading data .csv from: %s . . . . ' % (outputdataframe_name))
    else:
        print('<train-DNN> Creating new data .csv @: %s . . . . ' % (inputs_file_path))
        data = load_data(inputs_file_path,column_headers,selection_criteria)
        # Change sentinel value to speed up training.
        data = data.replace(to_replace=-999.000000,value=-9.0)
        data.to_csv(outputdataframe_name, index=False)
        data = pandas.read_csv(outputdataframe_name)

    print('<main> data columns: ', (data.columns.values.tolist()))
    n = len(data)
    nHH = len(data.iloc[data.target.values == 1])
    nbckg = len(data.iloc[data.target.values == 0])
    print("Total (train+validation) length of HH = %i, bckg = %i" % (nHH, nbckg))

    # Make instance of plotter tool
    Plotter = plotter()
    # Create statistically independent training/testing data
    traindataset, valdataset = train_test_split(data, test_size=0.1)
    valdataset.to_csv((output_directory+'valid_dataset.csv'), index=False)

    print('<train-DNN> Training dataset shape: ', traindataset.shape)
    print('<train-DNN> Validation dataset shape: ', valdataset.shape)


    # Event weights
    weights_for_HH = traindataset.loc[traindataset['process_ID']=='HH', 'weight']
    weights_for_DiPhoton = traindataset.loc[traindataset['process_ID']=='DiPhoton', 'weight']
    weights_for_GJet = traindataset.loc[traindataset['process_ID']=='GJet', 'weight']
    weights_for_DY = traindataset.loc[traindataset['process_ID']=='DY', 'weight']
    weights_for_TTGG = traindataset.loc[traindataset['process_ID']=='TTGG', 'weight']
    weights_for_TTGJets = traindataset.loc[traindataset['process_ID']=='TTGJets', 'weight']
    weights_for_TTJets = traindataset.loc[traindataset['process_ID']=='TTJets', 'weight']
    weights_for_WJets = traindataset.loc[traindataset['process_ID']=='WJets', 'weight']
    weights_for_ttH = traindataset.loc[traindataset['process_ID']=='ttH', 'weight']

    HHsum_weighted= sum(weights_for_HH)
    GJetsum_weighted= sum(weights_for_GJet)
    DiPhotonsum_weighted= sum(weights_for_DiPhoton)
    TTGGsum_weighted= sum(weights_for_TTGG)
    TTGJetssum_weighted= sum(weights_for_TTGJets)
    TTJetssum_weighted= sum(weights_for_TTJets)
    WJetssum_weighted= sum(weights_for_WJets)
    ttHsum_weighted= sum(weights_for_ttH)
    DYsum_weighted= sum(weights_for_DY)
    #bckgsum_weighted = DiPhotonsum_weighted+WJetssum_weighted+ttHsum_weighted
    bckgsum_weighted = DiPhotonsum_weighted+WJetssum_weighted

    nevents_for_HH = traindataset.loc[traindataset['process_ID']=='HH', 'unweighted']
    nevents_for_DiPhoton = traindataset.loc[traindataset['process_ID']=='DiPhoton', 'unweighted']
    nevents_for_GJet = traindataset.loc[traindataset['process_ID']=='GJet', 'unweighted']
    nevents_for_DY = traindataset.loc[traindataset['process_ID']=='DY', 'unweighted']
    nevents_for_TTGG = traindataset.loc[traindataset['process_ID']=='TTGG', 'unweighted']
    nevents_for_TTGJets = traindataset.loc[traindataset['process_ID']=='TTGJets', 'unweighted']
    nevents_for_TTJets = traindataset.loc[traindataset['process_ID']=='TTJets', 'unweighted']
    nevents_for_WJets = traindataset.loc[traindataset['process_ID']=='WJets', 'unweighted']
    nevents_for_ttH = traindataset.loc[traindataset['process_ID']=='ttH', 'unweighted']

    HHsum_unweighted= sum(nevents_for_HH)
    GJetsum_unweighted= sum(nevents_for_GJet)
    DiPhotonsum_unweighted= sum(nevents_for_DiPhoton)
    TTGGsum_unweighted= sum(nevents_for_TTGG)
    TTGJetssum_unweighted= sum(nevents_for_TTGJets)
    TTJetssum_unweighted= sum(nevents_for_TTJets)
    WJetssum_unweighted= sum(nevents_for_WJets)
    ttHsum_unweighted= sum(nevents_for_ttH)
    DYsum_unweighted= sum(nevents_for_DY)

    #bckgsum_unweighted = DiPhotonsum_unweighted+WJetssum_unweighted+ttHsum_unweighted
    bckgsum_unweighted = DiPhotonsum_unweighted+WJetssum_unweighted


    if weights=='BalanceYields':
        print('HHsum_weighted= ' , HHsum_weighted)
        print('ttHsum_weighted= ' , ttHsum_weighted)
        print('DiPhotonsum_weighted= ', DiPhotonsum_weighted)
        print('WJetssum_weighted= ', WJetssum_weighted)
        print('DYsum_weighted= ', DYsum_weighted)
        print('GJetsum_weighted= ', GJetsum_weighted)
        print('bckgsum_weighted= ', bckgsum_weighted)
        traindataset.loc[traindataset['process_ID']=='HH', ['classweight']] = 1.
        traindataset.loc[traindataset['process_ID']=='GJet', ['classweight']] = (HHsum_weighted/bckgsum_weighted)
        traindataset.loc[traindataset['process_ID']=='DY', ['classweight']] = (HHsum_weighted/bckgsum_weighted)
        traindataset.loc[traindataset['process_ID']=='DiPhoton', ['classweight']] = (HHsum_weighted/bckgsum_weighted)
        traindataset.loc[traindataset['process_ID']=='WJets', ['classweight']] = (HHsum_weighted/bckgsum_weighted)
        traindataset.loc[traindataset['process_ID']=='TTGG', ['classweight']] = (HHsum_weighted/bckgsum_weighted)
        traindataset.loc[traindataset['process_ID']=='TTGJets', ['classweight']] = (HHsum_weighted/bckgsum_weighted)
        traindataset.loc[traindataset['process_ID']=='TTJets', ['classweight']] = (HHsum_weighted/bckgsum_weighted)
        traindataset.loc[traindataset['process_ID']=='ttH', ['classweight']] = (HHsum_weighted/bckgsum_weighted)

    if weights=='BalanceNonWeighted':
        print('HHsum_unweighted= ' , HHsum_unweighted)
        print('ttHsum_unweighted= ' , ttHsum_unweighted)
        print('DiPhotonsum_unweighted= ', DiPhotonsum_unweighted)
        print('WJetssum_unweighted= ', WJetssum_unweighted)
        print('DYsum_unweighted= ', DYsum_unweighted)
        print('GJetsum_unweighted= ', GJetsum_unweighted)
        print('bckgsum_unweighted= ', bckgsum_unweighted)
        traindataset.loc[traindataset['process_ID']=='HH', ['classweight']] = 1.
        traindataset.loc[traindataset['process_ID']=='GJet', ['classweight']] = (HHsum_unweighted/bckgsum_unweighted)
        traindataset.loc[traindataset['process_ID']=='DY', ['classweight']] = (HHsum_unweighted/bckgsum_unweighted)
        traindataset.loc[traindataset['process_ID']=='DiPhoton', ['classweight']] = (HHsum_unweighted/bckgsum_unweighted)
        traindataset.loc[traindataset['process_ID']=='WJets', ['classweight']] = (HHsum_unweighted/bckgsum_unweighted)
        traindataset.loc[traindataset['process_ID']=='TTGG', ['classweight']] = (HHsum_unweighted/bckgsum_unweighted)
        traindataset.loc[traindataset['process_ID']=='TTGJets', ['classweight']] = (HHsum_unweighted/bckgsum_unweighted)
        traindataset.loc[traindataset['process_ID']=='TTJets', ['classweight']] = (HHsum_unweighted/bckgsum_unweighted)
        traindataset.loc[traindataset['process_ID']=='ttH', ['classweight']] = (HHsum_unweighted/bckgsum_unweighted)

    # Remove column headers that aren't input variables
    training_columns = column_headers[:-6]
    print('<train-DNN> Training features: ', training_columns)

    column_order_txt = '%s/column_order.txt' %(output_directory)
    with open(column_order_txt, "wb") as column_order_file:
        for tc_i in training_columns:
            line = tc_i+"\n"
            pickle.dump(str(line), column_order_file)

    num_variables = len(training_columns)

    # Extract training and testing data
    X_train = traindataset[training_columns].values
    X_test = valdataset[training_columns].values

    # Extract labels data
    Y_train = traindataset['target'].values
    Y_test = valdataset['target'].values

    # Create dataframe containing input features only (for correlation matrix)
    train_df = data.iloc[:traindataset.shape[0]]

    ## Input Variable Correlation plot
    correlation_plot_file_name = 'correlation_plot.png'
    Plotter.correlation_matrix(train_df)
    Plotter.save_plots(dir=plots_dir, filename=correlation_plot_file_name)

    ####################################################################################
    # Weights applied during training. You will also need to update the class weights if
    # you are going to change the event weights applied. Introduce class weights and any
    # event weight you want to use here.
    #trainingweights = traindataset.loc[:,'classbalance']#*traindataset.loc[:,'weight']
    #trainingweights = np.array(trainingweights)

    # Temp hack to be able to change class weights without remaking dataframe
    #for inde in xrange(len(trainingweights)):
    #    newweight = 13243.0/6306.0
    #    trainingweights[inde]= newweight
    #print 'training event weight = ', trainingweights[0]

    # Event weights calculation so we can correctly apply event weights to diagnostic plots.
    # use separate list because we don't want to apply class weights in plots.
    # Event weights if wanted
    train_weights = traindataset['weight'].values
    test_weights = valdataset['weight'].values

    # Weights applied during training.
    if weights=='BalanceYields':
        trainingweights = traindataset.loc[:,'classweight']*traindataset.loc[:,'weight']
    if weights=='BalanceNonWeighted':
        trainingweights = traindataset.loc[:,'classweight']
    trainingweights = np.array(trainingweights)

    ## Input Variable Correlation plot
    correlation_plot_file_name = 'correlation_plot.pdf'
    Plotter.correlation_matrix(train_df)
    Plotter.save_plots(dir=plots_dir, filename=correlation_plot_file_name)

    # Fit label encoder to Y_train
    newencoder = LabelEncoder()
    newencoder.fit(Y_train)
    # Transform to encoded array
    encoded_Y = newencoder.transform(Y_train)
    encoded_Y_test = newencoder.transform(Y_test)

    if do_model_fit == 1:
        print('<train-BinaryDNN> Training new model . . . . ')
        histories = []
        labels = []

        if hyp_param_scan == 1:
            print('Begin at local time: ', time.localtime())
            hyp_param_scan_name = 'hyp_param_scan_results.txt'
            hyp_param_scan_results = open(hyp_param_scan_name,'a')
            time_str = str(time.localtime())+'\n'
            hyp_param_scan_results.write(time_str)
            hyp_param_scan_results.write(weights)
            learn_rates=[0.00001, 0.0001]
            epochs = [150,200]
            batch_size = [400,500]
            param_grid = dict(learn_rate=learn_rates,epochs=epochs,batch_size=batch_size)
            model = KerasClassifier(build_fn=gscv_model,verbose=0)
            grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1)
            grid_result = grid.fit(X_train,Y_train,shuffle=True,sample_weight=trainingweights)
            print("Best score: %f , best params: %s" % (grid_result.best_score_,grid_result.best_params_))
            hyp_param_scan_results.write("Best score: %f , best params: %s\n" %(grid_result.best_score_,grid_result.best_params_))
            means = grid_result.cv_results_['mean_test_score']
            stds = grid_result.cv_results_['std_test_score']
            params = grid_result.cv_results_['params']
            for mean, stdev, param in zip(means, stds, params):
                print("Mean (stdev) test score: %f (%f) with parameters: %r" % (mean,stdev,param))
                hyp_param_scan_results.write("Mean (stdev) test score: %f (%f) with parameters: %r\n" % (mean,stdev,param))
            exit()
        else:
            # Define model for analysis
            early_stopping_monitor = EarlyStopping(patience=30, monitor='val_loss', verbose=1)
            model = baseline_model(num_variables, learn_rate=learn_rate)

            # Fit the model
            # Batch size = examples before updating weights (larger = faster training)
            # Epoch = One pass over data (useful for periodic logging and evaluation)
            #class_weights = np.array(class_weight.compute_class_weight('balanced',np.unique(Y_train),Y_train))
            history = model.fit(X_train,Y_train,validation_split=validation_split,epochs=epochs,batch_size=batch_size,verbose=1,shuffle=True,sample_weight=trainingweights,callbacks=[early_stopping_monitor])
            histories.append(history)
            labels.append(optimizer)
            # Make plot of loss function evolution
            Plotter.plot_training_progress_acc(histories, labels)
            acc_progress_filename = 'DNN_acc_wrt_epoch.png'
            Plotter.save_plots(dir=plots_dir, filename=acc_progress_filename)
    else:
        model_name = os.path.join(output_directory,'model.h5')
        model = load_trained_model(model_name)

    # Node probabilities for training sample events
    result_probs = model.predict(np.array(X_train))
    result_classes = model.predict_classes(np.array(X_train))

    # Node probabilities for testing sample events
    result_probs_test = model.predict(np.array(X_test))
    result_classes_test = model.predict_classes(np.array(X_test))

    # Store model in file
    model_output_name = os.path.join(output_directory,'model.h5')
    model.save(model_output_name)
    weights_output_name = os.path.join(output_directory,'model_weights.h5')
    model.save_weights(weights_output_name)
    model_json = model.to_json()
    model_json_name = os.path.join(output_directory,'model_serialised.json')
    with open(model_json_name,'w') as json_file:
        json_file.write(model_json)
    model.summary()
    model_schematic_name = os.path.join(output_directory,'model_schematic.eps')
    print "DEBUG: ",model_schematic_name
    plot_model(model, to_file=model_schematic_name, show_shapes=True, show_layer_names=True)
    # plot_model(model, to_file='model_schematic.eps', show_shapes=True, show_layer_names=True)

    # Initialise output directory.
    Plotter.plots_directory = plots_dir
    Plotter.output_directory = output_directory

    '''
    print('================')
    print('Training event labels: ', len(Y_train))
    print('Training event probs', len(result_probs))
    print('Training event weights: ', len(train_weights))
    print('Testing events: ', len(Y_test))
    print('Testing event probs', len(result_probs_test))
    print('Testing event weights: ', len(test_weights))
    print('================')
    '''

    # Make overfitting plots of output nodes
    Plotter.binary_overfitting(model, Y_train, Y_test, result_probs, result_probs_test, plots_dir, train_weights, test_weights)
    print "DEBUG: Y_train shape: ",Y_train.shape

    # # Get true process integers for training dataset
    # original_encoded_train_Y = []
    # for i in xrange(len(result_probs)):
    #     if Y_train[i][0] == 1:
    #         original_encoded_train_Y.append(0)
    #     if Y_train[i][1] == 1:
    #         original_encoded_train_Y.append(1)
    #     if Y_train[i][2] == 1:
    #         original_encoded_train_Y.append(2)
    #     if Y_train[i][3] == 1:
    #         original_encoded_train_Y.append(3)

    # Get true class values for testing dataset
    # result_classes_test = newencoder.inverse_transform(result_classes_test)
    # result_classes_train = newencoder.inverse_transform(result_classes)
    e = shap.DeepExplainer(model, X_train[:400, ])
    shap_values = e.shap_values(X_test[:400, ])
    Plotter.plot_dot(title="DeepExplainer_sigmoid_y0", x=X_test[:400, ], shap_values=shap_values, column_headers=column_headers)
    Plotter.plot_dot_bar(title="DeepExplainer_Bar_sigmoid_y0", x=X_test[:400,], shap_values=shap_values, column_headers=column_headers)
    #e = shap.GradientExplainer(model, X_train[:100, ])
    #shap_values = e.shap_values(X_test[:100, ])
    #Plotter.plot_dot(title="GradientExplainer_sigmoid_y0", x=X_test[:100, ], shap_values=shap_values, column_headers=column_headers)
    #e = shap.KernelExplainer(model.predict, X_train[:100, ])
    #shap_values = e.shap_values(X_test[:100, ])
    #Plotter.plot_dot(title="KernelExplainer_sigmoid_y0", x=X_test[:100, ],shap_values=shap_values, column_headers=column_headers)
    #Plotter.plot_dot_bar(title="KernelExplainer_Bar_sigmoid_y0", x=X_test[:100,], shap_values=shap_values, column_headers=column_headers)
    #Plotter.plot_dot_bar_all(title="KernelExplainer_bar_All_Var_sigmoid_y0", x=X_test[:100,], shap_values=shap_values, column_headers=column_headers)

    # Create confusion matrices for training and testing performance
    # Plotter.conf_matrix(original_encoded_train_Y,result_classes_train,train_weights,'index')
    # Plotter.save_plots(dir=plots_dir, filename='yields_norm_confusion_matrix_TRAIN.png')
    # Plotter.conf_matrix(original_encoded_test_Y,result_classes_test,test_weights,'index')
    # Plotter.save_plots(dir=plots_dir, filename='yields_norm_confusion_matrix_TEST.png')

    # Plotter.conf_matrix(original_encoded_train_Y,result_classes_train,train_weights,'columns')
    # Plotter.save_plots(dir=plots_dir, filename='yields_norm_columns_confusion_matrix_TRAIN.png')
    # Plotter.conf_matrix(original_encoded_test_Y,result_classes_test,test_weights,'columns')
    # Plotter.save_plots(dir=plots_dir, filename='yields_norm_columns_confusion_matrix_TEST.png')

    # Plotter.conf_matrix(original_encoded_train_Y,result_classes_train,train_weights,'')
    # Plotter.save_plots(dir=plots_dir, filename='yields_matrix_TRAIN.png')
    # Plotter.conf_matrix(original_encoded_test_Y,result_classes_test,test_weights,'')
    # Plotter.save_plots(dir=plots_dir, filename='yields_matrix_TEST.png')

    Plotter.ROC_sklearn(Y_train, result_probs, Y_test, result_probs_test, 1 , 'BinaryClassifierROC',train_weights, test_weights)
Example 12
                          metrics=['binary_accuracy'])

    return classificador


classificador = KerasClassifier(build_fn=criarRede)
parametros = {
    'batch_size': [10, 30],
    'epochs': [50, 100],
    'optimizer': ['adam', 'sgd'],
    'loss': ['binary_crossentropy', 'hinge'],
    'kernel_initializer': ['random_uniform', 'normal'],
    'activation': ['relu', 'tanh'],
    'neurons': [16, 8]
}

grid_search = GridSearchCV(estimator=classificador,
                           param_grid=parametros,
                           scoring='accuracy',
                           cv=5)
grid_search = grid_search.fit(previsores, classe)

melhores_parametros = grid_search.best_params_
melhore_precisao = grid_search.best_score_

best_model = grid_search.best_estimator_.model  # the KerasClassifier wrapper itself has no to_json()
classificador_json = best_model.to_json()
with open('irisneuralnetwork.json', 'w') as json_file:
    json_file.write(classificador_json)

best_model.save_weights('irisweights.h5')