Example #1
import tensorflow as tf

physical_devices = tf.config.experimental.list_physical_devices('GPU')
print("Num GPUs Available: ", len(physical_devices))

# Sequential Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam

model = Sequential([
    Dense(units=16, input_shape=(1,), activation='relu'),  # 1st hidden layer
    Dense(units=32, activation='relu'),  # 2nd hidden layer
    Dense(units=2, activation='softmax')  # output layer
])

model.summary()

# Train an ANN

model.compile(optimizer=Adam(learning_rate=0.0001),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x=scaled_train_samples,
          y=train_labels,
          validation_split=0.1,
          batch_size=10,
          epochs=10,
          shuffle=True,
          verbose=2)

import graphviz
from ann_visualizer.visualize import ann_viz

ann_viz(model, title="ANN")
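
With view=True (the default), ann_viz writes a Graphviz source file (network.gv unless filename is given) and opens the rendered graph. A minimal sketch for converting that output to a PNG with the graphviz package, assuming the default network.gv path:

import graphviz

# Render the ann_viz output to PNG instead of the default viewer.
src = graphviz.Source.from_file('network.gv')
src.render(format='png', cleanup=True)  # writes network.gv.png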
Example #2
# (assumed imports; the top of the original snippet is truncated)
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import confusion_matrix, classification_report
from keras.models import Sequential
from keras.layers import Dense

# x (features) and labels are defined earlier in the original source
y = labels
x_train, x_test, y_train, y_test = train_test_split(x,
                                                    y,
                                                    test_size=0.3,
                                                    random_state=42)
# print(len(x_train), len(x_test))

scaler_object = MinMaxScaler()
scaler_object.fit(x_train)
scaled_x_train = scaler_object.transform(x_train)
# print (scaled_x_train)
scaled_x_test = scaler_object.transform(x_test)
# build ANN
model = Sequential()
model.add(Dense(3, input_dim=4, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()
from ann_visualizer.visualize import ann_viz
ann_viz(model, title="My ANN")
# set parameters for ANN
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# train the model
model.fit(scaled_x_train, y_train, batch_size=5, epochs=60, verbose=2)
model.save('D:/bookFiles/bankFraudDetection.h5')

# evaluate the model
predictions = model.predict_classes(scaled_x_test)  # removed in TF 2.6+; see the sketch below
print(confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))
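
Sequential.predict_classes was removed from tf.keras in TensorFlow 2.6. A minimal equivalent for this model's single sigmoid output:

# Threshold the sigmoid probabilities at 0.5 to recover class labels.
probs = model.predict(scaled_x_test)
predictions = (probs > 0.5).astype("int32").ravel()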
Example #3

model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200)


# In[9]:


scores = model.evaluate(X_test, y_test, verbose=0)
print("Large CNN Accuracy: %.2f%%" % (scores[1]*100))


# In[10]:


ann_viz(model, title='A CNN model to identify handwritten digits')


# In[11]:


img = load_img('im8.jpg')


# In[12]:


img.size
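
The following cells presumably feed this image to the trained digit model; a minimal sketch, assuming a recent Keras and a network that expects 28x28 grayscale inputs scaled to [0, 1]:

from keras.preprocessing.image import load_img, img_to_array
import numpy as np

img = load_img('im8.jpg', color_mode='grayscale', target_size=(28, 28))
arr = img_to_array(img) / 255.0    # shape (28, 28, 1), scaled to [0, 1]
arr = np.expand_dims(arr, axis=0)  # add a batch dimension -> (1, 28, 28, 1)
print(np.argmax(model.predict(arr), axis=-1))  # predicted digit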


# In[13]:

Example #4
# Visualising the Regression results
params = {'legend.fontsize': 40}
plt.rcParams.update(params)
x = np.arange(720)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y_test[:], c='b', label='observed', fillstyle='none', linewidth=6.0)
ax.plot(x, y_pred_test[:], c='r', label='predicted', linewidth=6.0)
ax.xaxis.set_ticks([i * 60 for i in range(0, 13)])
ax.set_ylabel('Power Generation', fontsize=44)
ax.set_xlabel('Time', fontsize=44)
ax.tick_params(labelsize=36)
ax.grid()
plt.legend(loc=2)
plt.title('ANN', fontsize=30)
plt.draw()

# Export to .csv
solution = pd.read_csv('Solution.csv', index_col='TIMESTAMP')
df = pd.DataFrame(index=dataset_test_y.index)
df['POWER'] = solution.POWER
df['ANN FORECAST'] = y_pred_test
df.to_csv(path_or_buf='ForecastTemplate1.csv',
          columns=['ANN FORECAST'],
          date_format='%Y%m%d %H:%M')

from ann_visualizer.visualize import ann_viz

ann_viz(Regressor, title="title", view=True)
Example #5
def main():
    # Read file with dataset
    data = pd.read_csv("data/sonar.all-data", header=None)
    data = data.dropna()

    # Perform feature selection
    # data_selected = feature_selection(data, "rfe")
    final_data = feature_selection(data, "pca")

    # Create model
    ann_model = create_model(num=len(final_data.columns)-1)
    early_stopping = keras.callbacks.EarlyStopping(
        monitor='val_accuracy',  # mode='max' needs a score-type metric, not the default val_loss
        verbose=1,
        patience=10,
        mode='max',
        restore_best_weights=True)

    model = KerasClassifier(build_fn=create_model, num=len(final_data.columns)-1, verbose=1)

    ann_model.summary()
    plot_model(ann_model, to_file='plots/model_plot.png', show_shapes=True, show_layer_names=True)
    ann_viz(ann_model, filename="plots/neural_network.png", title="NN Architecture")

    # Split train and test set
    X = final_data.loc[:, ~final_data.columns.isin(["target"])]
    y = final_data["target"]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

    # Setting a dictionary with hyperparameters
    epochs = [200]
    optimizer = ['RMSprop', 'Adam', 'Nadam']
    learn_rate = [0.0001, 0.001]
    momentum = [0.2, 0.4, 0.6, 0.8, 0.9]
    init_mode = ['lecun_uniform', 'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform']
    dropout_rate = [0.3, 0.4, 0.5]
    neurons1 = [15, 20]
    neurons2 = [15, 20]

    param_grid = dict(epochs=epochs, optimizer=optimizer, learn_rate=learn_rate, momentum=momentum,
                      init_mode=init_mode, dropout_rate=dropout_rate, neurons1=neurons1, neurons2=neurons2)

    if GRID:
        grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=5)
        grid_result = grid.fit(X_train, y_train, **{'callbacks': [early_stopping]})

        # summarize results
        print("Best score: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
        means = grid_result.cv_results_['mean_test_score']
        stds = grid_result.cv_results_['std_test_score']
        params = grid_result.cv_results_['params']
        for mean, stdev, param in zip(means, stds, params):
            print("%f (%f) with: %r" % (mean, stdev, param))

        print("Saving best estimator for evaluations on test dataset")
        filename = "model/best_estimator.sav"
        pickle.dump(grid_result.best_estimator_, open(filename, 'wb'))

        y_pred_class = grid_result.best_estimator_.predict(X_test)
        print("Accuracy on the test set ", metrics.accuracy_score(y_test, y_pred_class))
    else:
        kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
        results = cross_val_score(model, X_train, y_train, cv=kfold)

        print(results.mean())
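
GridSearchCV forwards each key of param_grid to the KerasClassifier's build_fn, so create_model must accept them all as keyword arguments. The project's definition is not shown; a hedged sketch of a compatible builder (layer sizes and optimizer wiring are assumptions):

from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import optimizers

def create_model(num, optimizer='Adam', learn_rate=0.001, momentum=0.9,
                 init_mode='glorot_uniform', dropout_rate=0.3,
                 neurons1=15, neurons2=15):
    model = Sequential()
    model.add(Dense(neurons1, input_dim=num, kernel_initializer=init_mode, activation='relu'))
    model.add(Dropout(dropout_rate))
    model.add(Dense(neurons2, kernel_initializer=init_mode, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # Instantiate the optimizer from the grid's name and learning rate
    # (momentum is accepted so the grid key stays valid, but is unused here).
    opt = {'RMSprop': optimizers.RMSprop, 'Adam': optimizers.Adam,
           'Nadam': optimizers.Nadam}[optimizer](lr=learn_rate)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
    return model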
Example #6
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(
    # ... earlier augmentation options truncated in the original snippet
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
training_set = train_datagen.flow_from_directory('dataset/train',
                                                 target_size=(64, 64),
                                                 batch_size=32,
                                                 class_mode='binary')
test_set = test_datagen.flow_from_directory('dataset/test',
                                            target_size=(64, 64),
                                            batch_size=32,
                                            class_mode='binary')
# fit_generator/evaluate_generator are deprecated in TF2; see the note after this example
classifier.fit_generator(training_set,
                         steps_per_epoch=10,
                         epochs=10,
                         validation_data=test_set,
                         validation_steps=2000)
acc = classifier.evaluate_generator(test_set)
ann_viz(classifier, title="My first neural network")

import numpy as np
from keras.preprocessing import image
test_image = image.load_img('dataset/img.jpg', target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
result = classifier.predict(test_image)
print(training_set.class_indices)  # mapping of class names to indices
if result[0][0] > 0.5:
    prediction = 'audi'
else:
    prediction = 'bmw'
print(prediction)
print("Accuracy = ", acc[1] * 100, "%")
Example #7
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

score = model.evaluate(X_test, y_test, batch_size=128)

from ann_visualizer.visualize import ann_viz
import os.path
from keras.utils import plot_model

save_path = 'C:/Users/Camila/Documents/Tesis/'
plot_model(model, to_file=os.path.join(save_path, "NN-FF01.png"))

import os

os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'

ann_viz(model,
        view=True,
        filename=os.path.join(save_path, "NN-FF01.gv"),
        title="NN-FF01")
Example #8
# Making the Confusion Matrix

from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, predictions, normalize=None)

print(cm)

# classification report
from sklearn.metrics import classification_report

print(classification_report(y_test, predictions))
"""Plot the confusion matrix"""

import seaborn as sns
import matplotlib.pyplot as plt

ax = plt.subplot()
sns.heatmap(cm, annot=True, ax=ax)

ax.set_ylabel('Animal classes (true)')
ax.set_xlabel('Animal classes (predicted)')
ax.set_title('Confusion Matrix')
ax.yaxis.set_ticklabels(['bear', 'bird', 'cat'])
ax.xaxis.set_ticklabels(['bear', 'bird', 'cat'])

# Creating an image of the ANN
from ann_visualizer.visualize import ann_viz

ann_viz(model, title="ANN", filename="ANN")
Example #9
# The top of this snippet is truncated; the builder head is reconstructed from context:
def prep_model(hidden_dim):
    model = Sequential()
    for i in range(1, len(hidden_dim) - 1):
        if (i == 1):
            model.add(Dense(hidden_dim[i],input_dim=hidden_dim[0],kernel_initializer="normal",activation="relu"))
        else:
            model.add(Dense(hidden_dim[i],activation="relu"))
    model.add(Dense(hidden_dim[-1]))
    model.compile(loss="mean_squared_error",optimizer="adam",metrics = ["accuracy"])
    return (model)

column_names = list(startup.columns)
# split dataset into predictor & target
predictors = column_names[0:4]
target = column_names[4]
# convert datatype to float
ta = np.array(startup[target].astype('float'))
pu = np.array(startup[predictors].astype('float'))
ta.dtype
pu.dtype
startup.dtypes
# print pred_train & rmse values of the dataset.
first_model = prep_model([4,50,1])
first_model.fit(pu,ta,epochs=5)
pred_train = first_model.predict(pu)
pred_train = pd.Series([i[0] for i in pred_train])
rmse_value = np.sqrt(np.mean((pred_train-startup[target])**2))
# plot graph
plt.plot(pred_train,startup[target],"bo")
# print correlation value
print(np.corrcoef(pred_train, startup[target]))
# neural network graph plot
ann_viz(first_model, title="50startup neural network")
Example #10
# The surrounding training loop is truncated in the original; this fragment sums
# SUMO halting-vehicle counts (queried through the traci API) into a reward signal.
                        inactiveJunction[1]
                    ) + traci.edge.getLastStepHaltingNumber(
                        inactiveJunction[2])
                    reward = reward2 - reward1
                    rewards.append(reward)

#            new_state = sumoInt.getState()
#            agent.remember(state, action, reward, new_state, False)
#            # Randomly Draw 32 samples and train the neural network by RMS Prop algorithm
#            if(len(agent.memory) > batch_size):
#                agent.replay(batch_size)
#
#        sumoInt.visualize(rewards)
#
#
#        mem = agent.memory[-1]
#        del agent.memory[-1]
#        agent.memory.append((mem[0], mem[1], reward, mem[3], True))
#        #log.write('episode - ' + str(e) + ', total waiting time - ' +
#        #          str(waiting_time) + ', static waiting time - 338798 \n')
#        #log.close()
#        print('episode - ' + str(e) + ' total waiting time - ' + str(waiting_time))
#        #agent.save('reinf_traf_control_' + str(e) + '.h5')
##            two = sumoInt.visualize_eyes(Vel=velocityMatrix)
#
        traci.close(wait=False)

ann_viz(model, view=True, filename='network.gv', title='My Neural Network')

sys.stdout.flush()
Example #11
# split dataset into training and test sets
X = dataset[:, 0:8]
Y = dataset[:, 8]
x_train, x_test, y_train, y_test = train_test_split(X,
                                                    Y,
                                                    test_size=.2,
                                                    random_state=42)

model = Sequential()
# input_dim = number of input attributes
# Dense layers are fully connected to the next layer
model.add(Dense(50, input_dim=8, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train,
          y_train,
          batch_size=50,
          epochs=1,
          validation_data=(x_test, y_test))
scores = model.evaluate(X, Y)
print(model.metrics_names[1], scores[1] * 100)

from ann_visualizer.visualize import ann_viz
ann_viz(model, title="", view=True)
print(model.layers[1].get_config())
Example #12
File: mlp.py Project: Wigder/inns
from ann_visualizer.visualize import ann_viz
from keras import Sequential
from keras.layers import Dense, Dropout

from load_data import x, y, dimensions
from mccv_keras import mccv

hidden_layers = 5
plot = False  # Switch to True to output the architecture as a .png file.
name = ""  # If the above is set to True, this will be the name of the output file.
title = ""  # If the above is set to True, this will be the title of the graph.

model = Sequential()
model.add(Dense(16, input_dim=dimensions, dtype="float32", activation="relu"))
for i in range(hidden_layers - 1):
    model.add(Dense(16, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.summary()

if plot:
    ann_viz(model, filename="{}.gv".format(name), title=title)

mccv(x, y, model)
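
The project's mccv (from mccv_keras) is not shown; a hedged sketch of Monte Carlo cross-validation, i.e. repeated random train/test splits with freshly initialized weights per round (round count and fit settings are assumptions):

from keras.models import clone_model
from sklearn.model_selection import train_test_split

def mccv_sketch(x, y, model, rounds=10, test_size=0.2):
    scores = []
    for seed in range(rounds):
        x_tr, x_te, y_tr, y_te = train_test_split(x, y, test_size=test_size, random_state=seed)
        m = clone_model(model)  # same architecture, fresh random weights
        m.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
        m.fit(x_tr, y_tr, epochs=50, batch_size=16, verbose=0)
        scores.append(m.evaluate(x_te, y_te, verbose=0)[1])  # accuracy on the held-out split
    return sum(scores) / len(scores)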
Example #13
from ann_visualizer.visualize import ann_viz
from keras.models import model_from_json
import numpy
# fix random seed for reproducibility
numpy.random.seed(7)
# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights("model.h5")
ann_viz(model, title="Artificial Neural network - Model Visualization")
Example #14

# split dataset in predictors & target
column_names = list(Concrete.columns)
predictors = column_names[0:8]
target = column_names[8]

# print pred_train & rmse values of the dataset.
first_model = prep_model([8, 50, 1])
first_model.fit(np.array(Concrete[predictors]),
                np.array(Concrete[target]),
                epochs=9)
pred_train = first_model.predict(np.array(Concrete[predictors]))
pred_train = pd.Series([i[0] for i in pred_train])
rmse_value = np.sqrt(np.mean((pred_train - Concrete[target])**2))

# print correlation coefficient
print(np.corrcoef(pred_train, Concrete[target]))

# neural network graph plot
ann_viz(first_model, title="concrete neural network")

# plot graph for visualization.
plt.hist(Concrete['age'], edgecolor='k')
sns.distplot(Concrete['age'], hist=False)
plt.plot(pred_train, Concrete[target], "bo")
plt.scatter(Concrete['cement'], y=Concrete['fineagg'], color='green', alpha=.6)
plt.show()
plt.scatter(Concrete['cement'], y=Concrete['water'], color='red', alpha=.6)
plt.show()
Example #15
# Adding the output layer
classifier.add(Dense(1, kernel_initializer = 'uniform', activation = 'sigmoid'))

# Compiling the Artificial Neural Network
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics=['accuracy'])

# Fitting the Artificial Neural Network to the training set
classifier.fit(X_train, y_train, batch_size = 50, epochs = 100)

# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)

# Accuracy of test set
score = classifier.evaluate(X_test, y_test)
print("\n%s: %.2f%%" % (classifier.metrics_names[1], score[1]*100))

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)

weights = classifier.get_weights()

# Visualising Model
from keras.utils.vis_utils import plot_model
plot_model(classifier, to_file='D:/Cancer/model_plot.png', show_shapes=True, show_layer_names=True)

# Visualising Network
from ann_visualizer.visualize import ann_viz
ann_viz(classifier, title="wdbc_cancer_ann_model")
Example #16
from keras.models import Sequential
from keras.layers import Dense
from ann_visualizer.visualize import ann_viz

network = Sequential()

network.add(
    Dense(units=6,
          activation='relu',
          kernel_initializer='uniform',
          input_dim=11))

network.add(Dense(units=6, activation='relu', kernel_initializer='uniform'))

network.add(Dense(units=1, activation='sigmoid', kernel_initializer='uniform'))

ann_viz(network)
Example #17
# Fitting the ANN to the training set
history = classifier.fit(X_train, y_train, validation_split=0.33, batch_size = 10, epochs = 100, verbose=0)

# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix, f1_score
cm = confusion_matrix(y_test, y_pred)
print(cm)
print('F1_score =    {:.3f}'.format(f1_score(y_test, y_pred)))

#%%
from keras.utils import plot_model
from ann_visualizer.visualize import ann_viz

plot_model(classifier, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
ann_viz(classifier, view=True, filename='net.gv', title='Neural Network')
# Plot training & validation accuracy values
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
Example #18
column_names = list(Concrete.columns)
predictors = column_names[0:8]
target = column_names[8]

# print pred_train & rmse values of the dataset.
first_model = prep_model([8,50,1])
first_model.fit(np.array(Concrete[predictors]),np.array(Concrete[target]),epochs=9)
pred_train = first_model.predict(np.array(Concrete[predictors]))
pred_train = pd.Series([i[0] for i in pred_train])
rmse_value = np.sqrt(np.mean((pred_train-Concrete[target])**2))

# print correlation coefficient
print(np.corrcoef(pred_train, Concrete[target]))

# neural network graph plot
ann_viz(first_model, view=True, title="Concrete Neural Network")

# plot graph for visualization.
plt.hist(Concrete['age'],edgecolor='k')
plt.xlabel("Age")
plt.show()
sns.distplot(Concrete['age'],hist=False)
plt.show()
plt.plot(pred_train,Concrete[target],"bo")
plt.xlabel("Predict Train")
plt.ylabel("Concrete [target]")
plt.show()
plt.scatter(Concrete['cement'],y=Concrete['fineagg'],color='green',alpha=.6)
plt.xlabel("Cement")
plt.ylabel("Fineagg")
plt.show()
Example #19
    print(cm)
    print(acc)
    print(scores)
#---------------------------------------------------------------------------
    
    
    # Model Accuracy: how often is the classifier correct?
    print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
  
    # Model Precision: what percentage of positive tuples are labeled as such?
    print("Precision:",metrics.precision_score(y_test, y_pred))
    
    # Model Recall: what percentage of positive tuples are labelled as such?
    print("Recall:",metrics.recall_score(y_test, y_pred))
   
#---------------------------------------------------------------------------
    ann_viz(classifier, title="Neural Network for Diabetes Dataset")
#---------------------------------------------------------------------------

Example #20
    datagen = ImageDataGenerator(
        # ... earlier augmentation options truncated in the original snippet
        vertical_flip=False,  # randomly flip images
        # set rescaling factor (applied before any other transformation)
        rescale=None,
        # set function that will be applied on each input
        preprocessing_function=None,
        # image data format, either "channels_first" or "channels_last"
        data_format=None,
        # fraction of images reserved for validation (strictly between 0 and 1)
        validation_split=0.0)

    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)

    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                        epochs=epochs,
                        validation_data=(x_test, y_test),
                        workers=4)

# Save model and weights
model.save('model.h5')
print('Saved Trained Model')

# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])

ann_viz(model, title="Convolutional Neural Network", view=True)
Example #21
# The top of this snippet is truncated; it is the same prep_model builder as in Example #9:
def prep_model(hidden_dim):
    model = Sequential()
    for i in range(1, len(hidden_dim) - 1):
        if (i == 1):
            model.add(Dense(hidden_dim[i], input_dim=hidden_dim[0],
                            kernel_initializer="normal", activation="relu"))
        else:
            model.add(Dense(hidden_dim[i], activation="relu"))
    model.add(Dense(hidden_dim[-1]))
    model.compile(loss="mean_squared_error",
                  optimizer="adam",
                  metrics=["accuracy"])
    return (model)


# divide dataset into predictor & target
column_names = list(forest.columns)
predictors = forest.iloc[:, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11]]
target = forest.iloc[:, 10]
target = target.astype('int')
pu = np.array(predictors).astype('int')

# print pred_train & rmse values of the dataset.
first_model = prep_model([11, 517, 1])
first_model.fit(pu, np.array(target), epochs=9)
pred_train = first_model.predict(pu)
pred_train = pd.Series([i[0] for i in pred_train])
rmse_value = np.sqrt(np.mean((pred_train - target)**2))

# plot graph
plt.plot(pred_train, target, "bo")
plt.xlabel("Predict Train")
plt.ylabel("Target")
plt.show()

# neural network graph plot
ann_viz(first_model, title="forestfire neural network")
Example #22
# Start trainings
training_start_time = time()
malstm_trained = model.fit(
    [X_train['left'], X_train['right']],
    Y_train,
    batch_size=batch_size,
    epochs=n_epoch,
    validation_data=([X_validation['left'],
                      X_validation['right']], Y_validation))
training_end_time = time()
print("Training time finished.\n%d epochs in %12.2f" %
      (n_epoch, training_end_time - training_start_time))

model.save('./result/SiameseLSTM.h5')
ann_viz(model, title="SiameseLSTM", filename="result/SiameseLSTM")

# Plot accuracy
plt.subplot(2, 1, 1)
plt.plot(malstm_trained.history['acc'])
plt.plot(malstm_trained.history['val_acc'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper left')

# Plot loss
plt.subplot(2, 1, 2)
plt.plot(malstm_trained.history['loss'])
plt.plot(malstm_trained.history['val_loss'])
plt.title('Model Loss')
Example #23
from sklearn.metrics import confusion_matrix
cm_adver = confusion_matrix(y_adver, y_adver_pred)

# Now, let's plot the ROC for the model
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_org,
         tpr_org,
         label='Original+Adversarial samples (area = {:.3f})'.format(auc_org))
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()

end_time = time.time()
print('Execution time: ' + str(round(end_time - start_time, 3)) + ' seconds')

#******SAVING THE MODEL********************************************************
#save json
model_json = model.to_json()
with open("SmartAM2.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("SmartAM2.h5")
print("Saved model to disk")

# visualize the ANN
from ann_visualizer.visualize import ann_viz
ann_viz(model, view=True, filename="SmartAM2.gv", title="SmartAM2")
Example #24
model = Sequential()
model.add(Dense(30, input_dim=27, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(15, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='relu')) 
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer='adam' , metrics=['mse','mae'])
model.fit(X_train_norm, Y_train_small , epochs=1000, verbose=0)
predicted = model.predict(X_test_norm) 
from sklearn.metrics import r2_score
print(r2_score(Y_test_small, predicted))  # 77.2 for small dataset

# Vizualizing our ANN model
ann_viz(model, view=True, title="Movie predictor ANN")

# Training the ANN model on the small normalized dataset

model = Sequential()
model.add(Dense(40, input_dim=27, 
                activation='relu'))
model.add(Dense(30, activation='relu'))
model.add(Dense(30, activation='relu'))
model.add(Dense(30, activation='relu'))
model.add(Dense(10, activation='relu')) 
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer='adam')
model.fit(X_train_std, Y_train_small, 
          epochs=500, verbose=0)
predicted = model.predict(X_test_std)
Example #25
y_train3 = labelencoder_y_1.fit_transform(y_train)

classifier_NN1 = OneVsRestClassifier(
    KerasClassifier(build_fn=baseline_model1,
                    epochs=10,
                    batch_size=5,
                    verbose=0))
classifier_NN1.fit(X_train, y_train3)
y_score = classifier_NN1.predict_proba(X_test)
y_score_train = classifier_NN1.predict_proba(X_train)

plot_roc_auc(y_score, y_test1, 'nn1_auc_roc.png', 'SLP (test)')
plot_roc_auc(y_score_train, y_train1, 'nn1_train_auc_roc.png', 'SLP (train)')

from ann_visualizer.visualize import ann_viz
ann_viz(model, title="My first neural network")

#############################################################################################
#############################################################################################

#############################################################################################
#############################################################################################
############# NN - keras ####################################################################
#############################################################################################
#############################################################################################
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
Example #26
#-----------------------------------------------------------------
    sc = StandardScaler()
    X_train = sc.fit_transform(X_train)
    X_test = sc.transform(X_test)
#-----------------------------------------------------------------
    
    def build_model():
        model = Sequential()
        model.add(Dense(64, activation='relu', input_dim=13))
        model.add(Dense(64, activation='relu'))
        model.add(Dense(1))
        model.compile(optimizer='adam',loss='mse',metrics=['mae'])
        
        return model
    
#-----------------------------------------------------------------
    
    model = build_model()
    
    model.fit(X_train,y_train,batch_size=25,epochs=500)   
    
    y_pred=model.predict(X_test)
    
    scores = model.evaluate(X_test,y_test)
    
    print(scores)

#-----------------------------------------------------------------
   ann_viz(model, title="Neural Network for Boston Dataset")
#-----------------------------------------------------------------
Example #27
# The top of this snippet is truncated; X and the first rows of y are defined above.
y = np.array((  # ... earlier label rows omitted
            [1],
            [0],
            [1],
            [0],
            [0],
            [1]), dtype=float)


#Model
model = Sequential()
model.add(Dense(64, input_dim=3, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

# Compile model
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
model.fit(X, y, epochs=500, batch_size=1)
_, accuracy = model.evaluate(X, y)
print('\n\nAccuracy: %.2f' % (accuracy*100))

print('\n\n')

predictions = model.predict_classes(X)  # removed in TF 2.6+; use (model.predict(X) > 0.5).astype("int32")
# summarize all 8 cases
for i in range(8):
    print('%s => %d (expected %d)' % (X[i].tolist(), predictions[i], y[i]))

ann_viz(model, title="Multi-Layer Preceptron", view=True)
Example #28
# The top of this snippet is truncated; classifier = Sequential() is created above.
# Adding the input layer and the first hidden layer
classifier.add(
    Dense(output_dim=6, init='uniform', activation='relu', input_dim=11))

# Adding the second hidden layer
classifier.add(Dense(output_dim=6, init='uniform', activation='relu'))

# Adding the output layer
classifier.add(Dense(output_dim=1, init='uniform', activation='sigmoid'))

# Compiling the ANN
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size=10, nb_epoch=20)

# Part 3 - Making the predictions and evaluating the model

# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix

cm = confusion_matrix(y_test, y_pred)

from ann_visualizer.visualize import ann_viz

ann_viz(classifier, title="Diagram of Artificial Neural Network")