# Imports assumed by this notebook (not shown in the original cell)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

model = Sequential()

model.add(Dense(5, kernel_initializer='uniform', activation='relu', input_dim=5))
model.add(Dense(5, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))

# %% [markdown]
# **The rectified linear unit (relu) activation function** is a good general-purpose choice for the two hidden layers, while the sigmoid activation function is required for the final layer: the output (whether a passenger survived) must lie in the range 0-1 so it can be read as the probability of survival.
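
# %% [markdown]
# As a quick illustration (an added sketch, not part of the original notebook), sigmoid maps any real-valued input into the open interval (0, 1), which is what lets the final layer's output be read as a survival probability:

# %%
import numpy as np

def sigmoid(z):
    # 1 / (1 + e^-z): squashes any real number into (0, 1)
    return 1.0 / (1.0 + np.exp(-z))

print(sigmoid(np.array([-4.0, 0.0, 4.0])))  # ~[0.018, 0.5, 0.982]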

# %%
model.summary()
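
# %% [markdown]
# As a sanity check on the summary (an added note, not in the original notebook): a `Dense` layer has `(inputs + 1) * units` trainable parameters, the `+1` being the bias. The layers above contribute (5+1)·5 = 30, (5+1)·5 = 30, and (5+1)·1 = 6 parameters, 66 in total.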


# %%
model.compile(optimizer="adam", loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=32, epochs=50)

# %% [markdown]
# ### With the model built and trained, it's now time to see how it performs against the test data.

# %%
# predict_classes was removed in newer versions of Keras; threshold the sigmoid output instead
y_pred = (model.predict(X_test) > 0.5).astype("int32")
print(metrics.accuracy_score(y_test, y_pred))


Example no. 2
# The snippet begins mid-script: it assumes `import keras`, the Dense/Dropout
# layer imports, and a Sequential model named `classificador` whose first
# Dense layer was already added, e.g.:
#   classificador = Sequential()
#   classificador.add(Dense(units=neurons, activation=activation,
#                           kernel_initializer=kernel_initializer,
#                           input_dim=n_features))
classificador.add(Dropout(dropout))

classificador.add(
    Dense(units=neurons,
          activation=activation,
          kernel_initializer=kernel_initializer))
classificador.add(Dropout(dropout))

classificador.add(Dense(units=1, activation='sigmoid'))

opt = keras.optimizers.Adam(learning_rate=learning_rate,
                            decay=decay,
                            beta_1=beta_1,
                            beta_2=beta_2)

classificador.compile(optimizer=opt, loss=loss, metrics=['binary_accuracy'])

classificador.fit(x_train, y_train, batch_size=batch_size, epochs=epochs)

qtd_param = classificador.count_params()

print('Number of Parameters: ', qtd_param)

print('Calculating the ROC curve...')

previsoes_rna = classificador.predict(x_valid)
prob_rna = previsoes_rna
previsoes_rna = (previsoes_rna > 0.5)
previsoes_num_rna = []
# Convert the boolean predictions to 0/1; the original snippet is truncated
# mid-loop, and the else branch is the obvious completion
for i in previsoes_rna:
    if i:
        previsoes_num_rna.append(1)
    else:
        previsoes_num_rna.append(0)
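
# The original snippet is cut off after the "Calculating the ROC curve..."
# print. A plausible continuation (an assumption, not the original code),
# using scikit-learn on the probabilities kept in prob_rna; y_valid is
# assumed to hold the validation labels:
from sklearn.metrics import roc_curve, roc_auc_score

fpr, tpr, thresholds = roc_curve(y_valid, prob_rna)
print('AUC:', roc_auc_score(y_valid, prob_rna))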
Example no. 3
from keras import backend as K  # assumed import for the custom metrics below

def recall_m(y_true, y_pred):
    # recall = TP / (TP + FN); f1_m below calls this, but the original
    # snippet omits its definition, so it is restored here
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())

def precision_m(y_true, y_pred):
    # precision = TP / (TP + FP); K.epsilon() guards against division by zero
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())

def f1_m(y_true, y_pred):
    # Harmonic mean of precision and recall
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
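
# Note (added): the compile call below tracks only 'accuracy' and never uses
# these custom metrics. To actually monitor them during training, pass the
# callables in the metrics list, e.g. (a sketch, not in the original):
#   model.compile(loss='binary_crossentropy', optimizer='adam',
#                 metrics=['accuracy', precision_m, recall_m, f1_m])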
# define the keras model
model = Sequential()
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# compile the keras model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# fit the keras model on the dataset
history = model.fit(np.asarray(X_train), np.asarray(y_train), epochs=15, batch_size=1)
# evaluate the keras model
_, accuracy = model.evaluate(np.asarray(X_test), np.asarray(y_test))
print('Accuracy: %.2f' % (accuracy*100))
# make class predictions with the model
expected = np.asarray(y_test)
# predict_classes was removed in newer Keras; threshold the sigmoid output
predicted = (model.predict(np.asarray(X_test)) > 0.5).astype("int32")
# summarize the fit of the model
classification_model = metrics.classification_report(expected, predicted)
confusion_model = metrics.confusion_matrix(expected, predicted)
accuracy_model = metrics.accuracy_score(expected, predicted)
print(classification_model)
print(confusion_model)
print("Accuracy NN with best parameters: ",accuracy_model)
Example no. 4
# The snippet uses the legacy Keras 1 API (output_dim/init); the Keras 2
# names (units/kernel_initializer) are used below. A Sequential model is
# assumed to have been created first:
classifier = Sequential()

# Adding the input layer and the first hidden layer
classifier.add(
    Dense(units=2500,
          kernel_initializer='uniform',
          activation='sigmoid',
          input_dim=2500))
#################################################################
# Regularization and tuning are not shown in this file
#################################################################
# Adding the second hidden layer
classifier.add(Dense(units=1250, kernel_initializer='uniform', activation='sigmoid'))

# Adding the output layer
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))

# Compiling the ANN
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size=10, epochs=100)  # nb_epoch is the legacy Keras 1 name
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix

cm1 = confusion_matrix(y_test, y_pred)
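
# Added check (not in the original): accuracy can be read off the confusion
# matrix as the diagonal (correct predictions) over the total count.
print(cm1.trace() / cm1.sum())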
# Accuracy: ~98% using the ANN.
Example no. 5
    def get_estimator(self):
        estimator = self.kwargs.get("estimator", self.ESTIMATOR)
        # self.mlflow_log_param("model", estimator)
        # added both regressions for predicting scores and classifier for match outcomes
        # elif estimator == 'Linear':
        #     model = LinearRegression()
        # elif estimator == 'RandomForestRegressor':
        #     model = RandomForestRegressor()
        # elif estimator == 'Lasso':
        #     model = Lasso()
        # elif estimator == "Ridge":
        #     model = Ridge()
        # elif estimator == "GBM":
        #     model = GradientBoostingRegressor()
        # elif estimator == "KNNRegressor":
        #     model = KNeighborsRegressor()
        if estimator == 'GaussianNB':  # No proba parameter needed
            model = GaussianNB()
        # elif estimator == 'LDA':
        #     self.model_params = {'solver': ['lsqr','eigen'],  #note svd does not run with shrinkage and models using it will be tuned separately
        #                           'n_components': [1.0,2.0,3.0,4.0,5.0]}
        #     model = LinearDiscriminantAnalysis()
        # elif estimator == "xgboost":
        #     model = XGBRegressor()
        # classification models
        # was a second bare `if`; elif keeps GaussianNB from falling through
        # to the default LogisticRegression in the final else
        elif estimator == 'Logistic':  # No proba parameter needed
            self.model_params = {'C': np.arange(0.001, 1000)}
            #model = LogisticRegression(C=20.000999999999998)
            model = LogisticRegression()
        # elif estimator == 'LDA':
        #     model = LinearDiscriminantAnalysis()
        elif estimator == 'RandomForestClassifier':  # No proba parameter needed
            self.model_params = {
                'bootstrap': [True, False],
                'max_depth': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],
                'max_features': ['auto', 'sqrt'],
                'min_samples_leaf': [1, 2, 4],
                'min_samples_split': [2, 5, 10],
                'n_estimators':
                [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]
            }
            #model = RandomForestClassifier(n_estimators=1800, n_jobs=-1,max_depth=100,min_samples_split=5,bootstrap=False)
            model = RandomForestClassifier()
        elif estimator == "RidgeClassifier":  # No predict_proba
            self.model_params = {"alpha": np.arange(0.001, 1000)}
            model = RidgeClassifier(alpha=106.00099999999999)
            # model = RidgeClassifier()
            # model = GridSearchCV(estimator=grid, param_grid=dict(alpha=alphas))
        elif estimator == "KNNClassifier":  # No Proba parameter needed
            self.model_params = {
                "leaf_size": range(1, 1000),
                "n_neighbors": range(1, 1000),
                "p": [1.0, 2.0]
            }
            #model = KNeighborsClassifier(leaf_size=336,n_neighbors=913,p=2.0) #positive results
            model = KNeighborsClassifier()
            # model = GridSearchCV(knn, hyperparameters, cv=10)
        elif estimator == "XGBClassifier":  # Proba: Returns array with the probability of each data example being of a given class.
            self.model_params = {
                'max_depth': range(2, 20, 2),
                'n_estimators': range(60, 220, 40),
                'learning_rate': [0.3, 0.1, 0.01, 0.05],
                'min_child_weight': [1.0, 3.0, 5.0],
                'gamma': [1.0, 3.0, 5.0]
            }
            #model = XGBClassifier(max_depth=14,n_estimators=60,learning_rate=0.1,min_child_weight=1.0,gamma=5.0) #positive results
            # model = XGBClassifier(max_depth=18,n_estimators=60,learning_rate=0.05,min_child_weight=5,gamma=3.0) #positive results
            model = XGBClassifier()
            # model = GridSearchCV(XGB, param_grid=params_1, cv=5)
        elif estimator == "Dummy":
            model = DummyClassifier(strategy='uniform', random_state=15)
        elif estimator == "SVC":
            self.model_params = {
                'C': [0.1, 1, 10, 100, 1000],
                'gamma': [0.01, 0.001],
                'kernel': ['rbf', 'poly', 'sigmoid']
            }
            # model = SVC(kernel='sigmoid', C=80,gamma=0.001,probability=True)
            model = SVC(probability=True)

        elif estimator == "Sequential":
            model = Sequential()
            model.add(Flatten())
            model.add(BatchNormalization())
            model.add(Dense(32, activation='relu'))
            model.add(Dense(32, activation='relu'))
            model.add(Dense(16, activation='relu'))
            model.add(
                Dense(8,
                      kernel_regularizer=regularizers.l2(0.003),
                      activation='relu'))
            # (the original passed input_shape=(10000,) here, but Keras
            # ignores input_shape on a non-first layer, so it is dropped)
            model.add(
                Dense(8,
                      kernel_regularizer=regularizers.l2(0.003),
                      activation='relu'))
            model.add(Dense(1, activation='sigmoid'))
            # model.add(SimpleRNN(1, input_shape=[None, 1], activation='tanh'))
            model.compile(loss='binary_crossentropy',
                          optimizer='Adam',
                          metrics=['accuracy'])

        else:
            self.model_params = {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000]}
            model = LogisticRegression()

        estimator_params = self.kwargs.get("estimator_params", {})
        if estimator != "Sequential":
            model.set_params(**estimator_params)
        return model
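
# A minimal sketch (not part of the original class) of how the model_params
# grids registered above could feed scikit-learn's GridSearchCV; `trainer`
# (an instance of the class above), X_train, and y_train are assumptions:
from sklearn.model_selection import GridSearchCV

def tune(trainer, X_train, y_train, cv=5):
    # Build the base estimator, then search over the grid it registered
    model = trainer.get_estimator()
    search = GridSearchCV(model, param_grid=trainer.model_params, cv=cv, n_jobs=-1)
    search.fit(X_train, y_train)
    return search.best_estimator_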
Example no. 6
print("\b]")
"""## Alternative with Tensorflow"""

import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import Model

model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(7, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(7, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(3, activation=tf.nn.softmax))

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
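# Note (added): sparse_categorical_crossentropy expects integer class labels
# (here 0, 1, 2); with one-hot encoded targets you would use
# categorical_crossentropy instead.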

training = model.fit(Xtrain.values,
                     ytrain.values,
                     validation_split=0.1,
                     epochs=200,
                     batch_size=10,
                     verbose=2)

model.summary()
"""## Testing the accuracy"""

# Note: this evaluates on the training set, so it measures fit rather than
# generalization; held-out test data would give a truer accuracy figure.
accuracy = model.evaluate(Xtrain, ytrain)
print(accuracy)
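
# To turn the softmax outputs into hard class labels (an added sketch, not
# part of the original), take the argmax across the 3 output units:
import numpy as np

probs = model.predict(Xtrain.values)    # shape (n_samples, 3); rows sum to 1
pred_labels = np.argmax(probs, axis=1)  # integer class id per sample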
Example no. 7
# Assumes the rest of the notebook's imports: a pandas DataFrame `data`,
# train_test_split, StandardScaler, GaussianNB, Sequential/Dense, and
# sklearn's metrics module.
x_train, x_test, y_train, y_test = train_test_split(
    data[['sex', 'pclass', 'age', 'relatives', 'fare']], data.survived, test_size=0.2, random_state=0)
# %%
sc = StandardScaler()
X_train = sc.fit_transform(x_train)
X_test = sc.transform(x_test)
# %%
model = GaussianNB()
model.fit(X_train, y_train)
# %%
predict_test = model.predict(X_test)
print(metrics.accuracy_score(y_test, predict_test))
# %%

model = Sequential()
# %%
model.add(Dense(5, kernel_initializer='uniform',
                activation='relu', input_dim=5))
model.add(Dense(5, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
# %%
model.summary()
# %%
model.compile(optimizer="adam", loss="binary_crossentropy",
              metrics=["accuracy"])
model.fit(X_train, y_train, batch_size=32, epochs=50)
# %%
# predict_classes was removed in newer Keras; threshold the sigmoid output
y_pred = (model.predict(X_test) > 0.5).astype("int32")
print(metrics.accuracy_score(y_test, y_pred))
# %%
InteractiveShell.ast_node_interactivity = "all"