Example #1
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
# Number of hidden nodes, from the rule of thumb:
#   Nh = Ns / (α * (Ni + No))
# Ni = number of input neurons
# No = number of output neurons
# Ns = number of samples in the training data set
# α  = an arbitrary scaling factor, usually 2-10
Nh = int(891 / 32)
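
# A minimal sketch making the rule of thumb explicit (Ns=891 and Ni=15 come
# from this example; No=1 and α=2 are assumptions consistent with the magic
# number 32 = 2 * (15 + 1)):
def hidden_nodes(n_samples, n_in, n_out, alpha=2):
    """Rule-of-thumb hidden-layer size: Ns / (α * (Ni + No))."""
    return int(n_samples / (alpha * (n_in + n_out)))

assert hidden_nodes(891, 15, 1) == Nh  # both evaluate to 27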
# Initialising the ANN
classifier = Sequential()

# Adding the input layer and the first hidden layer
classifier.add(
    Dense(units=Nh,
          kernel_initializer='uniform',
          activation='relu',
          input_dim=15))
#classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dropout(0.01))

# Adding the second hidden layer
classifier.add(Dense(units=Nh, kernel_initializer='uniform',
                     activation='relu'))
#classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dropout(0.01))

## Adding the third hidden layer
#classifier.add(Dense(units = Nh, kernel_initializer = 'uniform', activation = 'relu'))
##classifier.add(LeakyReLU(alpha=0.1))
#classifier.add(Dropout(0.01))
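
# The KerasClassifier/GridSearchCV imports at the top suggest this model feeds
# a grid search. A hedged sketch of how it would be wrapped (the build
# function, parameter grid, and cv/epoch settings are illustrative
# assumptions, not the original author's settings):
def build_classifier(optimizer='adam'):
    model = Sequential()
    model.add(Dense(units=Nh, kernel_initializer='uniform',
                    activation='relu', input_dim=15))
    model.add(Dense(units=1, kernel_initializer='uniform',
                    activation='sigmoid'))
    model.compile(optimizer=optimizer, loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model

wrapped = KerasClassifier(build_fn=build_classifier, batch_size=32, epochs=100)
grid = GridSearchCV(estimator=wrapped,
                    param_grid={'optimizer': ['adam', 'rmsprop']},
                    scoring='accuracy', cv=10)
# grid.fit(X_train, y_train) would then expose grid.best_params_ / grid.best_score_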
Example #2
        printPrecisionRecall('LightGBM', y_test, y_pred.round())
        clf_ldaShrinkage = LinearDiscriminantAnalysis(solver='lsqr',
                                                      shrinkage='auto').fit(
                                                          X_train_std, y_train)
        clf_lda = LinearDiscriminantAnalysis(solver='lsqr',
                                             shrinkage=None).fit(
                                                 X_train_std, y_train)
        y_pred_ldas = clf_ldaShrinkage.predict(X_test_std)
        y_pred_lda = clf_lda.predict(X_test_std)
        predictions = [round(value) for value in y_pred_ldas]
        accuracy = accuracy_score(y_test, predictions)
        print("Shrinkage LDA Accuracy: %.2f%%" % (accuracy * 100.0))
        predictions = [round(value) for value in y_pred_lda]
        accuracy = accuracy_score(y_test, predictions)
        print("LDA Accuracy: %.2f%%" % (accuracy * 100.0))
'''import tensorflow as tf 
from keras.models import Sequential
model = Sequential()
from keras.layers import Dense
model.add(Dense(units=64, activation='relu', input_dim=100))
model.add(Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
model.compile(loss=keras.losses.categorical_crossentropy,optimizer=keras.optimizers.SGD(lr=.01,momentum=.9,nesterov=True))
model.fit(x_train, y_train, epochs=5,batch_size=32)
model.train_on_batch(x_batch,y_batch)
loss_and_metrics = model.evaluate(x_test, y_test, batch_size=128)
classes=model.predict(x_test,batch_size=128)
from keras.models import Sequential
from keras.layers import Dense, Activation
model = Sequential([Dense(32,input_shape=(784,)),
				Activation('relu'),
Example #3
# Random seeds
np.random.seed(123)
rn.seed(123)
tf.set_random_seed(123)

# Convert the Attrition labels to one-hot encoding so the network can read them
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
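# A quick illustration of what to_categorical does (the labels here are made
# up): to_categorical([0, 1, 1]) -> [[1., 0.], [0., 1.], [0., 1.]],
# i.e. one column per class.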

# Neural Network Architecture

# Create the stack of fully connected layers
model = Sequential()
# Add dense layers, specifying the number of neurons in each
# input_dim is only needed for the first layer: the number of features/columns
model.add(Dense(input_dim=17, units=8, activation='relu', name='output_1'))
model.add(Dense(units=16, activation='relu', name='output_2'))
# The output layer has two neurons, one for each attrition class
model.add(Dense(units=2, activation='sigmoid'))

# Compile the Network
# More information on optimizer types:
# https://keras.io/optimizers/
model.compile(optimizer=Adam(lr=0.01), loss='binary_crossentropy', metrics=['accuracy'])
# loss='binary_crossentropy' specifies that your model should optimize the log 
# loss for binary classification.  
# metrics=['accuracy'] specifies that accuracy should be printed out

# Review NN configuration
model.summary()
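
# A hedged usage sketch for the compiled network (X_train/X_test with 17
# feature columns are assumed to exist from earlier preprocessing):
history = model.fit(X_train, y_train, epochs=20, batch_size=32,
                    validation_data=(X_test, y_test), verbose=0)
loss, acc = model.evaluate(X_test, y_test, verbose=0)
print("Test accuracy: %.2f%%" % (acc * 100))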
Example #4
#y_pred22 = rfc2.predict(df_test)
#
#y_pred222 = pd.DataFrame(data = y_pred22, index = range(418), columns=["Survived"] )
#s2= pd.concat([df_nihai,y_pred222],axis=1)
#s2.to_csv('titanic_nihai.csv',index=False)
#

import keras
from keras.models import Sequential
from keras.layers import Dense

classifier = Sequential()
# Keras 2 renamed 'init' to 'kernel_initializer'
classifier.add(Dense(4, kernel_initializer="uniform", activation="relu", input_dim=8))
classifier.add(Dense(4, kernel_initializer="uniform", activation="relu"))
classifier.add(Dense(1, kernel_initializer="uniform", activation="sigmoid"))

classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

classifier.fit(X_train, y_train, epochs=250)
y_predYSA = classifier.predict(X_test)

y_predYSA = (y_predYSA > 0.5)  # threshold the sigmoid outputs at 0.5
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
sc = accuracy_score(y_test, y_predYSA, normalize=False)  # count of correct predictions
print(sc)
cm = confusion_matrix(y_test, y_predYSA)
print("YSA")
print(cm)
Example #5
    if (model_type == 'ANN'):
        from keras.layers.core import Dense, Dropout, Activation
        from keras.models import Sequential
        from keras.callbacks import EarlyStopping

        num_feats = len(x_train[0])
        model = Sequential()

        early_stop = EarlyStopping(monitor='loss',
                                   patience=0,
                                   verbose=1,
                                   min_delta=0.005,
                                   mode='auto')
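        # With patience=0, training halts as soon as an epoch improves the
        # monitored training loss by less than min_delta=0.005, cutting the
        # 25-epoch budget below short once the loss plateaus.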

        model.add(Dense(num_feats, activation='relu', input_dim=num_feats))
        model.add(Dropout(0.50))
        model.add(Dense(500, activation='relu', kernel_initializer='uniform'))
        model.add(Dropout(0.50))
        model.add(Dense(2, kernel_initializer='uniform', activation='softmax'))

        model.compile(loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'],
                      optimizer='adam')

        model.fit(x_train,
                  y_train,
                  epochs=25,
                  verbose=1,
                  callbacks=[early_stop])
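
        # A hedged evaluation sketch under the same assumptions (x_test and
        # y_test shaped like the training data, with integer labels for the
        # sparse loss):
        loss, acc = model.evaluate(x_test, y_test, verbose=0)
        print('ANN test accuracy: %.2f%%' % (acc * 100))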
Example #6
            t_nn[i] = clock()

            # Standardize the features...
            X_train_nn = preprocessing.scale(X_train)
            X_test_nn = preprocessing.scale(X_test)
            X_train_train_nn = preprocessing.scale(X_train_train)
            X_train_validate_nn = preprocessing.scale(X_train_validate)

            # ...but the reassignments below immediately discard the scaled
            # copies and train on the raw features; comment them out to use
            # the scaled data instead.
            X_train_nn = X_train
            X_test_nn = X_test
            X_train_train_nn = X_train_train
            X_train_validate_nn = X_train_validate

            model = K.Sequential()
            model.add(
                K.layers.Dense(128,
                               input_dim=X_train_nn.shape[1],
                               activation='sigmoid'))
            model.add(K.layers.Dense(64, activation='sigmoid'))
            model.add(K.layers.Dense(32, activation='sigmoid'))
            model.add(K.layers.Dense(1))
            model.summary()

            model.compile(optimizer='adam',
                          loss='mean_squared_error',
                          metrics=['mean_squared_error'])

            model.fit(x=X_train_nn,
Example #7
		elif(model_type == 'ANN'):
			from keras.layers.core import Dense, Dropout, Activation
			from keras.models import Sequential
			from keras.utils import np_utils, to_categorical
			from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

			y_train = to_categorical(y_train, num_classes)
			y_test  = to_categorical(y_test, num_classes)

			patience = 16
			early_stop = EarlyStopping(monitor='loss', patience=patience, verbose=1, min_delta=0.005, mode='auto')
			model_save = ModelCheckpoint("best_model.hdf5", monitor='loss', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
			reduce_LR = ReduceLROnPlateau(monitor='loss', factor=0.1, patience=int(patience / 2), verbose=0, min_delta=0.005, mode='auto', cooldown=0, min_lr=0)
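			# Together these callbacks stop training once the loss stalls for 16
			# epochs, checkpoint the best weights to best_model.hdf5 every epoch,
			# and cut the learning rate 10x after 8 stalled epochs.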

			model = Sequential()
			model.add(Dense(num_feats,activation='relu',input_dim=(num_feats)))
			model.add(Dropout(0.5))
			model.add(Dense(int((num_feats+num_classes)/2), activation='relu', kernel_initializer='uniform'))
			model.add(Dropout(0.5))
			model.add(Dense(num_classes, kernel_initializer='uniform', activation='softmax'))

			# categorical_crossentropy matches the one-hot targets and softmax output
			# (the original binary_crossentropy under-penalizes multi-class errors)
			model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')

			model.fit(x_train, y_train, epochs=100, verbose=1, callbacks=[early_stop, reduce_LR])
		else:
			raise Exception('Unrecognized Model. Use XGB, SVM or ANN')

		if(model_type == 'ANN'):
			results = ann_1d(model, x_test, y_test, 0)
			#OBOResults = ann_1d(model, x_test, y_test, 1)
		else:
Example #8
                           n_jobs = -1)
grid_search = grid_search.fit(X_train, y_train)
best_accuracy = grid_search.best_score_
best_parameters = grid_search.best_params_


# Importing the Keras libraries and packages
import keras
from keras.models import Sequential
from keras.layers import Dense

# Initialising the ANN
classifier = Sequential()

# Adding the input layer and the first hidden layer
# (Keras 2 renamed output_dim/init to units/kernel_initializer)
classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=8))

# Adding the second hidden layer
classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))

# Adding the third hidden layer
classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))

# Adding the output layer
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))

# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])

# Feature scaling
x_test = sc.transform(x_test)
X = sc.fit_transform(X)
X_test = sc.transform(X_test)

# Making the artificial neural network

# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout

# Initialising the ANN
classifier = Sequential()

# Adding the input layer and the first hidden layer
classifier.add(Dense(activation="relu", input_dim=1000, units=300))
classifier.add(Dropout(rate=0.05))

# Adding the second hidden layer
classifier.add(Dense(activation="sigmoid", units=300))
classifier.add(Dropout(rate=0.05))

# Adding the output layer
classifier.add(Dense(activation="softmax", units=9))

# Compiling the ANN
classifier.compile(optimizer='adam',
                   loss='sparse_categorical_crossentropy',
                   metrics=['accuracy'])

# Apply the fitted feature selector to the dev and test sets
x_dev = selection.transform(x_dev)
test = selection.transform(test)
print(x_train.shape)

# Expand features with degree-2 polynomial terms; fit on the training set
# only, then reuse the fitted transformer on dev/test
poly_2 = PolynomialFeatures(2)
x_train = poly_2.fit_transform(x_train)
x_dev = poly_2.transform(x_dev)
test = poly_2.transform(test)
print(x_train.shape)
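
# Why the column count jumps: PolynomialFeatures(2) maps n input columns to
# 1 + n + n*(n+1)/2 output columns (bias, linear, and degree-2 terms). A tiny
# self-contained check (the 5-column input is made up for illustration):
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
demo = PolynomialFeatures(2).fit_transform(np.zeros((1, 5)))
print(demo.shape)  # (1, 21) == 1 + 5 + 5*6/2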
'''model2 = XGBClassifier()
model2.fit(x_train, y_train)'''

print "Training..."
seed = 7
np.random.seed(seed)
'''
model = Sequential()
model.add(Dense(528, input_dim = 528, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation = 'sigmoid'))
model.summary()
model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics=['accuracy'])
model.fit(np.array(x_train), np.array(y_train), nb_epoch = 5, batch_size = 64, verbose = 1)'''

# Logistic Regression
'''start_time = time.clock()
#log_Reg = BalancedBaggingClassifier(base_estimator= linear_model.LogisticRegression(C = 1e10, class_weight = {0:1, 1:1}), ratio={0:3, 1:1} ,replacement=True, random_state=0, max_samples=0.1, n_estimators = 10)
#log_Reg = AdaBoostClassifier(base_estimator=linear_model.LogisticRegression(C = 1e10), n_estimators=20)
log_Reg = linear_model.LogisticRegression(C = 1e10) # Default C = 1, but we can tune this hyper parameter
Example #11
model.fit(x_train, y_train)
# score=0.52721
#%%
prediction = model.predict(x_test)
#%%
score = f1_score(y_test, prediction, average='weighted')
#score=0.52721
#%%
import keras
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.optimizers import Adam
from keras.utils import to_categorical
#%%
model = Sequential()
# Note: these Dense layers use the default linear activation; without a
# non-linearity (e.g. activation='relu') the stacked hidden layers collapse
# into a single linear map before the softmax.
model.add(Dense(200, input_shape=(x_train.shape[1], )))
model.add(Dense(200))
model.add(Dropout(0.2))
model.add(Dense(150))
model.add(Dense(50))
model.add(Dense(6, activation='softmax'))
#%%

# categorical_crossentropy expects one-hot targets, so y_train is presumably
# converted with to_categorical (imported above) elsewhere in the source
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['acc'])

#%%

model.fit(x_train, y_train, epochs=1000)
#%%
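# A hedged evaluation sketch matching the f1_score check earlier in this
# example (y_test is assumed to hold the original integer labels; argmax maps
# the softmax probabilities back to class indices):
import numpy as np
nn_pred = np.argmax(model.predict(x_test), axis=1)
print(f1_score(y_test, nn_pred, average='weighted'))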
Example #12
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)

Xfpr = []

tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
i = 1
historySet = []
cvscores = []

# Model definition
model = Sequential()
model.add(
    Dense(12,
          input_dim=inDimen,
          kernel_initializer='uniform',
          activation='relu',
          kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(
    Dense(9,
          kernel_initializer='uniform',
          activation='relu',
          kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(
    Dense(8,
          kernel_initializer='uniform',
          activation='relu',
          kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
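
# The output layer and the cross-validation loop are clipped from this
# excerpt. A hedged sketch of how the variables above are typically used
# (X/Y as the full feature and label arrays, the sigmoid output layer, and
# epochs=50 are assumptions; a production version would rebuild the model
# for each fold):
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
for train_idx, test_idx in kfold.split(X, Y):
    history = model.fit(X[train_idx], Y[train_idx], epochs=50, verbose=0)
    historySet.append(history)
    score = model.evaluate(X[test_idx], Y[test_idx], verbose=0)
    cvscores.append(score[1] * 100)
print("%.2f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores)))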
Example #13
    # Show all the graphics
    graphics.show()

elif algorithm == '2':

    print('1- Load previous model')
    print('2- Train model')
    option = input('Choose option: ')

    if option == '2':

        # Create the model
        model = Sequential()

        # Add layers to the model
        model.add(Dense(64, input_dim=7, activation='relu'))
        model.add(Dense(32, activation='relu'))
        model.add(Dense(1, activation='sigmoid'))

        # Compile the model with an RMSprop optimizer and a learning rate of 0.0001
        model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.0001))
        checkpoint = ModelCheckpoint('keras_models/weights.hdf5',
                                     monitor='val_loss',
                                     verbose=0,
                                     save_best_only=True,
                                     mode='auto')

        # Train the model
        history = model.fit(x_train,
                            y_train,
                            validation_data=(x_test, y_test),