Example #1
    def _model_build(self, *arg):
        self._prepare_test_data()
        model = KerasClassifier(build_fn=self.create_model, verbose=0)
        optimizers = ['adam']
        init = ['normal', 'uniform']
        epochs = [100, 150]
        batches = [5, 10]
        param_grid = dict(optimizer=optimizers, epochs=epochs,
                          batch_size=batches, init=init)
        grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=5)
        grid_result = grid.fit(self.x_train, self.y_train)
        print("Best: %f using %s" % (grid_result.best_score_,
                                     grid_result.best_params_))
        # means = grid_result.cv_results_['mean_test_score']
        # stds = grid_result.cv_results_['std_test_score']
        # params = grid_result.cv_results_['params']
        # for mean, stdev, param in zip(means, stds, params):
        #     print("%f (%f) with: %r" % (mean, stdev, param))

        # Training with the best parameters
        model = Sequential()
        model.add(Dense(12, input_dim=8,
                        kernel_initializer=grid_result.best_params_['init'],
                        activation='relu'))
        model.add(Dense(8,
                        kernel_initializer=grid_result.best_params_['init'],
                        activation='relu'))
        model.add(Dense(1,
                        kernel_initializer=grid_result.best_params_['init'],
                        activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer=grid_result.best_params_['optimizer'],
                      metrics=['accuracy'])
        # Fit the model with the tuned settings
        model.fit(self.x_train, self.y_train,
                  epochs=grid_result.best_params_['epochs'],
                  batch_size=grid_result.best_params_['batch_size'])
        yy_pred = model.predict(self.x_test)
        self.y_pred = [np.round(x) for x in yy_pred]
        self.y_true = self.y_test
        self.prob = model.predict_proba(self.x_test)
        self._analyse_result()
param_grid = dict(epochs=epochs,
                  optimizer=optimizer,
                  activation=activation,
                  dropout_rate=dropout_rate)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3)
grid_result = grid.fit(images_train, labels_train)

print("Best: %f using %s" %
      (grid_result.best_score_, grid_result.best_params_))
best = grid_result.best_params_
# read tuned values by name rather than relying on dict ordering
activation = best['activation']
dropout_rate = best['dropout_rate']

#Building model with best parameters
model = models.Sequential()

model.add(
    layers.Conv2D(32, (5, 5), activation='relu', input_shape=(299, 299, 3)))
model.add(layers.MaxPooling2D((2, 2)))

model.add(layers.Conv2D(64, (5, 5), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))

model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.AveragePooling2D((2, 2)))

model.add(layers.Flatten())
model.add(layers.Dense(9, activation=activation))
model.add(layers.Dropout(rate=dropout_rate))
model.add(layers.Dense(9, activation='softmax'))

#Compiling
model.compile(loss='categorical_crossentropy',
Example #3
X = X_values.reshape((X_values.shape[0], 1, X_values.shape[1]))
Y = y.values

split_value = 1500
test_X = X[split_value:, :]
test_Y = Y[split_value:]

train_X = X[:split_value, :]
train_Y = Y[:split_value]

# alternative optimizers (only the last assignment takes effect)
# opt = Adamax(lr=0.01)
# opt = Adam(lr=0.01)
opt = SGD(lr=0.01, momentum=0.0)

model = Sequential()
model.add(LSTM(40, return_sequences=True, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dropout(0.0))
model.add(Dense(1))
model.add(Flatten())
model.compile(loss='mean_absolute_error', optimizer=opt, metrics=['accuracy'])  # note: accuracy is not meaningful for a regression loss

history = model.fit(train_X,
                    train_Y,
                    epochs=100,
                    batch_size=40,
                    validation_data=(test_X, test_Y),
                    verbose=1)

print(history.history.keys())
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
accuracies = cross_val_score(estimator=classifier,
                             X=X_train,
                             y=y_train,
                             cv=10,
                             n_jobs=-1)
print("Results: %.2f%% (%.2f%%)" %
      (accuracies.mean() * 100, accuracies.std() * 100))

# checking for overfitting (Dropout Regularization to reduce overfitting if needed)
# classifier.add(Dropout(rate=0.1))

########################## manually running model
seed = 7
np.random.seed(seed)
classifier = Sequential()
classifier.add(
    Dense(units=45,
          kernel_initializer='uniform',
          activation='relu',
          input_dim=15))
#classifier.add(Dropout(rate=0.1))
classifier.add(Dense(units=30, kernel_initializer='uniform',
                     activation='relu'))
#classifier.add(Dropout(rate=0.3))
classifier.add(
    Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])
hist = classifier.fit(X_train, y_train, batch_size=1, epochs=100)

y_pred = classifier.predict(X_test)
y_pred = (y_pred >= 0.5)
Example #5
                       metrics=['accuracy'])
    return classifier


classifier = KerasClassifier(build_fn=build_classifier,
                             batch_size=10,
                             epochs=30,
                             verbose=1)
accuracies = cross_val_score(classifier, X_train, y_train, cv=10)

from keras.layers import Dropout

classifier = Sequential()
classifier.add(
    Dense(6,
          activation='relu',
          kernel_initializer='uniform',
          input_shape=(11, )))
classifier.add(Dropout(rate=.1))
classifier.add(Dense(6, activation='relu', kernel_initializer='uniform'))
classifier.add(Dropout(rate=.1))
classifier.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])
from keras.callbacks import EarlyStopping

# assumed early-stopping callback (not defined in the original snippet);
# monitors training loss since fit receives no validation data
early_stop = EarlyStopping(monitor='loss', patience=5)
classifier.fit(X_train,
               y_train,
               batch_size=10,
               epochs=100,
               callbacks=[early_stop])
Example #6
grid = GridSearchCV(estimator=model,
                    param_grid=param_grid3,
                    n_jobs=1,
                    scoring='f1_weighted',
                    verbose=1)
grid_result = grid.fit(Xtrain_encoded, Ytrain_integer)
print("Best: %f using %s" %
      (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))

#final model
model = Sequential()
model.add(Embedding(20000, 100, input_length=MAX_SEQUENCE_LENGTH))
model.add(LSTM(100))
model.add(Dropout(0.3))
model.add(Dense(16, activation='softmax'))  # softmax output to match categorical_crossentropy
model.compile(loss='categorical_crossentropy',
              optimizer='Adam',
              metrics=['accuracy'])
model.fit(Xtrain_encoded,
          Ytrain_encoded,
          validation_split=0.2,
          epochs=20,
          batch_size=16,
          verbose=1)
loss, accuracy = model.evaluate(Xtest_encoded, Ytest, verbose=1)
print('Accuracy: %f' % (accuracy * 100))
Example #7
batch_size = [50,100,150,10000,30000]

param_grid = dict(dropout_prob=dropout_prob, num_layers=num_layers, batch_size=batch_size)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X_train_processed_norm, y_train_encoded)

print("Best validation accuracy: %f using %s" % (grid_result.best_score_, grid_result.best_params_))


# Tune number of epochs using the tuned value of other hyper-parameters obtained above

model = Sequential()
model.add(Flatten(input_shape=X_train.shape[1:]))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))

model.summary()

model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
checkpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5',verbose=1, save_best_only=True)

results = model.fit(X_train_processed_norm, y_train_encoded, batch_size=150, epochs=30,
          validation_split=0.33, callbacks=[checkpointer],
          verbose=0, shuffle=True)
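Since save_best_only=True keeps only the weights from the best validation epoch, reload them before evaluating; the filepath matches the checkpointer above:

# restore the best weights saved by the checkpointer
model.load_weights('mnist.model.best.hdf5')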

accuracies = cross_val_score(estimator=classifier, X=X_train, y=y_train, cv=10, n_jobs=1)

mean = accuracies.mean()
variance = accuracies.std()


# improving the ANN
# Dropout regularization to reduce overfitting if needed


classifier = Sequential()

# adding input layer and first hidden layer with dropout
classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))
classifier.add(Dropout(rate=0.1))
# adding second hidden layer
classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(rate=0.1))

# adding output layer
# if we have more than one output node we use softmax instead
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))

# compiling the ANN
# a binary dependent variable uses binary_crossentropy as the loss;
# more than two outcome classes would use categorical_crossentropy
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Split: 70% train, 15% validation, 15% test
X_train, X_test, y_train, y_test = train_test_split(all_images, all_labels_images_grouped, test_size=0.30, random_state=42)
X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=0.5, random_state=42)

X_train = np.asarray(X_train)
X_test = np.asarray(X_test)
X_val = np.asarray(X_val)
y_train = np.asarray(pd.get_dummies(y_train))
y_val = np.asarray(pd.get_dummies(y_val))
y_test = np.asarray(pd.get_dummies(y_test))


model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(100, 800, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(3, activation='softmax'))

model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
Example #10
class NeuralNetwork:
    def __init__(self, ds: Dataset):
        self.ds = ds

    def create_model(self, typ):
        clear_session()
        if typ == 'Sequential':
            self.model = Sequential()

    def add_final_layer(self):
        if self.ds.y_train.nunique() == 2:
            self.model.add(layers.Dense(1, activation='sigmoid'))
            self.model.compile(optimizer='adam',
                               loss='binary_crossentropy',
                               metrics=['accuracy'])

    def sequential(self, filters, activation='relu'):
        """Create a sequential Keras model."""
        # parameter renamed from `layers` to avoid shadowing the keras.layers module
        self.create_model('Sequential')
        self.add_dense(filters, activation)
        self.add_final_layer()

    def add_dense(self, filters, activation='relu'):
        for x in filters:
            self.model.add(layers.Dense(x, activation=activation))

    def add_embedding_layer(self):
        if self.ds.weights is not None:
            self.model.add(
                layers.Embedding(input_dim=self.ds.vocab_size,
                                 output_dim=self.ds.weights.shape[1],
                                 input_length=self.ds.X_train.shape[1],
                                 weights=[self.ds.weights]))
        else:
            self.model.add(
                layers.Embedding(input_dim=self.ds.vocab_size,
                                 output_dim=100,  # assumed fallback size; no pretrained weights to infer it from
                                 input_length=self.ds.X_train.shape[1]))

    def embedding_to_sequential(self, filters, activation='relu'):
        self.create_model('Sequential')
        self.add_embedding_layer()
        self.model.add(layers.GlobalMaxPool1D())
        self.add_dense(filters)
        self.add_final_layer()

    def cnn(self, cnn_filters, kernel_sizes, dense_filters, activation='relu'):
        self.create_model('Sequential')
        self.add_embedding_layer()
        self.add_cnn(cnn_filters, kernel_sizes)
        self.model.add(layers.GlobalMaxPool1D())
        self.add_dense(dense_filters)
        self.add_final_layer()

    def grid_search(self, param_grid, **kwargs):
        # kwargs: epochs=10, verbose=False
        # NOTE: KerasClassifier expects build_fn to return a compiled model,
        # while these builder methods mutate self.model and return None;
        # see the adapter sketch after this class.
        if 'kernel_size' in param_grid:
            self.model = KerasClassifier(build_fn=self.cnn, **kwargs)
        elif 'filters' in param_grid:
            self.model = KerasClassifier(build_fn=self.embedding_to_sequential,
                                         **kwargs)
        else:
            self.model = KerasClassifier(build_fn=self.sequential, **kwargs)
        grid = RandomizedSearchCV(estimator=self.model,
                                  param_distributions=param_grid,
                                  verbose=1)
        grid.fit(self.ds.X_train, self.ds.y_train)
        test_accuracy = grid.score(self.ds.X_test, self.ds.y_test)
        print(f'Test accuracy for best model is {test_accuracy}')
        return grid

    def add_cnn(self, filters, kernels):
        assert len(filters) == len(kernels)
        for i in range(len(filters)):
            self.model.add(layers.Conv1D(filters[i], kernels[i]))

    def train(self, **kwargs):
        self.model.fit(self.ds.X_train, self.ds.y_train, **kwargs)

    def evaluate(self):
        trainl, traina = self.model.evaluate(self.ds.X_train,
                                             self.ds.y_train,
                                             verbose=False)
        vall, vala = self.model.evaluate(self.ds.X_test,
                                         self.ds.y_test,
                                         verbose=False)
        print(f'Training loss {trainl}, training accuracy {traina}')
        print(f'Validation loss {vall}, validation accuracy {vala}')
        return vall, vala

    def plot_history(self, path):
        metrics = ['acc', 'val_acc', 'loss', 'val_loss']
        metric_dict = {
            'acc': ['Training Accuracy', 'b'],
            'val_acc': ['Validation Accuracy', 'r'],
            'loss': ['Training Loss', 'b'],
            'val_loss': ['Validation Loss', 'r']
        }

        plt.figure(figsize=(12, 5))
        plt.subplot(1, 2, 1)
        plt.title('Training and validation accuracy')
        for metric in metrics:
            if metric == 'loss':
                plt.legend()  # finish the accuracy panel before switching
                plt.subplot(1, 2, 2)
                plt.title('Training and validation loss')
            met = self.model.history.history[metric]
            x = range(1, len(met) + 1)
            plt.plot(x, met, metric_dict[metric][1], label=metric_dict[metric][0])

        plt.legend()
        plt.savefig(path)
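The worry in the grid_search comment is justified: KerasClassifier calls build_fn and uses the compiled model it returns, while the builder methods above mutate self.model and return None. A minimal adapter sketch, assuming an instance nn of the class above (make_build_fn is a hypothetical helper, not part of the original):

def make_build_fn(nn, filters=(64,), activation='relu'):
    # hypothetical adapter: scikit-learn's wrapper expects build_fn to
    # *return* a compiled model, so wrap the mutating builder methods
    def build_fn():
        nn.sequential(list(filters), activation)  # builds and compiles nn.model
        return nn.model
    return build_fn

# usage sketch:
# clf = KerasClassifier(build_fn=make_build_fn(nn), epochs=10, verbose=0)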
# NOTE: fitting a scaler on the test set leaks test statistics; fitting on
# the training data and reusing that scaler is the usual practice.
scaler_test_10 = StandardScaler().fit(X_test)
X_test = scaler_test_10.transform(X_test)
pickle.dump(scaler_test_10, open("scaler_test_10.p", "wb"))


X_train = np.array(X_train)
X_test = np.array(X_test)
X_val = np.array(X_val)
y_train = np.asarray(pd.get_dummies(y_train))
y_val = np.asarray(pd.get_dummies(y_val))
y_test = np.asarray(pd.get_dummies(y_test))


model = models.Sequential()
model.add(layers.Dense(256, activation='relu', kernel_regularizer=regularizers.l2(0.01), input_shape=(32,)))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['acc'])

model.summary()

history = model.fit(X_train,
                    y_train,
                    epochs=35,
                    batch_size=120, 
                    validation_data=(X_val, y_val))
Example #12
# If you look at the full docs it will appear intimidating. We only
# need to set `units` in this lab. `Units` is the same as size or
# channels from the last lab. It just determines how many
# transformations we perform each step.

from keras.layers import LSTM
units = 32
lstm_layer = LSTM(units)

# This layer can be applied to a sequence of inputs $x_1, \ldots,
# x_T$, and the output will be the final features.

values_per_time = 1
input_shape = (input_length, values_per_time)
model = Sequential()
model.add(Reshape(input_shape))
model.add(lstm_layer)

# Note that we need to use a `Reshape` because we have 1 value at each
# time.  The LSTM layer allows `sequence_length x values per time` inputs as
# we will see later.

# To run the model it looks exactly as we saw the last couple weeks.

# take the first example as input
# input shape: num samples x input_length
# output shape: num samples x hidden size
inputs = tf.convert_to_tensor(df_train[features].iloc[:1])
output = model(inputs)
print(inputs.shape)
print(output.shape)
def create_model():
    # create model
    model = Sequential()
    model.add(Dense(12, activation='relu', input_shape=(11, )))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    #compile model
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model


#create a classifier for use in scikit-learn
seed = 7
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score

model = KerasClassifier(build_fn=create_model, epochs=150, batch_size=10)
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(model, X, Y, cv=kfold)
print(results.mean())

# reducing overfitting using Dropout: add it inside create_model; the
# KerasClassifier wrapper itself has no .add method
#   model.add(Dropout(0.2))

# lift performance with a learning-rate schedule (again, this compile
# belongs inside create_model)
sgd = SGD(lr=0.1, momentum=0.9, decay=0.0001, nesterov=False)
model.compile(loss='binary_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
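The decay argument above bakes the schedule into the optimizer; an explicit schedule can also be supplied as a callback (a sketch; step_decay is a hypothetical step schedule, assuming the standalone keras API):

from keras.callbacks import LearningRateScheduler

def step_decay(epoch, lr=None):
    # hypothetical schedule: halve a 0.1 base rate every 20 epochs
    base_lr, drop, every = 0.1, 0.5, 20
    return base_lr * (drop ** (epoch // every))

# model.fit(..., callbacks=[LearningRateScheduler(step_decay)])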
Example #14
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
used = []

for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))
    f.write("%f (%f) with: %r\n\n" % (mean, stdev, param))
    used.append(param)

########################################
## train the actual model with the best parameters

best = grid_result.best_params_

model = Sequential()
model.add(Dense(best['neurons'], input_dim=1000,
                activation=best['activation']))
model.add(Dropout(best['dropout_rate']))
for i in range(best['hidden_layers']):
    model.add(Dense(best['neurons'], activation=best['activation']))
    model.add(Dropout(best['dropout_rate']))
model.add(Dense(2, kernel_initializer='uniform', activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer=best['optimizer'],
              metrics=['accuracy'])

hist = model.fit(x_train,
                 y_train,
                 batch_size=best['batch_size'],
Example #15
Plot a graph where 
    the x-axis is the number of hidden units, 
    and the y-axis is the model performance (two lines: one with test loss, and one with test accuracy)
    
'''
from keras.callbacks import History
history = History()

units = [2**i for i in range(1, 15)]
loss = []
accuracy = []
for i in units:
    print('\n', 'number of hidden units: ', i)
    model = Sequential()
    model.add(Dense(i, input_dim=input_unit_size, activation='relu'))
    model.add(Dropout(0))
    model.add(Dense(num_classes, activation='softmax'))
    sgd = SGD(lr=0.5)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    model_result = model.fit(X_train, y_train, validation_split=0.2)
    loss.append(model_result.history['val_loss'][-1])      # final-epoch value
    accuracy.append(model_result.history['val_acc'][-1])

print('\n\n\n', 'loss: ', '\n', loss)
print('\n\n\n', 'accuracy: ', '\n', accuracy)
print('\n\n\n')

plt.plot(units, loss, marker='o', color='r', label='loss')
plt.plot(units, accuracy, marker='o', color='b', label='accuracy')
plt.legend()
plt.show()
Example #16
                           cv = 10)

grid_search = grid_search.fit(X_train, y_train)
best_params = grid_search.best_params_
best_accuracy = grid_search.best_score_

## BEST PARAMS {optimizer = rmsprop, batch_size = 25, nb_epoch = 500}

######################################################################################################################
#################################### MAKING THE PREDICTION AND EVALUATING MODEL ######################################################

## Initialise the Artificial Neural Network
classifier = Sequential()

# Adding the input layer and the first hidden layer - best practice: average
# of the input and output nodes ((11+1)/2)
classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))  # relu for the hidden nodes

## Adding the second hidden layer

classifier.add(Dense(units=16, kernel_initializer='uniform', activation='relu'))


classifier.add(Dense(units=16, kernel_initializer='uniform', activation='relu'))

classifier.add(Dense(units=16, kernel_initializer='uniform', activation='relu'))

## Adding the output layer
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))  # one node (yes/no) with sigmoid activation
                                                                                    # if the target is one-hot encoded use units=3, activation='softmax'

## Compiling the Artificial Neural Network - finding the best weight values with stochastic approach
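The compile call is cut off above; a short sketch of wiring the tuned values from best_params back into compile and fit, assuming the keys recorded in the BEST PARAMS note:

classifier.compile(optimizer=best_params['optimizer'],
                   loss='binary_crossentropy',
                   metrics=['accuracy'])
classifier.fit(X_train, y_train,
               batch_size=best_params['batch_size'],
               epochs=best_params['nb_epoch'])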
Example #17
# Dropout Regularization to reduce overfitting if needed

# Dropout: at each training iteration some neurons are randomly disabled,
# which prevents them from becoming highly dependent on one another when
# learning correlations; the ANN therefore learns several independent
# correlations in the data, so the neurons cannot over-specialize and overfit.

# We can apply dropout to one or several layers
# When we have overfitting it is better to apply dropout in all layers

# Creating an ANN with dropouts

classifier = Sequential()

classifier.add(
    Dense(units=6, input_dim=11, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(rate=0.1))

classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(rate=0.1))

classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))

classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

# Parameter tuning with GridSearch
# GridSearch tests multiple combinations of hyperparameters and helps us
# identify the best combination for our dataset

# Tuning the ANN
model = KerasClassifier(build_fn=build_model, verbose=1)
hyperparameters = create_hyperparameters()

pipe = Pipeline([('minmax', MinMaxScaler()), ('model', model)])
search = RandomizedSearchCV(estimator=pipe,
                            param_distributions=hyperparameters,
                            n_iter=10,
                            n_jobs=-1,
                            cv=5)

search.fit(x_train, y_train)

# Evaluate

print("Score :", search.score(x_test, y_test))
''' 
model = Sequential()
model.add(Dense(3, input_dim=11, activation='relu'))
model.add(Dense(256))
model.add(Dense(5))
model.add(Dense(512, activation='relu'))
model.add(Dropout(keep_prob))
model.add(Dense(7, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dense(3, activation='relu'))
model.add(Dense(12, activation='relu'))
model.add(Dense(1, activation='relu'))
model.add(Dense(2, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',optimizer='adam', metrics=['accuracy'])
Example #19
    classifier.add(Dense(units = 4, activation = 'relu', kernel_initializer = 'uniform'))
    #classifier.add(Dropout(0.1))
    classifier.add(Dense(units = 1, activation = 'sigmoid', kernel_initializer = 'uniform'))
    classifier.compile(optimizer = optimizer, loss = 'binary_crossentropy', metrics = ['accuracy'])
    return classifier

classifier = KerasClassifier(build_fn = my_model)
parameters = {'batch_size':[25, 32], 'epochs':[100, 500], 'optimizer': ['adam', 'rmsprop'] }
grid_search = GridSearchCV(estimator = classifier, param_grid = parameters, scoring = 'accuracy', cv = 10 )
grid_search_result = grid_search.fit(X_train, y_train)
best_parameters = grid_search_result.best_params_
best_score = grid_search_result.best_score_

#Prediction after Tuning the ANN
classifier = Sequential()
classifier.add(Dense(units = 4, activation = 'relu', kernel_initializer = 'uniform', input_dim = 7))
#classifier.add(Dropout(0.1))
classifier.add(Dense(units = 4, activation = 'relu', kernel_initializer = 'uniform'))
#classifier.add(Dropout(0.1))
classifier.add(Dense(units = 4, activation = 'relu', kernel_initializer = 'uniform'))
#classifier.add(Dropout(0.1))
classifier.add(Dense(units = 1, activation = 'sigmoid', kernel_initializer = 'uniform'))
classifier.compile(optimizer = 'rmsprop', loss = 'binary_crossentropy', metrics = ['accuracy'])
classifier.fit(X_train,y_train, batch_size = 32, epochs = 100)
#Evaluate the model (on the training set)
score = classifier.evaluate(X_train, y_train)
print('train loss', score[0])
print('train accuracy', score[1])
#Prediction on Test set
y_pred = classifier.predict(X_test)   
y_pred = (y_pred > 0.4)
Example #20
                    param_grid=param_grid,
                    cv=ps,
                    refit=False,
                    verbose=3,
                    n_jobs=1)
grid_result = grid.fit(X, Y)

# summarize results
print("Best: %f using %s" %
      (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))
"""
# Build Keras model
model = Sequential()

model.add(Dense(50, activation='tanh', input_shape=(trainX.shape[1],)))
model.add(GaussianNoise(0.5))
model.add(Dense(50))
model.add(Dense(trainY.shape[1], activation='softmax'))


#sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

tb = TensorBoard(log_dir='/tmp/keras_logs/sg_class1')
Example #21
model = KerasClassifier(build_fn=build_classifier, batch_size=5, epochs=200)
accuracies = cross_val_score(estimator=model, X=X_train, y=y_train,
                             cv=5)  #CV = K-fold cross val. splits
mean = accuracies.mean()
variance = accuracies.std()
print("Average accuracy:", mean, "Variance:", variance)

# Improving the ANN
# Dropout Regularization to reduce overfitting if needed
from keras.layers import Dropout

model = Sequential()
model.add(
    Dense(input_dim=7,
          units=8,
          kernel_initializer='uniform',
          activation='relu'))
model.add(Dropout(rate=0.1))

model.add(Dense(units=8, kernel_initializer='uniform', activation='relu'))
model.add(Dropout(rate=0.2))

model.add(Dense(units=3, kernel_initializer='uniform', activation='softmax'))

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=5, epochs=200)

y_pred_drop = model.predict(X_test)
    for i in range(1, len(encoded)):
        sequence = encoded[i - 1:i + 1]
        sequences.append(sequence)
    print('Total Sequences: %d' % len(sequences))
    # pad sequences
    max_length = max([len(seq) for seq in sequences])
    sequences = pad_sequences(sequences, maxlen=max_length, padding='pre')
    print('Max Sequence Length: %d' % max_length)
    # split into input and output elements
    sequences = array(sequences)
    X, y = sequences[:, :-1], sequences[:, -1]
    y = to_categorical(y, num_classes=vocab_size)

    # define model
    model = Sequential()
    model.add(Embedding(vocab_size, 30, input_length=max_length - 1))
    model.add(LSTM(60))
    model.add(Dense(vocab_size, activation='softmax'))
    model.summary()
    # compile network
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    #model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # fit network
    model.fit(X, y, epochs=500, verbose=2)
    # evaluate model

    # prepare the tokenizer on the source text
    tokenizer2 = Tokenizer()
Example #23
test_features = conv_base.predict(np.array(test_X), batch_size=BATCH_SIZE, verbose=1)
val_features = conv_base.predict(np.array(valid_X), batch_size=BATCH_SIZE, verbose=1)
np.savez("train_features", train_features, train_label)
np.savez("test_features", test_features, target_test)
np.savez("val_features", val_features, valid_label)

train_features_flat = np.reshape(train_features, (48000, 1*1*512))
test_features_flat = np.reshape(test_features, (10000, 1*1*512))
val_features_flat = np.reshape(val_features, (12000, 1*1*512))

NB_TRAIN_SAMPLES = train_features_flat.shape[0]
NB_VALIDATION_SAMPLES = val_features_flat.shape[0]
NB_EPOCHS = 10

model = models.Sequential()
model.add(layers.Dense(512, input_dim=1 * 1 * 512))  # linear Dense, activated by the LeakyReLU below
model.add(layers.LeakyReLU(alpha=0.1))
model.add(layers.Dense(num_classes, activation='softmax'))

model.compile(
    loss='categorical_crossentropy',
    optimizer=optimizers.Adam(),
    metrics=['acc'])


reduce_learning = callbacks.ReduceLROnPlateau(
    monitor='val_loss',
    factor=0.2,
    patience=2,
    verbose=1,
    mode='auto')

############################second way #################################################


# Instantiate a Sequential model
model = Sequential()

# Add a hidden layer of 64 neurons for a 19-feature input
model.add(Dense(64, input_shape=(19,), activation='relu'))

# Add an output layer of 9 neurons with sigmoid activation
model.add(Dense(9, activation='sigmoid'))

# Compile your model with adam and binary crossentropy loss
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

model.summary()


Example #25
from keras.utils import to_categorical

activations = ['relu']
param_grid = dict(layers=layers, activation=activations, batch_size=[10], epochs=[10])

grid = RandomizedSearchCV(estimator=model, param_distributions=param_grid,cv=5)

grid_result = grid.fit(train_images,  to_categorical(train_labels))

print("Best Score:",grid_result.best_score_)
print("-------------------------------------------------------------")
print("Best Parameters selected:",grid_result.best_params_)

"""**So we have got the best hyper paramters that we will use to build our final model**"""

model = keras.Sequential()
# Adding the input layer and the first hidden layer
model.add(Dense(128, activation='relu', kernel_initializer='he_uniform', input_dim=784))

# Adding the second hidden layer
model.add(Dense(units=64, kernel_initializer='he_uniform', activation='relu'))
# Adding the output layer
model.add(Dense(units=10, kernel_initializer='glorot_uniform', activation='softmax'))

# Compiling the ANN
model.compile(optimizer='Adamax', loss='categorical_crossentropy', metrics=['accuracy'])

# Fitting the ANN to the Training set (to_categorical is imported at the top)
model_history = model.fit(train_images,
                          to_categorical(train_labels),
                          batch_size=10,
                          epochs=20)
Example #26
classifier = KerasClassifier(build_classifier, batch_size=5, epochs=100)
accuracies = cross_val_score(estimator=classifier,
                             X=X_train,
                             y=y_train,
                             cv=5,
                             n_jobs=1)

mean = accuracies.mean()

variance = accuracies.std()

#improving the ANN for overfitting using dropout
from keras.layers import Dropout

classifier = Sequential()
classifier.add(
    Dense(4, input_dim=8, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(rate=0.2))

classifier.add(Dense(4, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(rate=0.2))

classifier.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

classifier.fit(X_train, y_train, epochs=50, batch_size=5)

#Tuning the ANN
from sklearn.model_selection import GridSearchCV
Example #27
      (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))

#
#
#   RUN MODEL
#
#

model = Sequential()

model.add(Dense(20, activation='relu', input_dim=len(elo_data.columns) - 1))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])

model.fit(X_train_elo,
          Y_train_elo,
          epochs=10,
          batch_size=50,
          validation_split=0.2,
          verbose=1)
model.test_on_batch(X_test_elo, Y_test_elo, sample_weight=None)
model.evaluate(X_test_elo, Y_test_elo, verbose=1)
Example #28
                  metrics=['accuracy'])


# model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1, validation_data=(x_test, y_test))
# score = model.evaluate(x_test, y_test, verbose=0)
# print('Test loss: ', score[0])
# print('Test accuracy: ', score[1])

# pass the defined model to scikit-learn for cross-validation
model = KerasClassifier(build_fn=create_model, epochs=20, batch_size=128)
kfold = StratifiedKFold(n_splits=3, shuffle=True, random_state=41)
# results = cross_val_score(model, X, Y, cv=kfold, scoring='accuracy')
scores_history = []
for train_ind, test_ind in kfold.split(X, Y0):
    model = Sequential()
    model.add(Dense(units=512, input_dim=784, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(units=512, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(units=num_classes, activation='softmax'))

    model.summary()

    model.compile(optimizer=RMSprop(),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(X[train_ind],
              Y[train_ind],
              epochs=epochs,
              batch_size=batch_size,
              verbose=0)
Example #29
accuracies = cross_val_score(estimator=classifier,
                             X=X_train,
                             y=y_train,
                             cv=10,
                             n_jobs=-1)
print("Results: %.2f%% (%.2f%%)" %
      (accuracies.mean() * 100, accuracies.std() * 100))

# checking for overfitting (Dropout Regularization to reduce overfitting if needed)
# classifier.add(Dropout(rate=0.1))

########################## manually running model
seed = 7
np.random.seed(seed)
classifier = Sequential()
classifier.add(
    Dense(units=66,
          kernel_initializer='uniform',
          activation='relu',
          input_dim=22))
classifier.add(Dropout(rate=0.1))
classifier.add(Dense(units=44, kernel_initializer='uniform',
                     activation='relu'))
classifier.add(Dropout(rate=0.3))
classifier.add(
    Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])
hist = classifier.fit(X_train, y_train, batch_size=1, epochs=100)

y_pred = classifier.predict(X_test)
y_pred = (y_pred >= 0.5)
#model = Sequential()
#model.add(Dense(units=16, kernel_initializer='uniform', activation='relu', input_dim=29))
#model.add(Dense(units=8, kernel_initializer='uniform', activation='relu'))
#model.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
#model.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))

# Building the model
model = Sequential()
model.add(Dense(units=16, kernel_initializer='uniform', activation='relu', input_dim=4))
model.add(Dense(units=8, kernel_initializer='uniform', activation='relu'))
model.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
model.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
# model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])

#  attributs_pertinents_DDoS_train=attributs_pertinents_DDoS_train.astype(float)
#  attributs_tag_train=attributs_tag_train.astype(float)

# Compile the classifier
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Now fit it to the data.
# model.fit(XTrain, yTrain, batch_size=1, epochs=120)
model.fit(attributs_pertinents_DDoS_train, attributs_tag_train, batch_size=1, epochs=120)