Example #1
0
import numpy as np
from keras.initializers import RandomUniform
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD


class NeuralNetwork(object):
    def __init__(self, input_nodes, hidden_nodes, output_nodes, lr=0.01):
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        self.lr = lr
        self.scales_x = []
        self.scales_y = []

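        # Glorot/Xavier uniform range: limit = sqrt(6 / (fan_in + fan_out))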
        input_kernel_range = np.sqrt(6) / (np.sqrt(input_nodes) + np.sqrt(hidden_nodes))
        input_kernel_initializer = RandomUniform(minval=-input_kernel_range, maxval=input_kernel_range)
        input_layer = Dense(input_nodes,
                            kernel_initializer=input_kernel_initializer,
                            name='input')

        hidden_kernel_range = np.sqrt(6) / (np.sqrt(hidden_nodes) + np.sqrt(output_nodes))
        hidden_kernel_initializer = RandomUniform(minval=-hidden_kernel_range, maxval=hidden_kernel_range)
        hidden_layer = Dense(hidden_nodes,
                             kernel_initializer=hidden_kernel_initializer,
                             name='hidden')

        output_layer = Dense(output_nodes,
                             name='output')

        self.model = Sequential()
        self.model.add(input_layer)
        self.model.add(hidden_layer)
        self.model.add(output_layer)

    def train(self, x_train, y_train):
        self.set_normalize_scales(x_train, y_train)
        x_train = self.normalize(x_train, self.scales_x)
        y_train = self.normalize(y_train, self.scales_y)

        optimizer = SGD(lr=self.lr)
        self.model.compile(loss='mse', optimizer=optimizer)
        self.model.fit(x_train, y_train, batch_size=20, epochs=500)

    def evaluate(self, x_test, y_test):
        x_test = self.normalize(x_test, self.scales_x)
        y_test = self.normalize(y_test, self.scales_y)
        return self.model.evaluate(x_test, y_test)

    def predict(self, x):
        x = self.normalize(x, self.scales_x)
        y = self.model.predict(x)
        return self.unnormalize(y, self.scales_y)

    def set_normalize_scales(self, x, y):
        for i in range(x.shape[1]):
            mean, std = x[:, i].mean(), x[:, i].std()
            self.scales_x.append([mean, std])
        for i in range(y.shape[1]):
            mean, std = y[:, i].mean(), y[:, i].std()
            self.scales_y.append([mean, std])

    @staticmethod
    def normalize(data, scales):
        data = data.astype(float)  # work on a copy so the caller's array is not modified
        for i in range(len(scales)):
            mean, std = scales[i]
            data[:, i] = (data[:, i] - mean) / std
        return data

    @staticmethod
    def unnormalize(data, scales):
        data = data.astype(float)
        for i in range(len(scales)):
            mean, std = scales[i]
            data[:, i] = data[:, i] * std + mean
        return data
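A minimal usage sketch for the class above (hedged: the shapes and the 3/4/2 node counts are illustrative assumptions, not from the original source):

import numpy as np

x = np.random.rand(100, 3)
y = np.random.rand(100, 2)

nn = NeuralNetwork(input_nodes=3, hidden_nodes=4, output_nodes=2, lr=0.01)
nn.train(x[:80], y[:80])             # fits the scalers, then trains for 500 epochs
print(nn.evaluate(x[80:], y[80:]))   # MSE on normalized held-out data
print(nn.predict(x[:5]))             # predictions mapped back to the original scale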
Example #2
0
#training the model
myModel = Sequential()
myModel.add(Dense(units=64, input_dim=13, activation='relu'))
myModel.add(Dense(units=8, activation='relu'))
myModel.add(Dense(1, activation='sigmoid'))

myModel.compile(loss='binary_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
myModel.fit(x=trainingDate, y=trainingLabels, epochs=2, batch_size=10)

print("Time point 2 is " + str(time.time() - startTime))

#produce test data set and evaluate the model.
testDIR = "F:/Riku_Ka/Training/test data with labels"

MFCCfiles = os.listdir(testDIR)
for f in MFCCfiles:
    tempFile = testDIR + '/' + f
    tempMatrix = np.loadtxt(tempFile)
    testData = tempMatrix[:, 1:]
    testLabels = tempMatrix[:, 0]
    print(testLabels)

    scores = myModel.evaluate(x=testData, y=testLabels)
    print("{0},{1}".format(myModel.metrics_names[1], scores[1] * 100))

print("Time point 3 is " + str(time.time() - startTime))

Example #3
0
history = cnn_model.fit(X_train,
                        y_train,
                        batch_size=512,
                        epochs=100,
                        verbose=1,
                        validation_data=(X_validate, y_validate))

# Evaluation
evaluation = cnn_model.evaluate(X_test, y_test)
print('Test Accuracy : {:.3f}'.format(evaluation[1]))

# get the predictions for the test data
predicted_classes = cnn_model.predict_classes(X_test)

# Prediction class vs Test class
L = 5
W = 5
fig, axes = plt.subplots(L, W, figsize=(12, 12))
axes = axes.ravel()

for i in np.arange(0, L * W):
    axes[i].imshow(X_test[i].reshape(28, 28))
    axes[i].set_title(
        "Prediction Class = {:0.1f}\n True Class = {:0.1f}".format(
            predicted_classes[i], y_test[i]))
Example #4
0
id2word = {i: word for word, i in word2id.items()}
print('---review with words---')
print([id2word.get(i, ' ') for i in X_train[6]])
print('---label---')
print(y_train[6])
print('Maximum review length: {}'.format(len(max((X_train + X_test),
                                                 key=len))))
print('Minimum review length: {}'.format(len(min((X_train + X_test), key=len))))
max_words = 500
X_train = pad_sequences(X_train, maxlen=max_words)
X_test = pad_sequences(X_test, maxlen=max_words)
embedding_size = 32
model = Sequential()
model.add(Embedding(vocabulary_size, embedding_size, input_length=max_words))
# model.add(LSTM(100))
# model.add(GRU(100))
model.add(Bidirectional(LSTM(100)))
model.add(Dense(1, activation='sigmoid'))
print(model.summary())
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
batch_size = 64
num_epochs = 3
X_valid, y_valid = X_train[:batch_size], y_train[:batch_size]
X_train2, y_train2 = X_train[batch_size:], y_train[batch_size:]
model.fit(X_train2,
          y_train2,
          validation_data=(X_valid, y_valid),
          batch_size=batch_size,
          epochs=num_epochs,
          verbose=1)
scores = model.evaluate(X_test, y_test, verbose=1)
print('Test accuracy:', scores[1])
Example #5
0
from keras.layers import Dense
from keras.models import Sequential
from sklearn import preprocessing
from sklearn.model_selection import train_test_split

labelEncoder = preprocessing.LabelEncoder()
labelEncoder.fit(dataset['diagnosis'])
dataset['diagnosis'] = labelEncoder.transform(dataset['diagnosis'])
dataset.info()
print(dataset.head())

dataset = dataset.values

sc = preprocessing.StandardScaler()  # scale the 30 feature columns
X_values = sc.fit_transform(dataset[:, 2:32])

X_train, X_test, Y_train, Y_test = train_test_split(X_values,
                                                    dataset[:, 1],
                                                    test_size=0.25,
                                                    random_state=87)
my_first_nn = Sequential()  # create model
my_first_nn.add(Dense(10, input_dim=30, activation='relu'))  # hidden layer
my_first_nn.add(Dense(12, activation='softplus'))
my_first_nn.add(Dense(1, activation='sigmoid'))  # output layer
my_first_nn.compile(loss='binary_crossentropy',
                    optimizer='adam',
                    metrics=['acc'])
my_first_nn_fitted = my_first_nn.fit(X_train,
                                     Y_train,
                                     epochs=100,
                                     verbose=0,
                                     initial_epoch=0)
print(my_first_nn.summary())
print(my_first_nn.evaluate(X_test, Y_test, verbose=0))
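A hedged follow-up sketch for the model above: threshold the sigmoid outputs into hard labels and inspect a confusion matrix (the sklearn import and the 0.5 cutoff are assumptions, not from the original):

from sklearn.metrics import confusion_matrix

probs = my_first_nn.predict(X_test)            # sigmoid probabilities in [0, 1]
preds = (probs > 0.5).astype(int).ravel()      # hard 0/1 labels
print(confusion_matrix(Y_test.astype(int), preds))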
Example #6
0
import numpy

from keras import Sequential, callbacks
from keras.layers import Dense

dataset1 = numpy.loadtxt("data\\diabetes.csv", delimiter=',', skiprows=1)
print(dataset1.shape)

inputList = dataset1[:, 0:8]  # X
resultList = dataset1[:, 8]  # Y

model = Sequential()
model.add(Dense(10, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
print(model.summary())
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(inputList,
          resultList,
          epochs=200,
          batch_size=20,
          validation_split=0.25)
# evaluate (note: on the training data itself)
scores = model.evaluate(inputList, resultList)
print(type(model.metrics_names))

print(f"{model.metrics_names[1]} value={scores[1] * 100}")
print(f"{model.metrics_names[0]} value={scores[0]}")
Example #7
0
from keras import callbacks
from keras.layers import Dense
from keras.models import Sequential
from sklearn.preprocessing import LabelEncoder

encoder = LabelEncoder()
transformedLabel = encoder.fit_transform(csv1['label'])
print(csv1['label'][:10])
print(transformedLabel[:10])

test_csv = csv1[25000:]
test_pat = test_csv[['weight', 'height']]
test_ans = transformedLabel[25000:]
train_csv = csv1[:25000]
train_pat = train_csv[['weight', 'height']]
train_ans = transformedLabel[:25000]

model = Sequential()
model.add(Dense(5, activation='relu', input_shape=(2, )))
model.add(Dense(3, activation='softmax'))
print(model.summary())
model.compile(loss='sparse_categorical_crossentropy',  # labels are integer-encoded, not one-hot
              optimizer='sgd',
              metrics=['accuracy'])

tbCallback = callbacks.TensorBoard(log_dir="c:\\temp_phw", histogram_freq=1)
history = model.fit(train_pat,
                    train_ans,
                    batch_size=50,
                    epochs=50,
                    verbose=1,
                    validation_data=(test_pat, test_ans),
                    callbacks=[tbCallback])

score = model.evaluate(test_pat, test_ans, verbose=0)
print(f"accuracy={score[1]}, loss={score[0]}")
Example #8
0
ytrain=y[0:v1,]
ytest=y[v1:,]
#print(ytest.shape)


# Now we will create a sequential model using Keras
model = Sequential()
# The shape of the input layer is defined here, in the first hidden layer: this
# single line defines both the input layer and the first hidden layer.
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

#Compile the model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Now we will fit the model, i.e. train it on the data.
model.fit(xtrain, ytrain, epochs=150, batch_size=10)

# Evaluate how well the model is performing
loss, accuracy = model.evaluate(xtest, ytest)

predictions = model.predict(xtest)

print(predictions)
#print(type(predictions))
print('Accuracy is %.2f' % (accuracy * 100))
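A small hedged addition: the predictions above are sigmoid probabilities, so a 0.5 cutoff turns them into class labels.

classes = (predictions > 0.5).astype(int)
print(classes[:10].flatten())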



Example #9
0
        verbose=vb,
        shuffle=True,
        batch_size=batch_s)
ST2.summary()
## Test custom layer###########################

## Show learned weights:
ii = 1
for layer in ST2.layers:
    g = layer.get_config()
    h = layer.get_weights()
    print('------------------------------------ Layer ', str(ii))
    print(g)
    print(h)
    ii += 1

## Accuracy:
acc = ST.evaluate(x=X_test, y=Y_test, sample_weight=W_test)
print('Acc: ', acc[1])

## AUC:
y_test_prd = ST.predict(x=X_test, batch_size=batch_s, verbose=vb)

## Pick signal probabilities:
#print y_test_prd.shape
y_test_prd = y_test_prd[:, 1]
#print y_test_prd.shape

AUC = roc_auc_score(y_test, y_test_prd, sample_weight=W_test)
print('AUC: {0}'.format(AUC))
Example #10
0
import matplotlib.pyplot as plt
import numpy as np
from keras.layers import Dense
from keras.models import Sequential

X = np.linspace(-1, 1, 200)
np.random.shuffle(X)
Y = 0.5 * X + 2 + np.random.normal(0, 0.05, (200, ))
plt.scatter(X, Y)
plt.show()

X_train, Y_train = X[:160], Y[:160]
X_test, Y_test = X[160:], Y[160:]
model = Sequential()
model.add(Dense(units=1, input_dim=1))
model.compile(loss='mse', optimizer='sgd')

for step in range(501):
    cost = model.train_on_batch(X_train, Y_train)
    if step % 100 == 0:
        print("train cost", cost)

print('\nTesting----------')
cost = model.evaluate(X_test, Y_test, batch_size=40)
print('test cost', cost)
w, b = model.layers[0].get_weights()
print('weight=', w, "bias=", b)

Y_pred = model.predict(X_test)
plt.scatter(X_test, Y_test)
plt.plot(X_test, Y_pred)
plt.show()
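Since the data above were generated as Y = 0.5 * X + 2 plus noise, the weight and bias printed after training should come out close to 0.5 and 2.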
Example #11
0
                               decay=1e-6,
                               momentum=0.9,
                               nesterov=True)
    # note: the string 'sgd' creates a fresh optimizer with default settings and
    # ignores the SGD instance configured above; pass that instance to use it
    model.compile(optimizer='sgd', loss='mse', metrics=['mae', 'mse'])

    print(model.summary())

    history = model.fit(X_train,
                        Y_train,
                        epochs=10,
                        batch_size=64,
                        validation_data=(X_test, Y_test),
                        callbacks=[es])
    preds = model.predict(X_test)
    preds1 = model.predict(x)
    result = model.evaluate(X_test, Y_test)
    loss = result[0]
    print(result)
    mse_test = mean_squared_error(Y_test, preds)
    r2_test = r2_score(Y_test, preds)
    print("MSE of test set is {}".format(mse_test))
    print("R2 score of test set is {}".format(r2_test))
    nrmse = cal_nrmse(Y_test, preds)
    print("nrmse of test set is {}".format(nrmse))
    print("original", y[0:10])
    y1 = []
    for i in range(len(y)):
        pr = y[i] * den
        cal = pr + add
        y1.append(cal)
    print("after", y1[0:10])
Example #12
0
                          verbose=2,
                          validation_data=(test_input, test_label))

# def show_train_history(train_history, train, validation):
#     plt.plot(train_history.history[train])
#     plt.plot(train_history.history[validation])
#     plt.title('Train History')
#     plt.ylabel(train)
#     plt.xlabel('Epoch')
#     plt.legend(['train', 'validation'], loc='upper left')
#     plt.show()

# In[22]:
# show_train_history(train_history, 'acc', 'val_acc')
# # In[23]:
# show_train_history(train_history, 'loss', 'val_loss')
# # Evaluate the model's accuracy
scores = model.evaluate(test_input, test_label, verbose=1, batch_size=256)
print('test loss:', scores[0])
print('test accuracy:', scores[1])

# predict = model.predict_classes(test_input)
# predict_classes = predict.reshape(2500)

model_json = model.to_json()
with open("model/author_Style_model.json", "w") as json_file:
    json_file.write(model_json)

model.save_weights("model/author_Style_model.h5")
print("Saved model to disk")
Example #13
0
def alex_net(x_train, y_train, x_test, y_test):
    y_train = keras.utils.to_categorical(y_train, num_classes=flower_types)
    y_test = keras.utils.to_categorical(y_test, num_classes=flower_types)

    model = Sequential()
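    # AlexNet-style stack: five conv layers with max-pooling and dropout,
    # followed by two 4096-unit dense layers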
    model.add(
        Conv2D(96, (11, 11),
               strides=(4, 4),
               input_shape=(image_size, image_size, 3),
               padding='valid',
               activation='relu',
               kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(0.25))

    model.add(
        Conv2D(256, (5, 5),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(0.25))

    model.add(
        Conv2D(384, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    model.add(
        Conv2D(384, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(flower_types, activation='softmax'))
    sgd = SGD(lr=1e-2, decay=1e-9)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    history = model.fit(x_train,
                        y_train,
                        validation_split=0.1,
                        batch_size=100,
                        epochs=epochs)
    loss, acc = model.evaluate(x_test, y_test, batch_size=50)
    print('loss is {:.4f}'.format(loss) +
          ', acc is  {:.2f}%\n'.format(acc * 100))
    model_name = 'result/alex_model_epoch' + str(epochs) + '_' + str(
        round(acc * 100, 2)) + '.h5'
    model.save(model_name)
    save_history(history, 'result', str(round(acc * 100, 2)))
    # clear the Keras session
    keras.backend.clear_session()
    return model_name
Example #14
0
    def fitting(self):

        timesteps = self.lags  # time steps
        features = 1  # features or channels (Volume)
        num_classes = 3  # 3 for categorical

        #data = np.random.random((1000, dim_row, dim_col))
        #clas = np.random.randint(3, size=(1000, 1))
        ##print(clas)
        #clas = to_categorical(clas)
        ##print(clas)
        data = self.X_train
        data_test = self.X_test
        print(data)

        data = data.values.reshape(len(data), timesteps, 1)
        data_test = data_test.values.reshape(len(data_test), timesteps, 1)
        print(data)

        clas = self.y_train
        clas_test = self.y_test
        clas = to_categorical(clas)
        clas_test = to_categorical(clas_test)

        cat0 = self.y_train.tolist().count(0)
        cat1 = self.y_train.tolist().count(1)
        cat2 = self.y_train.tolist().count(2)

        print("may: ", cat1, "  ", "menor: ", cat2, " ", "neutro: ", cat0)

        n_samples_0 = cat0
        n_samples_1 = (cat1 + cat2) / 2.0
        n_samples_2 = (cat1 + cat2) / 2.0

        class_weight = {
            0: 1.0,
            1: n_samples_0 / n_samples_1,
            2: n_samples_0 / n_samples_2
        }

        def class_1_accuracy(y_true, y_pred):
            # taken from: http://www.deepideas.net/unbalanced-classes-machine-learning/
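            # accuracy computed only over the samples the model predicts as class 1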
            class_id_true = K.argmax(y_true, axis=-1)
            class_id_preds = K.argmax(y_pred, axis=-1)

            accuracy_mask = K.cast(K.equal(class_id_preds, 1), 'int32')
            class_acc_tensor = K.cast(K.equal(class_id_true, class_id_preds),
                                      'int32') * accuracy_mask

            class_acc = K.sum(class_acc_tensor) / K.maximum(
                K.sum(accuracy_mask), 1)
            return class_acc

        class SecondOpinion(Callback):
            def __init__(self, model, x_test, y_test, N):
                self.model = model
                self.x_test = x_test
                self.y_test = y_test
                self.N = N
                self.epoch = 1

            def on_epoch_end(self, epoch, logs={}):
                if self.epoch % self.N == 0:
                    y_pred = self.model.predict(self.x_test)
                    pred_T = 0
                    pred_F = 0
                    for i in range(len(y_pred)):
                        if np.argmax(y_pred[i]) == 1 and np.argmax(
                                self.y_test[i]) == 1:
                            pred_T += 1
                        if np.argmax(y_pred[i]) == 1 and np.argmax(
                                self.y_test[i]) != 1:
                            pred_F += 1
                    if pred_T + pred_F > 0:
                        Pr_pos = pred_T / (pred_T + pred_F)
                        print("Yoe: epoch, Probabilidad pos: ", self.epoch,
                              Pr_pos)
                    else:
                        print("Yoe Probabilidad pos: 0")
                self.epoch += 1

#################################################################################################################

        model = Sequential()
        if self.nConv == 0:
            model.add(
                LSTM(units=self.lstm_nodes,
                     return_sequences=True,
                     activation='tanh',
                     input_shape=(timesteps, features),
                     kernel_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01)))
        for i in range(self.nLSTM - 2):
            model.add(
                LSTM(units=self.lstm_nodes,
                     return_sequences=True,
                     activation='tanh',
                     kernel_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01)))
        model.add(LSTM(units=self.lstm_nodes, activation='tanh'))
        model.add(Dropout(0.5))
        model.add(
            Dense(num_classes, activation='softmax')
        )  # the dimension of index one will be considered to be the temporal dimension
        #model.add(Activation('sigmoid'))  # for loss = 'binary_crossentropy'

        # taking x[:, -1, :] drops the second dimension, keeping only the
        # LAST elements (-1) along that dimension:
        # Try this to see:
        # data = np.random.random((5, 3, 4))
        # print(data)
        # print(data[:, -1, :])

        #        model.add(Lambda(lambda x: x[:, -1, :], output_shape = [output_dim]))
        print(model.summary())

        tensorboard_active = False
        val_loss = False
        second_opinion = True
        callbacks = []
        if tensorboard_active:
            callbacks.append(
                TensorBoard(log_dir=self.putmodel + "Tensor_board_data",
                            histogram_freq=0,
                            write_graph=True,
                            write_images=True))
        if val_loss:
            callbacks.append(EarlyStopping(monitor='val_loss', patience=5))
        if second_opinion:
            callbacks.append(SecondOpinion(model, data_test, clas_test, 10))
        #model.compile(loss = 'categorical_crossentropy', optimizer='Adam', metrics = ['categorical_accuracy'])
        #model.compile(loss = 'binary_crossentropy', optimizer=Adam(lr=self.learning), metrics = ['categorical_accuracy'])
        model.compile(loss='categorical_crossentropy',
                      optimizer='Adam',
                      metrics=[class_1_accuracy])

        model.fit(x=data,
                  y=clas,
                  batch_size=self.batch_size,
                  epochs=800,
                  verbose=2,
                  callbacks=callbacks,
                  class_weight=class_weight)
        #validation_data=(data_test, clas_test))

        #####################################################################################################################

        # serialize model to YAML
        model_yaml = model.to_yaml()
        with open("model.yaml", "w") as yaml_file:
            yaml_file.write(model_yaml)
        # serialize weights to HDF5
        model.save_weights("model.h5")
        print("Saved model to disk")

        #        # load YAML and create model
        #        yaml_file = open('model.yaml', 'r')
        #        loaded_model_yaml = yaml_file.read()
        #        yaml_file.close()
        #        loaded_model = model_from_yaml(loaded_model_yaml)
        #        # load weights into new model
        #        loaded_model.load_weights("model.h5")
        #        print("Loaded model from disk")
        #        loaded_model.compile(loss = 'categorical_crossentropy', optimizer='Adam', metrics = [class_1_accuracy])
        #
        print("Computing prediction ...")
        y_pred = model.predict_proba(data_test)

        model.reset_states()
        print("Computing train evaluation ...")
        score_train = model.evaluate(data, clas, verbose=2)
        print('Train loss:', score_train[0])
        print('Train accuracy:', score_train[1])

        model.reset_states()
        #        score_train_loaded = loaded_model.evaluate(data, clas, verbose=2)
        #        loaded_model.reset_states()
        #        print('Train loss loaded:', score_train[0])
        #        print('Train accuracy loaded:', score_train[1])

        print("Computing test evaluation ...")
        score_test = model.evaluate(data_test, clas_test, verbose=2)
        print('Test loss:', score_test[0])
        print('Test accuracy:', score_test[1])

        model.reset_states()
        #        score_test_loaded = loaded_model.evaluate(data_test, clas_test, verbose=2)
        #        loaded_model.reset_states()
        #        print('Test loss loaded:', score_test[0])
        #        print('Test accuracy loaded:', score_test[1])

        pred_T = 0
        pred_F = 0
        for i in range(len(y_pred)):
            if np.argmax(y_pred[i]) == 1 and np.argmax(clas_test[i]) == 1:
                pred_T += 1
#                print(y_pred[i])
            if np.argmax(y_pred[i]) == 1 and np.argmax(clas_test[i]) != 1:
                pred_F += 1
        if pred_T + pred_F > 0:
            Pr_pos = pred_T / (pred_T + pred_F)
            print("Yoe Probabilidad pos: ", Pr_pos)
        else:
            print("Yoe Probabilidad pos: 0")

        history = DataFrame([[
            self.skip, self.nConv, self.nLSTM, self.learning, self.batch_size,
            self.conv_nodes, self.lstm_nodes, score_train[0], score_train[1],
            score_test[0], score_test[1]
        ]],
                            columns=('Skip', 'cConv', 'nLSTM', 'learning',
                                     'batch_size', 'conv_nodes', 'lstm_nodes',
                                     'loss_train', 'acc_train', 'loss_test',
                                     'acc_test'))
        self.history = self.history.append(history)
Example #15
0
import keras
from keras.layers import Dense
from keras.models import Sequential
from sklearn.metrics import accuracy_score, confusion_matrix

classificador = Sequential()
classificador.add(
    Dense(units=16,
          activation='relu',
          kernel_initializer='random_uniform',
          input_dim=30))

classificador.add(
    Dense(units=16, activation='relu', kernel_initializer='random_uniform'))

classificador.add(Dense(units=1, activation='sigmoid'))

otimizador = keras.optimizers.Adam(lr=0.001, decay=0.0001, clipvalue=0.5)

classificador.compile(optimizer=otimizador,
                      loss='binary_crossentropy',
                      metrics=['binary_accuracy'])

classificador.fit(previssores_treinamento, classe_treinamento,
                  batch_size=10, epochs=100)

pesos0 = classificador.layers[0].get_weights()
pesos1 = classificador.layers[1].get_weights()
pesos2 = classificador.layers[2].get_weights()

previsores = classificador.predict(previssores_teste)
previsores = (previsores > 0.5)

precissao = accuracy_score(classe_teste, previsores)
matrix = confusion_matrix(classe_teste, previsores)
resultado = classificador.evaluate(previssores_teste, classe_teste)
#print("DataFrame: {}".format(entrada))
Example #16
0
# start training
# train in batches of 50 (text length = 100), for 10 epochs over all examples
train_history = model.fit(x_train,
                          trainY_oneHot,
                          batch_size=50,
                          epochs=10,
                          verbose=2,
                          validation_split=0.2)

# show training history
import matplotlib.pyplot as plt


def show_train_history(train_history, train, validation):
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])
    plt.title('Train History')
    plt.xlabel('Epoch')
    plt.ylabel(train)
    plt.legend(['train', 'validation'])
    plt.show()


# accuracy and loss plots help to check for overfitting
show_train_history(train_history, 'accuracy', 'val_accuracy')
show_train_history(train_history, 'loss', 'val_loss')

# evaluation
scores = model.evaluate(x_test, testY_oneHot)
print("Final accuracy", scores[1])
Example #17
0
import math

import numpy as np


def create_class_weight(labels_dict, mu=0.15):  # mu is an assumed smoothing factor
    total = np.sum([i for i in labels_dict.values()])
    keys = labels_dict.keys()
    class_weight = dict()
    for key in keys:
        score = math.log((mu * total) / float(labels_dict[key]))
        class_weight[key] = score if score > 1.0 else 1.0
    return class_weight
labels_dict = {0: 37000, 1: 18871, 2: 11132, 3: 6062, 4: 4089,
               5: 3496, 6: 677, 7: 583, 8: 378, 9: 44}
labels_dict_train = {0: 56000, 1: 40000, 2: 33393, 3: 18184, 4: 12264,
                     5: 10491, 6: 2000, 7: 1746, 8: 1133, 9: 130}
class_weight_dict = create_class_weight(labels_dict_train)
history = model.fit(train_70_x, train_70_y,
                    validation_data=(train_30_x, train_30_y),
                    batch_size=4096, epochs=50,
                    class_weight=class_weight_dict)
model.save('my_model4.h5')
loss, accuracy = model.evaluate(x_test, y_test2)
pre_y = model.predict_classes(x_test)
y_test = np.array(y_test)
metrics = classification_report(y_test, pre_y)
print(metrics)
confusion_m = confusion_matrix(y_test, pre_y)
y_pred_pro = model.predict_proba(x_test)[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, y_pred_pro, pos_label=1)
roc_auc = auc(fpr, tpr)
mat_plt(history)
plot_confusion_matrix(confusion_m)
roc(fpr, tpr, roc_auc)
model.save('my_modelm.h5')
def fpr_tpr(confusion_m):
    sum = 0
    count = 0
Example #18
0
model.add(Flatten())
#model.add(Dense(1024, activation='relu'))
model.add(Dense(2, activation='softmax'))

#Output Layer


# Compiling the neural network
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # matches the 2-unit softmax output
              metrics=['accuracy'])

history = model.fit(xtrain, ytrain, batch_size=32, epochs=30,
                    validation_data=(xval, yval))


#eval_model = model.evaluate(xtest, ytest)
#print("Test accuracy: ", eval_model[1])
score, acc = model.evaluate(xtest, ytest, batch_size=32)
print('Test score:', score)
print('Test accuracy:', acc)
y_pred = model.predict_classes(xtest)
conf_mat = confusion_matrix(ytest, y_pred)
accuracy = 0
if float(np.sum(conf_mat)) != 0:
    accuracy = float(conf_mat[0, 0] + conf_mat[1, 1]) / float(np.sum(conf_mat))
recall = 0
if float(conf_mat[0, 0] + conf_mat[0, 1]) != 0:
    # sklearn convention: row 0 holds the true class-0 samples
    recall = float(conf_mat[0, 0]) / float(conf_mat[0, 0] + conf_mat[0, 1])
precision = 0
if float(conf_mat[0, 0] + conf_mat[1, 0]) != 0:
    # column 0 holds the samples predicted as class 0
    precision = float(conf_mat[0, 0]) / float(conf_mat[0, 0] + conf_mat[1, 0])
#f1 score = 0
Example #19
0
print(model.summary())
epochVal = 10
stepsPerEpoch = 2000
history = model.fit_generator(dataGen.flow(X_train, y_train, batch_size=50),
                              steps_per_epoch=stepsPerEpoch,
                              epochs=epochVal,
                              validation_data=(X_validation, y_validation),
                              shuffle=1)

plt.figure(1)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['training', 'validation'])
plt.title('Loss')
plt.xlabel('epoch')
plt.figure(2)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['training', 'validation'])
plt.title('Accuracy')
plt.xlabel('epoch')
plt.show()
score = model.evaluate(X_test, y_test, verbose=0)
print('Test Score = ', score[0])
print('Test Accuracy = ', score[1])

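# note: Keras models are more robustly persisted with model.save(...) than with pickle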
pickle_out = open("model_trained.p", "wb")
pickle.dump(model, pickle_out)
pickle_out.close()
Example #20
0
model.add(LSTM(64, return_sequences=True))
model.add(LSTM(64, return_sequences=False))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
print(model.summary())

# Split train and validation set (e.g: 10000 dataset -> 8000 train, 1000 validation, 1000 test)
train_valtest = 0.8
val_test = 0.5

train_x, train_y = train_X[:int(train_valtest*len(train_X))], train_Y[:int(train_valtest*len(train_Y))]
valtest_x, valtest_y = train_X[int(train_valtest*len(train_X)):], train_Y[int(train_valtest*len(train_Y)):]

val_x, val_y = valtest_x[:int(val_test*len(valtest_x))], valtest_y[:int(val_test*len(valtest_y))]
test_x, test_y = valtest_x[int(val_test*len(valtest_x)):], valtest_y[int(val_test*len(valtest_y)):]


model.fit(train_x, train_y, validation_data=(val_x, val_y), epochs=10, verbose=1)
scores = model.evaluate(test_x, test_y, verbose=0)

print('Test on %d samples' %len(test_x))
print('Test accuracy: %.2f%%' % (scores[1]*100))

model.save(getcwd()+'/GloVe-LSTM_model.h5')
print('Model saved to '+getcwd()+'/GloVe-LSTM_model.h5')

print("Total training time: %s seconds" % (time.time() - start_time))
Example #21
0
    model = Sequential()
    model.add(Embedding(500, 100, input_length=len(features)))
    model.add(Dropout(0.2))
    model.add(Conv1D(64, 5, activation='relu'))
    model.add(MaxPooling1D(pool_size=4))
    model.add(LSTM(100))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.fit(x_train_tfidf,
              np.array(y_train),
              validation_split=0.4,
              epochs=3,
              verbose=1)
    scores = model.evaluate(x_test_tfidf, np.array(y_test), verbose=0)
    print("Classifier: ", classifier)
    print("POS: ", pos)
    print("Number of Features: ", len(features))
    print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
    print("\n-------------------------------------------------------------\n")

elif classifier == 'rnn':
    model = Sequential()
    model.add(Embedding(len(features), 100, input_length=len(features)))
    model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.fit(x_train_tfidf,
              np.array(y_train),
              validation_split=0.4,
              epochs=3,
              verbose=1)
Example #22
0
img_rows, img_cols = 28, 28
X_train = x_train.astype('float32')
X_test = x_test.astype('float32')
X_train /= 255
X_test /= 255
Y_train = keras.utils.to_categorical(y_train, num_classes=nb_classes)
Y_test = keras.utils.to_categorical(y_test, num_classes=nb_classes)

nb_units = 100

model = Sequential()

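# the 28x28 image is read row by row: each 28-pixel row is one timestep of a length-28 sequence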
model.add(LSTM(nb_units, activation='relu', input_shape=(img_cols, img_rows)))
model.add(Dropout(0.25))

model.add(Dense(nb_units, activation='relu'))
model.add(Dropout(0.35))

model.add(Dense(nb_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

epochs = 5

history = model.fit(X_train, Y_train, epochs=epochs, batch_size=64, verbose=2)

scores = model.evaluate(X_test, Y_test, verbose=2)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
Example #23
0
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(train_data, train_label, epochs=15, batch_size=64, validation_split=0.2)

import matplotlib.pyplot as plt
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, 'bo', label='Training Loss')
plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
plt.title('Training and validation loss values')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

plt.clf()
acc = history_dict['acc']
val_acc = history_dict['val_acc']
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.show()

print(model.evaluate(test_data, test_labels))
Example #24
0
class QReplayDoubleActionPrior(AbstractModel):
    """ Prediction model which uses Q-learning and a neural network which replays past moves.

        The network learns by replaying a batch of training moves. The training algorithm ensures that
        the game is started from every possible cell. Training ends after a fixed number of games, or
        earlier if a stopping criterion is reached (here: a 100% win rate).

        :param class Maze game: Maze game object.
    """
    def __init__(self, game, **kwargs):
        super().__init__(game, **kwargs)
        self.game = game
        self.state_size = (2 + 2, )

        if kwargs.get("load", False) is False:
            self.model = Sequential()
            self.model.add(
                Dense(game.maze.size,
                      input_shape=self.state_size,
                      activation="relu"))
            self.model.add(Dense(game.maze.size, activation="relu"))
            self.model.add(Dense(len(actions)))
        else:
            self.load(self.name)

        self.model.compile(optimizer="adam", loss="mse")

        self.target_model = Sequential()
        self.target_model.add(
            Dense(game.maze.size,
                  input_shape=self.state_size,
                  activation="relu"))
        self.target_model.add(Dense(game.maze.size, activation="relu"))
        self.target_model.add(Dense(len(actions)))
        self.target_model.compile(optimizer="adam", loss="mse")

    def save(self, filename):
        with open(filename + ".json", "w") as outfile:
            outfile.write(self.model.to_json())
        self.model.save_weights(filename + ".h5", overwrite=True)

    def load(self, filename):
        with open(filename + ".json", "r") as infile:
            self.model = model_from_json(infile.read())
        self.model.load_weights(filename + ".h5")

    def train(self, stop_at_convergence=False, **kwargs):
        """ Hyperparameters:

            :keyword float discount: (gamma) preference for future rewards (0 = not at all, 1 = only)
            :keyword float exploration_rate: (epsilon) 0 = preference for exploring (0 = not at all, 1 = only)
            :keyword float exploration_decay: exploration rate reduction after each random step (<= 1, 1 = no at all)
            :keyword int episodes: number of training games to play
            :keyword int sample_size: number of samples to replay for training
            :return int, datetime: number of training episodes, total time spent
        """
        max_memory = kwargs.get("max_memory", 1000)
        discount = kwargs.get("discount", 0.90)
        exploration_rate = kwargs.get("exploration_rate", 0.10)
        exploration_decay = kwargs.get(
            "exploration_decay",
            0.995)  # % reduction per step = 100 - exploration decay
        episodes = kwargs.get("episodes", 10000)
        batch_size = kwargs.get("sample_size", 32)
        experience = ExperienceReplay(self.model,
                                      self.target_model,
                                      discount=discount,
                                      max_memory=max_memory)
        self.experience = experience
        experience.maze = self.game.maze
        experience.cells = self.game.cells
        experience.exit_cell = self.game.exit_cell
        experience.state_size = self.state_size
        experience.walls = self.game.walls

        # variables for reporting purposes
        cumulative_reward = 0
        cumulative_reward_history = []
        win_history = []

        start_list = list()  # starting cells not yet used for training
        start_time = datetime.now()

        for episode in range(1, episodes + 1):
            if not start_list:
                start_list = self.environment.empty.copy()
            start_cell = random.choice(start_list)
            start_list.remove(start_cell)
            state = self.environment.reset(start_cell)
            actions_counter = 0
            loss = 0.0
            action = (0, 0)
            old_action = (0, 0)
            self.game.old_action = old_action

            while True:
                if np.random.random() < exploration_rate:
                    c_state = state[0][0]
                    r_state = state[0][1]
                    c_target, r_target = self.environment.exit
                    delta_r = r_target - r_state
                    delta_c = c_target - c_state
                    delta = np.abs(delta_r) + np.abs(delta_c)
                    delta_r_percent = np.abs(delta_r) / delta * 100
                    delta_c_percent = np.abs(delta_c) / delta * 100
                    move = tuple(np.sign([delta_c, delta_r]))
                    actions_list = self.environment.actions.copy()
                    actions_list.remove(move)
                    if np.sum(np.abs(move)) == len(move):  #diagonal movement
                        move_c = (move[0], 0)
                        move_r = (0, move[1])
                        actions_list.remove(move_c)
                        actions_list.remove(move_r)
                        if np.abs(delta_r - delta_c) < 20:
                            action_d_pool = [move] * 15
                            action_r_pool = [move_r] * 15
                            action_c_pool = [move_c] * 15
                            actions_pool = np.concatenate(
                                (action_d_pool, action_c_pool, action_r_pool))
                            for i in range(len(actions_list)):
                                actions_pool = np.concatenate(
                                    (actions_pool, [actions_list[i]] * 9))
                        else:
                            action_d_pool = [move] * 10
                            action_r_pool = [move_r] * int(
                                np.round(delta_r_percent * 35))
                            action_c_pool = [move_c] * int(
                                np.round(delta_c_percent * 35))
                            actions_pool = np.concatenate(
                                (action_d_pool, action_c_pool, action_r_pool))
                            for i in range(len(actions_list)):
                                actions_pool = np.concatenate(
                                    (actions_pool, [actions_list[i]] * 9))
                    else:
                        action_move_pool = [move] * 25
                        actions_pool = action_move_pool
                        for i in range(len(actions_list)):
                            actions_pool = np.concatenate(
                                (actions_pool, [actions_list[i]] * 11))
                    action = tuple(random.choice(actions_pool))
                    #action = random.choice(self.environment.actions)
                    action_index = actions[action][1]

                else:
                    # q = experience.predict(state)
                    # action = random.choice(np.nonzero(q == np.max(q))[0])
                    action, action_index = self.predict(state, old_action)

                next_state, reward, status = self.environment.step(action)
                state_augm = experience.state_creator(state, old_action)
                next_state_augm = experience.state_creator(next_state, action)
                cumulative_reward += reward
                experience.remember(state_augm, action_index, reward,
                                    next_state_augm, status)
                if status in ("win",
                              "lose"):  # terminal state reached, stop episode
                    break
                if experience.buffer.size() > 2 * batch_size:
                    inputs, targets = experience.get_samples(
                        sample_size=batch_size)
                    self.model.fit(inputs,
                                   targets,
                                   epochs=1,
                                   batch_size=16,
                                   verbose=0)
                    if actions_counter % 10 == 0:
                        experience.target_train()
                    actions_counter += 1
                    loss += self.model.evaluate(inputs, targets, verbose=0)
                state = next_state
                old_action = action

                self.environment.render_q(self)

            cumulative_reward_history.append(cumulative_reward)

            logging.info(
                "episode: {:d}/{:d} | status: {:4s} | loss: {:.4f} | e: {:.5f}"
                .format(episode, episodes, status, loss, exploration_rate))

            if episode % 500 == 0:
                # check if the current model wins from all starting cells
                # can only do this if there is a finite number of starting states
                w_all, win_rate = self.environment.win_all(self)
                win_history.append((episode, win_rate))
                if w_all is True and stop_at_convergence is True:
                    logging.info("won from all start cells, stop learning")
                    break

            exploration_rate *= exploration_decay  # explore less as training progresses
        self.save(self.name)  # Save trained models weights and architecture

        now = datetime.now()
        time_elapsed = now - start_time
        self.time = now.timestamp() - start_time.timestamp()
        logging.info("episodes: {:d} | time spent: {}".format(
            episode, time_elapsed))

        return cumulative_reward_history, win_history, episode, datetime.now(
        ) - start_time

    def q(self, state, old_action):
        """ Get q values for all actions for a certain state. """
        state = self.experience.state_creator(state, old_action)
        return self.model.predict(state)[0]

    def predict(self, state, old_action):
        """ Policy: choose the action with the highest value from the Q-table.
            Random choice if multiple actions have the same (max) value.

            :param np.ndarray state: Game state.
            :return: chosen action and its index.
        """
        q = self.q(state, old_action)

        logging.debug("q[] = {}".format(q))

        actions_index = np.nonzero(
            q == np.max(q))[0]  # get index of the action(s) with the max value
        return self.environment.actions[random.choice(
            actions_index)], actions_index
Example #25
0
          kernel_initializer='random_normal',
          input_dim=3))  # First hidden layer
ann.add(Dense(4, activation='relu',
              kernel_initializer='random_normal'))  # Second hidden layer
ann.add(Dense(1, activation='sigmoid',
              kernel_initializer='random_normal'))  # Output layer

# Optimize neural network with Adam (Adaptive moment estimation), combination of RMSProp and Momentum.
# Momentum takes past gradients into account in order to smooth out the gradient descent.
ann.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

ann.fit(X_train_norm, y_train, batch_size=10, epochs=100, verbose=0)

# In[18]:

ann_eval = ann.evaluate(X_train_norm, y_train, verbose=0)
y_pred = ann.predict_classes(X_test_norm, batch_size=10, verbose=0).flatten()
ann_class_report = classification_report(y_test.astype(bool),
                                         y_pred.astype(bool))

print('Loss and accuracy:')
print(ann_eval)
print()
print(ann_class_report)

# ## ROC Plot

# In[19]:

from sklearn import metrics
Example #26
0
def NewModel():
     #Array of classes
     #(0,1,2,3,4,5,6,7,8,9,10)
     Y_labels = []
     #random seed

     #collect data
     #load into python
     #pre-process
     #names -> dictionary
     #value -> numbers
     # Pre processing done in GetDeck

     #X_train = X_train[:250]
     print(X_train.shape)
     #print(X_train)
     #match decks to their training data speed e.g.house party - 5 speed
          #matched via dictionary labels
     #pad dataset (should be fine since all 60 cards
     #each add adds a layer (just doing Dense because it's like my brain haha)
     #X_trainShape = numpy.reshape(X_train,(60,8))
     print('done')
     kfold = KFold(n_splits=num_folds, shuffle=True)
     fold_no = 1
     for train, test in kfold.split(X_train, Y_train):
          model = Sequential()
          model.add(Dense(300, input_dim=(60 * 522), name="Input_Layer", activation="sigmoid"))
          model.add(Dense(250, name="Hidden"))
          model.add(Dense(180, name="Hidden2"))
          model.add(Dense(100, name="Hidden3", activity_regularizer=regularizers.l1(0.001)))
          model.add(Dense(3, name="Output", activation="softmax"))
          model.summary()
          #to_cat serialises classification ( e.g. "on" values)
          #sequential model
          print('starting stuff')
          #model.add function
          #train_test_split to do training sets test/train ratio of 10-20/80-90
          print(X_train.shape)
          model.compile(loss="categorical_crossentropy",optimizer="adam",metrics=["accuracy"])
          model.fit(X_train[train],Y_train[train],epochs=100,batch_size=150,verbose=2)
          scores = model.evaluate(X_train[test],Y_train[test],batch_size=150,verbose=0)
          print("Accuracy: %.2f%%" % scores[1]*100,flush=True)
          fold_no = fold_no + 1
          models.append(model)
          mScores.append(scores[1])
     #model.compile
     #loss e.g. rms
     #optimiser
     #metrics
     #model.fit (verbose = 2) for less warnings
     bestIndex = 0
     currIndex = 0
     for s in mScores:
          if mScores[currIndex] > mScores[bestIndex]:
               bestIndex = currIndex
          currIndex += 1

     model = models[bestIndex]
     model.fit(X_train, Y_train, epochs=50, batch_size=128, verbose=2)
     scores = model.evaluate(X_train, Y_train, batch_size=128, verbose=0)
     print("Accuracy: %.2f%%" % (scores[1] * 100), flush=True)
     #eval model
     #model.evaluate
     #xtest, ytest, batch size, verbose 2
     #print them out
     print(mScores)
     print(mScores[bestIndex])
     print(scores[1])
     #save model somewhere model = JSON h5 = weights
     model_json = model.to_json()
     with open("model.json",'w') as json_file:
          json_file.write(model_json)
     model.save_weights('model.h5')
     print('SAVED MODEL')
Example #27
0
autoencoder.add(UpSampling2D(size=(4,4)))
autoencoder.add(Conv2D(1, kernel_size=(5, 5), strides=(1, 1),
                 activation='relu',
                   padding='same'))

# print layer shapes
encoder = Sequential(layers=autoencoder.layers[0:5])

autoencoder.compile(optimizer='adadelta', loss='mean_squared_error', metrics=['accuracy'])
print(autoencoder.summary())

# Training
autoencoder.fit(X_train, X_train, epochs=10, batch_size=32)

# Evaluation
autoencoder.evaluate(X_train, X_train, batch_size=128)
autoencoder.evaluate(X_test, X_test, batch_size=128)


# based on autoencoders.ipynb
n = 5
for k in range(n):
    # original test image
    ax = plt.subplot(2, n, k + 1)
    X = X_test[k:k+1, :].reshape((28, 28))
    X *= 255
    X = X.astype(int)
    ax.imshow(X)
    # reconstruction
    ax = plt.subplot(2, n, k + 1 + n)
    reconstruction = autoencoder.predict(X_test[k:k+1, :])
    reconstruction.resize((28, 28))
    reconstruction *= 255
    reconstruction = reconstruction.astype(int)
    ax.imshow(reconstruction)
plt.show()
Example #28
0
class QReplayNetworkModel(AbstractModel):
    """ Prediction model which uses Q-learning and a neural network which replays past moves.

        The network learns by replaying a batch of training moves. The training algorithm ensures that
        the game is started from every possible cell. Training ends after a fixed number of games, or
        earlier if a stopping criterion is reached (here: a 100% win rate).
    """
    default_check_convergence_every = 5  # by default check for convergence every # episodes

    def __init__(self, game, **kwargs):
        """ Create a new prediction model for 'game'.

        :param class Maze game: maze game object
        :param kwargs: model dependent init parameters
        """
        super().__init__(game, **kwargs)

        if kwargs.get("load", False) is False:
            self.model = Sequential()
            self.model.add(
                Dense(game.maze.size, input_shape=(2, ), activation="relu"))
            self.model.add(Dense(game.maze.size, activation="relu"))
            self.model.add(Dense(len(game.actions)))
        else:
            self.load(self.name)

        self.model.compile(optimizer="adam", loss="mse")

    def save(self, filename):
        with open(filename + ".json", "w") as outfile:
            outfile.write(self.model.to_json())
        self.model.save_weights(filename + ".h5", overwrite=True)

    def load(self, filename):
        with open(filename + ".json", "r") as infile:
            self.model = model_from_json(infile.read())
        self.model.load_weights(filename + ".h5")

    def train(self, stop_at_convergence=False, **kwargs):
        """ Train the model.

            :param stop_at_convergence: stop training as soon as convergence is reached

            Hyperparameters:
            :keyword float discount: (gamma) preference for future rewards (0 = not at all, 1 = only)
            :keyword float exploration_rate: (epsilon) 0 = preference for exploring (0 = not at all, 1 = only)
            :keyword float exploration_decay: exploration rate reduction after each random step (<= 1, 1 = no at all)
            :keyword int episodes: number of training games to play
            :keyword int sample_size: number of samples to replay for training
            :return int, datetime: number of training episodes, total time spent
        """
        discount = kwargs.get("discount", 0.90)
        exploration_rate = kwargs.get("exploration_rate", 0.10)
        exploration_decay = kwargs.get(
            "exploration_decay",
            0.995)  # % reduction per step = 100 - exploration decay
        episodes = max(kwargs.get("episodes", 1000), 1)
        sample_size = kwargs.get("sample_size", 32)
        check_convergence_every = kwargs.get(
            "check_convergence_every", self.default_check_convergence_every)

        experience = ExperienceReplay(self.model, discount=discount)

        # variables for reporting purposes
        cumulative_reward = 0
        cumulative_reward_history = []
        win_history = []

        start_list = list()  # starting cells not yet used for training
        start_time = datetime.now()

        # training starts here
        for episode in range(1, episodes + 1):
            if not start_list:
                start_list = self.environment.empty.copy()
            start_cell = random.choice(start_list)
            start_list.remove(start_cell)

            state = self.environment.reset(start_cell)

            loss = 0.0

            while True:
                if np.random.random() < exploration_rate:
                    action = random.choice(self.environment.actions)
                else:
                    # q = experience.predict(state)
                    # action = random.choice(np.nonzero(q == np.max(q))[0])
                    action = self.predict(state)

                next_state, reward, status = self.environment.step(action)

                cumulative_reward += reward

                experience.remember(
                    [state, action, reward, next_state, status])

                if status in (
                        Status.WIN,
                        Status.LOSE):  # terminal state reached, stop episode
                    break

                inputs, targets = experience.get_samples(
                    sample_size=sample_size)

                self.model.fit(inputs,
                               targets,
                               epochs=4,
                               batch_size=16,
                               verbose=0)
                loss += self.model.evaluate(inputs, targets, verbose=0)

                state = next_state

                self.environment.render_q(self)

            cumulative_reward_history.append(cumulative_reward)

            logging.info(
                "episode: {:d}/{:d} | status: {:4s} | loss: {:.4f} | e: {:.5f}"
                .format(episode, episodes, status.name, loss,
                        exploration_rate))

            if episode % check_convergence_every == 0:
                # check if the current model does win from all starting cells
                # only possible if there is a finite number of starting states
                w_all, win_rate = self.environment.check_win_all(self)
                win_history.append((episode, win_rate))
                if w_all is True and stop_at_convergence is True:
                    logging.info("won from all start cells, stop learning")
                    break

            exploration_rate *= exploration_decay  # explore less as training progresses

        self.save(self.name)  # Save trained models weights and architecture

        logging.info("episodes: {:d} | time spent: {}".format(
            episode,
            datetime.now() - start_time))

        return cumulative_reward_history, win_history, episode, datetime.now(
        ) - start_time

    def q(self, state, status=None):
        """ Get q values for all actions for a certain state. """
        if isinstance(state, tuple):
            state = np.array(state, ndmin=2)

        return self.model.predict(state)[0]

    def predict(self, state, status=None):
        """ Policy: choose the action with the highest value from the Q-table.
            Random choice if multiple actions have the same (max) value.

            :param np.ndarray state: game state
            :return int: selected action
        """
        q = self.q(state)

        logging.debug("q[] = {}".format(q))

        actions = np.nonzero(
            q == np.max(q))[0]  # get index of the action(s) with the max value
        return random.choice(actions)
Example #29
0
model.add(Dense(122, activation='tanh'))
# Adding dense layer 142 and activation= sigmoid
model.add(Dense(142, activation='sigmoid'))
# output layer
# 10 output units and activation = softmax
model.add(Dense(10, activation='softmax'))

# Compile the model
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

# fit the model
history = model.fit(train_data, train_labels_one_hot, batch_size=256, epochs=20, verbose=1,
                   validation_data=(test_data, test_labels_one_hot))

# Evaluating the result on test data and get the loss and accuracy values
[test_loss, test_acc] = model.evaluate(test_data, test_labels_one_hot)
print("Evaluation result on Test Data with Scaling : Loss = {}, accuracy = {}".format(test_loss, test_acc))

# Plotting history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
# Printing Title
plt.title('model accuracy')
# y label as accuracy
plt.ylabel('accuracy')
# x label as epoch
plt.xlabel('epoch')
# Placing a legend on 'train' and 'test'
plt.legend(['train', 'test'], loc='upper left')
# To show the graph
plt.show()
Example #30
0
def show_train_history(train_history, train, validation):
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])
    plt.title('Train History')
    plt.xlabel('Epoch')
    plt.ylabel(train)
    plt.legend(['train', 'validation'])
    plt.show()


# accuracy and loss plots help to check for overfitting
show_train_history(train_history, 'accuracy', 'val_accuracy')
show_train_history(train_history, 'loss', 'val_loss')

# evaluation
scores = model.evaluate(x_test, y_test)
print("Final accuracy", scores[1])

prediction = model.predict_classes(x_test)
# convert predict result from 2D to 1D
import numpy as np
prediction = prediction.reshape(-1)

# show real value and prediction value
SentimentDict = {1: 'positive', 0: 'negative'}


def display_test_SentimentDict(idx):
    print(test_text[idx])
    print("real result:", SentimentDict[y_test[idx]])
    print("predict result:", SentimentDict[prediction[idx]])
Example #31
0
print('Minimum review length: {}'.format(len(min((X_train + X_test), key=len))))


from keras.preprocessing import sequence
max_words = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_words)
X_test = sequence.pad_sequences(X_test, maxlen=max_words)

from keras import Sequential
from keras.layers import Embedding, LSTM, Dense, Dropout
embedding_size=32
model=Sequential()
model.add(Embedding(vocabulary_size, embedding_size, input_length=max_words))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
print(model.summary())


model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

batch_size = 64
num_epochs = 3
X_valid, y_valid = X_train[:batch_size], y_train[:batch_size]
X_train2, y_train2 = X_train[batch_size:], y_train[batch_size:]
model.fit(X_train2, y_train2, validation_data=(X_valid, y_valid), batch_size=batch_size, epochs=num_epochs)

scores = model.evaluate(X_test, y_test, verbose=0)
print('Test accuracy:', scores[1])
Example #32
0
model.add(
    Conv2D(64,
           kernel_size=(2, 2),
           strides=1,
           activation='relu',
           padding='same'))
model.add(MaxPool2D((2, 2), 2, padding='same'))
model.add(
    Conv2D(32,
           kernel_size=(2, 2),
           strides=1,
           activation='relu',
           padding='same'))
model.add(MaxPool2D((2, 2), 2, padding='same'))
model.add(Flatten())
model.add(Dense(units=512, activation='relu'))
model.add(Dropout(rate=0.25))
model.add(Dense(units=24, activation='softmax'))
model.summary()

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(datagen.flow(x_train, y_train, batch_size=200),
          epochs=20,
          validation_data=(x_test, y_test),
          shuffle=1)

loss, acc = model.evaluate(x=x_test, y=y_test)

print("model acc={}%".format(acc * 100))