# Encoding the test dataset
encoding_test_y = to_categorical(test_y)

#print(encoding_train_y)
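# e.g., with the 3 iris classes, label 2 one-hot encodes to [0., 0., 1.]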

# Creating a model
model = Sequential()
model.add(Dense(10, input_dim=4, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10,  activation='relu'))
model.add(Dense(3, activation='softmax'))

# Compiling model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Training a model
model.fit(train_x, encoding_train_y, epochs=200, batch_size=10)

# Evaluate the model
scores = model.evaluate(test_x, encoding_test_y)
print("\nAccuracy: %.2f%%" % (scores[1]*100))

# serialize model to JSON
model_json = model.to_json()
with open("model_iris.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model_iris.h5")
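# A minimal reload sketch for the files written above (hedged: assumes
# model_from_json is available from tensorflow.keras.models):
# from tensorflow.keras.models import model_from_json
# with open("model_iris.json") as json_file:
#     loaded = model_from_json(json_file.read())
# loaded.load_weights("model_iris.h5")
# loaded.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])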


Example no. 2
# Add one hidden layer:
my_model.add(Dense(64, activation="relu"))

# Add an output layer:
my_model.add(Dense(1))

# Print the summary of the model:
print(my_model.summary())

# Create an instance of the Adam optimizer with the learning rate equal to 0.01
opt = Adam(learning_rate=0.01)

# Compile the model:
my_model.compile(loss='mse', metrics=['mae'], optimizer=opt)

# Train the model
my_model.fit(features_train_scaled,
             labels_train,
             epochs=50,
             batch_size=1,
             verbose=1)

# Evaluate the trained model
res_mse, res_mae = my_model.evaluate(features_test_scaled,
                                     labels_test,
                                     verbose=0)

# Print the final loss (MSE) and final metric (MAE)
print(res_mse, res_mae)
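# Since the loss above is plain MSE, an RMSE figure can be recovered as its square root:
# print('RMSE:', res_mse ** 0.5)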
Example no. 3
# mc = ModelCheckpoint(filepath=date_now(), monitor='val_loss', save_best_only=True, mode='auto')
# the weight values are written at that filepath

# mc must be defined for the callbacks list below
mc = ModelCheckpoint(filepath=modelpath, monitor='val_loss', save_best_only=True, mode='auto')
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
early_stopping = EarlyStopping(monitor='val_loss', patience=5, mode='auto')
hist = model.fit(x_train,
                 y_train,
                 epochs=50,
                 batch_size=16,
                 validation_split=0.2,
                 verbose=1,
                 callbacks=[early_stopping, mc])

#4
loss = model.evaluate(x_test, y_test)
print(loss[0])
print(loss[1])

# y_pred = model.predict(x_test)
# print(y_pred)
# print(y_test)

# print(y_pred[:10])
# print(y_test[:10])
# # y_test[:10]
# y_pred[:10]

# Epoch 100/1000
# 375/375 [==============================] - 32s 85ms/step - loss: 0.0169 - acc: 0.9964 - val_loss: 0.2326 - val_acc: 0.9772
Example no. 4
model.add(Dense(hidden_units, input_dim=input_size))
model.add(Dense(num_labels))
model.add(Activation('softmax'))
model.summary()

StopWatch.start("compile")
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
StopWatch.stop("compile")
StopWatch.start("train")
h = model.fit(x_train, y_train, epochs=8, batch_size=batch_size)
StopWatch.stop("train")

StopWatch.start("test")
loss, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print("\nTest accuracy: %.1f%%" % (100.0 * acc))
StopWatch.stop("test")

StopWatch.benchmark()

# "Loss"
plt.plot(h.history['loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()

# "Accuracy"
plt.plot(h.history['accuracy'])
plt.title('model accuracy')
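# (completing the accuracy plot to mirror the loss plot above)
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.show()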
Example no. 5
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# from keras.layers import Dense

model = Sequential()
model.add(Dense(10, input_dim=2))
model.add(Dense(5))
model.add(Dense(5))
model.add(Dense(1))

# 3. Compile, train
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
model.fit(x, y, epochs=100, batch_size=1, validation_split=0.2)

# 4. Evaluate, predict
loss, mae = model.evaluate(x, y)
print('loss :', loss)
print('mae :', mae)

y_predict = model.predict(x)
# print(y_predict)

# # Compute RMSE
# from sklearn.metrics import mean_squared_error
# def RMSE(y_test, y_predict):
#     return np.sqrt(mean_squared_error(y_test, y_predict))

# print('RMSE :', RMSE(y_test, y_predict))
# print('mse :', mean_squared_error(y_test, y_predict))

# # Compute R2
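# # (a likely continuation, mirroring the commented RMSE block above)
# # from sklearn.metrics import r2_score
# # print('R2 :', r2_score(y_test, y_predict))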
Example no. 6
# Universal Sentence Encoder
loaded_obj = 'https://tfhub.dev/google/universal-sentence-encoder/4'

# The pre-trained encoder, fine-tuned on our dataset
model = Sequential()
model.add(hub.KerasLayer(loaded_obj, input_shape=[], dtype=tf.string, trainable=True))
#model.add(Dense(64, activation='relu'))
#model.add(Dropout(0.2))
model.add(Dense(5, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
checkpoint = tf.keras.callbacks.ModelCheckpoint('model.h5', monitor='val_loss', save_best_only=True, verbose=1, save_weights_only=False)
history = model.fit(X_train, y_train, batch_size=16, epochs=6, shuffle=True, validation_data=(X_val,y_val), callbacks=[checkpoint])

model.save('model.h5')
model = load_model('model.h5', custom_objects={'KerasLayer':hub.KerasLayer})
model.summary()

# print test accuracy
results = model.evaluate(X_test, y_test)
print("test loss, test acc:", results)


##### Enter user input

x = [input()]
pred = model.predict(x)
tt = np.argmax(pred)
print(tt)
print(label1[tt])
Example no. 7
from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
# Creating a Sequential Model and adding the layers
model = Sequential()
model.add(Conv2D(28, kernel_size=(3, 3), input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())  # Flattening the 2D arrays for fully connected layers
model.add(Dense(128, activation=tf.nn.relu))
model.add(Dropout(0.2))
model.add(Dense(10, activation=tf.nn.softmax))

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(x=x_train, y=y_train, epochs=5)

model.evaluate(x_test, y_test)

image_index = 4444
plt.imshow(x_test[image_index].reshape(28, 28), cmap='Greys')
pred = model.predict(x_test[image_index].reshape(1, 28, 28, 1))
print(pred.argmax())
print(model.summary())
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['accuracy'])
#plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
Example no. 8
val_acc = H.history['val_accuracy']
epochs = range(1, len(loss) + 1)

# Plot the loss
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

# Plot the accuracy
plt.clf()
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# Get and print the results on the test set
results = model.evaluate(test_data, test_label)
print(results)

# Output the binary classification results
all_data = np.vstack((train_data, test_data))
all_label = np.vstack((train_label, test_label))
pred = model.predict(all_data)
drawResults(all_data, all_label, pred)
Example no. 9
xTrain = xTrain.astype('float32')
xTest = xTest.astype('float32')
xTrain /= 255
xTest /= 255

# Preparing one-hot class labels as the CNN output
yTrain = np_utils.to_categorical(yTrain, 10)
yTest = np_utils.to_categorical(yTest, 10)
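# (np_utils comes from the standalone Keras; the tf.keras equivalent is
#  tensorflow.keras.utils.to_categorical)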

# Initial xTrain data shape
input_shape = (28, 28, 1)

# Creating LeNet model
lenet = Sequential()
lenet.add(Conv2D(12, (5, 5), activation='relu', input_shape=input_shape))
lenet.add(Conv2D(12, (5, 5), activation='relu'))
lenet.add(Flatten())
lenet.add(Dense(30, activation='relu'))
lenet.add(Dense(10, activation='softmax'))

# Compiling LeNet model to configure the learning process
lenet.compile(optimizer=optimizers.Adam(0.01),
              loss='categorical_crossentropy',
              metrics=['categorical_accuracy'])

lenet.fit(xTrain, yTrain, batch_size=256, epochs=30, verbose=1)

final_loss, final_acc = lenet.evaluate(xTest, yTest, batch_size=128, verbose=2)

print('Final loss = ', final_loss)
print('Final accuracy = ', final_acc)
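Example no. 10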

path = './save/cifar10/modelSave'
path2 = './save/cifar10/'

####1. loadmodel
model1 = load_model(path+'.h5')
loss, acc=model1.evaluate(x_test, y_test, batch_size=64)
print("model1 loss: ", loss)
print("model2 acc: ", acc)

####2. loadCheckPoint
model2 = load_model(path2+"cp_cifar10-04--1.5607.hdf5")
loss, acc=model2.evaluate(x_test, y_test, batch_size=64)
print("model2 loss: ", loss)
print("model2 acc: ", acc)

####3. loadweight
model3 = Sequential()
model3.add(Conv2D(100, (2,2), input_shape=(32,32,3)))
model3.add(Flatten())
model3.add(Dense(100, activation='relu'))
model3.add(Dense(10, activation='softmax'))

model3.compile(loss='categorical_crossentropy', metrics=['acc'], optimizer='adam')

model3.load_weights(path+"_weight.h5")

loss, acc = model3.evaluate(x_test, y_test, batch_size=64)
print("model3 loss: ", loss)
print("model3 acc: ", acc)
training_images = training_images / 255.0

test_images = test_images.reshape(10000, 28, 28, 1)
test_images = test_images / 255.0

model = Sequential([
    Conv2D(filters=64,
           kernel_size=3,
           activation='relu',
           input_shape=(28, 28, 1)),
    MaxPooling2D(pool_size=2),
    Conv2D(filters=64, kernel_size=3, activation='relu'),
    MaxPooling2D(pool_size=2),
    Flatten(),
    Dense(units=128, activation='relu'),
    Dense(units=10, activation='softmax')
])

model.compile(optimizer='adam',
              loss=SparseCategoricalCrossentropy(),
              metrics=['accuracy'])

model.summary()

model.fit(x=training_images, y=training_labels, epochs=5)

test_loss, test_accuracy = model.evaluate(x=test_images, y=test_labels)

print('Test loss: {}, Test accuracy: {}'.format(test_loss,
                                                test_accuracy * 100))
Example no. 12
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
print(X_train.shape)

X_train = X_train.reshape(60000, 784)

# One-hot encoding!
# y_train_onehot = np_utils.to_categorical(y_train, 10) !ordered

num_classes = len(set(y_train))
y_train = to_categorical(y_train, num_classes)

assert y_train[0].shape[0] == num_classes

# Model
model = Sequential()
model.add(Dense(num_classes, input_dim=784, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Model Summary
model.summary()

# Train the model
model.fit(X_train, y_train, epochs=10, batch_size=10)

# Evaluate
scores = model.evaluate(X_train, y_train)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
Example no. 13
# That is, it also runs without the input_length parameter

# The Embedding layer's output is 3-D, so it can be fed straight into the LSTM
model.add(LSTM(32))
model.add(Dense(1, activation='sigmoid'))
model.summary()
# Model: "sequential"
# _________________________________________________________________
# Layer (type)                 Output Shape              Param #
# =================================================================
# embedding (Embedding)        (None, None, 11)          308
# _________________________________________________________________
# lstm (LSTM)                  (None, 32)                5632
# _________________________________________________________________
# dense (Dense)                (None, 1)                 33
# =================================================================
# Total params: 5,973
# Trainable params: 5,973
# Non-trainable params: 0
# _________________________________________________________________

# Param #: 308 == 28 (vocabulary size) * 11 (output_dim)
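# The other rows of the summary work out the same way:
# lstm Param #: 5632 == 4 * 32 * (11 + 32 + 1)
#   (4 gates, each with weights for 11 input dims, 32 recurrent units, and a bias)
# dense Param #: 33 == 32 weights + 1 bias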

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model.fit(pad_x, labels, epochs=100)

acc = model.evaluate(pad_x, labels)[1]
# evaluate returns [loss, metrics], so index 1 is 'acc'
print(acc)
Example no. 14
              metrics=['acc'])
# #### for binary classification, the loss is binary_crossentropy (outputs only 0/1)
model.load_weights('../data/h5/k52_1_weight.h5')  # load weights into the already-built model
# the weights already reflect fit, but only the weights are stored, not the model (model and compile are still needed)
'''
hist = model.fit(x_train, y_train, epochs=10, batch_size=8, verbose=1,
                 validation_split=0.2, callbacks=[es, cp])  # point where the weights are created
model.save('../data/h5/k52_1_model2.h5')  # includes the fit
# the model ends up saved twice in total; store it wherever you want
model.save_weights('../data/h5/k52_1_weight.h5')
from tensorflow.keras.models import Sequential, load_model
model1 = load_model('../data/h5/k52_1_model2.h5')
'''

#4_1. Evaluate, predict
result = model.evaluate(x_test, y_test, batch_size=8)
print('weights_loss : ', result[0])
print('weights_acc : ', result[1])  # printed for all 3 model variants

# model.load_weights('../data/h5/k52_1_weight.h5')  # only the weights are stored here, not the model
# model2 = load_model('../data/h5/k52_1_model2.h5')  # includes both weights and model
# result2 = model2.evaluate(x_test, y_test, batch_size=8)
# print('loaded_model_loss : ', result2[0])
# print('loaded_model_acc : ', result2[1])

#--- use whichever variant you prefer.
'''
model1 = load_model('../data/h5/k52_1_model2.h5')  # can be called in two places (below the model, or below fit)
loaded_model_loss :  0.06455615162849426
loaded_model_acc :  0.9782000184059143
# if load_model is called after training, evaluate/predict alone will reproduce these values
'''
Example no. 15
model.add(Activation("relu"))
model.add(Dense(classes))
model.add(Activation("softmax"))

optimizer = keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6)

model.compile(loss="categorical_crossentropy",
              optimizer=optimizer,
              metrics=["accuracy"])
print(model.summary())

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test),
          shuffle=True)

time_end = time.time()

if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)

scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])

print('time cost for training is', round(time_end - time_start, 2), 's')
Example no. 16
hist = TensorBoard(log_dir='graph',
                   histogram_freq=0,
                   write_graph=True,
                   write_images=True)

model.fit(x_train,
          y_train,
          epochs=1000,
          batch_size=32,
          verbose=1,
          validation_split=0.5,
          callbacks=[es, cp, hist])

# Evaluate, predict
loss, mse = model.evaluate(x_test, y_test, batch_size=32)
print("loss : ", loss)
print("mse : ", mse)

y_predict = model.predict(x_test)
# y_predict = np.argmax(y_predict, axis=1)
print("y_test.shape:", y_test.shape)  #(29, 1)
print("y_predict.shape:", y_predict.shape)  #(29, 1)

y_recovery = y_test

# user-defined RMSE function
from sklearn.metrics import mean_squared_error


def RMSE(y_test, y_predict):
    return np.sqrt(mean_squared_error(y_test, y_predict))
Example no. 17
def test_CRF():
    # data
    x = np.random.randint(1, embedding_num, nb_samples * timesteps)
    x = x.reshape((nb_samples, timesteps))
    x[0, -4:] = 0  # right padding
    x[1, :5] = 0  # left padding
    y = np.random.randint(0, output_dim, nb_samples * timesteps)
    y = y.reshape((nb_samples, timesteps))
    y_onehot = np.eye(output_dim)[y]
    y = np.expand_dims(y, 2)  # .astype('float32')

    # test with no masking, onehot, fix length
    model = Sequential()
    model.add(Embedding(embedding_num, embedding_dim, input_length=timesteps))
    crf = CRF(output_dim)
    model.add(crf)
    model.compile(optimizer='rmsprop', loss=crf_loss)
    model.fit(x, y_onehot, epochs=1, batch_size=10)
    model.save(MODEL_PERSISTENCE_PATH)
    load_model(MODEL_PERSISTENCE_PATH,
               custom_objects={
                   'CRF': CRF,
                   'crf_loss': crf_loss,
                   'crf_viterbi_accuracy': crf_viterbi_accuracy
               })

    # test with masking, sparse target, dynamic length;
    # test crf_viterbi_accuracy, crf_marginal_accuracy

    model = Sequential()
    model.add(Embedding(embedding_num, embedding_dim, mask_zero=True))
    crf = CRF(output_dim, sparse_target=True)
    model.add(crf)
    model.compile(optimizer='rmsprop',
                  loss=crf_loss,
                  metrics=[crf_viterbi_accuracy, crf_marginal_accuracy])
    model.fit(x, y, epochs=1, batch_size=10)

    # check mask
    y_pred = model.predict(x).argmax(-1)
    assert (y_pred[0, -4:] == 0).all()  # right padding
    assert (y_pred[1, :5] == 0).all()  # left padding

    # test viterbi_acc
    _, v_acc, _ = model.evaluate(x, y)
    np_acc = (y_pred[x > 0] == y[:, :, 0][x > 0]).astype('float32').mean()
    print(v_acc, np_acc)
    assert np.abs(v_acc - np_acc) < 1e-4

    # test config
    model.get_config()

    # test marginal learn mode, fix length

    model = Sequential()
    model.add(
        Embedding(embedding_num,
                  embedding_dim,
                  input_length=timesteps,
                  mask_zero=True))
    crf = CRF(output_dim, learn_mode='marginal', unroll=True)
    model.add(crf)
    model.compile(optimizer='rmsprop', loss=crf_loss)
    model.fit(x, y_onehot, epochs=1, batch_size=10)

    # check mask (marginal output)
    y_pred = model.predict(x)
    assert_allclose(y_pred[0, -4:], 1. / output_dim, atol=1e-6)
    assert_allclose(y_pred[1, :5], 1. / output_dim, atol=1e-6)

    # test marginal learn mode, but with Viterbi test_mode
    model = Sequential()
    model.add(
        Embedding(embedding_num,
                  embedding_dim,
                  input_length=timesteps,
                  mask_zero=True))
    crf = CRF(output_dim, learn_mode='marginal', test_mode='viterbi')
    model.add(crf)
    model.compile(optimizer='rmsprop', loss=crf_loss, metrics=[crf_accuracy])
    model.fit(x, y_onehot, epochs=1, batch_size=10)

    y_pred = model.predict(x)

    # check y_pred is onehot vector (output from 'viterbi' test mode)
    assert_allclose(np.eye(output_dim)[y_pred.argmax(-1)], y_pred, atol=1e-6)

    try:
        os.remove(MODEL_PERSISTENCE_PATH)
    except OSError:
        pass
Example no. 18
## Now fit the model to training data - specify epochs, batch_size, validation_split
history = model.fit(X_train_normd,
                    y_train,
                    batch_size=32,
                    epochs=100,
                    validation_split=0.1)

## The history attribute is a dictionary which stores the metrics from each epoch
val_mse = history.history['val_mse']
mse = history.history['mse']
import matplotlib.pyplot as plt
plt.plot(val_mse, color='red')  ## Plot the validation error
plt.plot(mse, color='blue')  ## Plot training error
## You can see that the validation error saturates after 25 epochs or so; we don't need to train for all 100 epochs (early stopping)
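## A hedged sketch of stopping early instead of running all 100 epochs
## (assumes tf.keras is the backend for the EarlyStopping import):
# from tensorflow.keras.callbacks import EarlyStopping
# early_stop = EarlyStopping(monitor='val_mse', patience=5, restore_best_weights=True)
# history = model.fit(X_train_normd, y_train, batch_size=32, epochs=100,
#                     validation_split=0.1, callbacks=[early_stop])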

## Now evaluate the model on test data
## But first normalize test data using same normalization that was used for training data

X_test_normd = (X_test - X_train_mean) / (10**-8 + X_train_std)
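## (Assumption: X_train_mean and X_train_std were computed earlier on the training set,
##  e.g. X_train_mean = X_train.mean(axis=0) and X_train_std = X_train.std(axis=0))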
model.evaluate(X_test_normd, y_test)

## The mean squared error is about 10, meaning our median house price prediction is off by about $3k (the square root of 10, with prices in units of $1000s)
"""## Some things to explore
- Try to see training performance if you don't normalize the inputs
- Try to tune the hyperparameters (number of layers, number of neurons, learning_rate of optimizer, batch_size etc) to see if you can converge faster or get a lower mse

## Assignment
- Try to use these techniques to model power plant data 
https://archive.ics.uci.edu/ml/datasets/Combined+Cycle+Power+Plant
"""
Example no. 19
model.add(Conv2D(64, kernel_size=(2, 2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
H = model.fit(train_data,
              train_labels,
              epochs=12,
              batch_size=16,
              validation_split=0.2,
              callbacks=[CustomCallback(train_data, train_labels)])
model.evaluate(test_data, test_labels)

loss = H.history['loss']
val_loss = H.history['val_loss']
acc = H.history['accuracy']
val_acc = H.history['val_accuracy']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf()
plt.plot(epochs, acc, 'r', label='Training acc')
Example no. 20
axs[1].plot(range(1,
                  len(history.history['val_loss']) + 1),
            history.history['val_loss'], 'b')
axs[1].set_title('Model loss')
axs[1].set_ylabel('Loss')
axs[1].set_xlabel('Epoch')
axs[1].set_xticks(np.arange(1,
                            len(history.history['loss']) + 1,
                            max(1, len(history.history['loss']) // 10)))  # a tick roughly every 10% of epochs
axs[1].legend(['training loss', 'validation loss'], loc='best')
fig.savefig('history_mod01.png')
"""
    Verificando a acurácia do modelo
"""
scores = model.evaluate(np.array(X_test),
                        np.array(y_test),
                        batch_size=batch_size)
print('Acurácia: ' + str(scores[1]))
print('Erro: ' + str(scores[0]))
"""
    Carregamento dos dados para gerar a matriz de confusão
"""
true_y = []
pred_y = []
x = np.load('mod_xtest.npy')
y = np.load('mod_ytest.npy')

json_file = open(arquivo_modelo_json, 'r')
loaded_model_json = json_file.read()
json_file.close()
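# A likely continuation of the reload (hedged sketch: the weights filename below is hypothetical):
# from tensorflow.keras.models import model_from_json
# loaded_model = model_from_json(loaded_model_json)
# loaded_model.load_weights('pesos_mod01.h5')  # hypothetical weights file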
Example no. 21
print(seq)

from tensorflow.keras.preprocessing.sequence import pad_sequences
max_len = 100
padded_docs = pad_sequences(seq, padding='pre', maxlen=max_len)
print(padded_docs)

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Activation, Dense, Dropout, Input, Embedding
embedding_vector_features = 30
model = Sequential()
model.add(Embedding(max_words, embedding_vector_features,
                    input_length=max_len))
model.add(LSTM(256))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
print(model.summary())

model.fit(padded_docs,
          Y_train,
          batch_size=128,
          epochs=10,
          validation_split=0.2)

text_seq = tkn.texts_to_sequences(X_test)
test_padded = pad_sequences(text_seq, maxlen=max_len)

accr = model.evaluate(test_padded, Y_test)
print(accr)
Example no. 22
class NeuralNet:
    def __init__(self, reviews, ratings, vocab_size, matrix):

        self.reviews = reviews
        self.ratings = ratings
        self.nn_model = Sequential()
        self.vocab_size = vocab_size
        self.embeddings_matrix = matrix

    def build_nn(self):
        #add the input and output layer here; you can use either tensorflow or pytorch
        # self.nn_meodel.add(tf.keras.layers.Reshape((), input_shape=(20153,)))
        self.nn_model.add(
            Embedding(self.vocab_size + 1,
                      100,
                      weights=[self.embeddings_matrix],
                      trainable=False,
                      input_length=29))
        # self.nn_model.add()
        self.nn_model.add(tf.keras.layers.Flatten())
        # self.nn_model.add(tf.keras.layers.Reshape((100*,), input_shape=(100,)))
        self.nn_model.add(Dense(5, activation=softmax_activation))
        # print(self.nn_model.output_shape)
        # print(self.nn_model.input_shape)

        self.nn_model.compile(loss=tf.keras.losses.categorical_crossentropy,
                              optimizer='adam',
                              metrics=[
                                  'accuracy',
                                  tf.keras.metrics.Recall(),
                                  tf.keras.metrics.Precision()
                              ])

    def train_nn(self, batch_size, epochs):
        # write the training loop here; you can use either tensorflow or pytorch
        # print validation accuracy
        review_val = self.reviews[:10000]
        rating_val = self.ratings[:10000]
        review_train = self.reviews[10000:]
        rating_train = self.ratings[10000:]

        callbacks = []
        callbacks.append(
            EarlyStopping(monitor='val_loss',
                          patience=25,
                          restore_best_weights=True))
        callbacks.append(
            tf.keras.callbacks.ReduceLROnPlateau(factor=0.5,
                                                 min_lr=7.8125e-8,
                                                 patience=10,
                                                 min_delta=0.001,
                                                 verbose=0))

        history = self.nn_model.fit(review_train,
                                    rating_train,
                                    batch_size=batch_size,
                                    epochs=epochs,
                                    validation_data=(review_val, rating_val),
                                    callbacks=callbacks,
                                    verbose=0)
        res = self.nn_model.evaluate(review_val, rating_val, verbose=0)
        print("Validation accuracy: {}%".format(round(res[1] * 100, 2)))

    def predict(self, reviews):
        # return a list containing all the ratings predicted by the trained model
        results = self.nn_model.predict(reviews)
        return results
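# Hypothetical usage sketch (the variable names below are illustrative, not from the source):
# net = NeuralNet(reviews, ratings, vocab_size, embeddings_matrix)
# net.build_nn()
# net.train_nn(batch_size=64, epochs=100)
# predicted = net.predict(test_reviews).argmax(axis=1)  # class index per review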
Example no. 23
R2 :  0.9999999999859842

verbose==3 :

Epoch 3/100
Epoch 4/100
Epoch 5/100

loss :  1.5092356875356927e-07
mae :  0.0003520280006341636
RMSE :  0.0003884888238632737
R2 :  0.9999999998090368

'''
# 4. Evaluate, predict
loss, mae = model.evaluate(x_test, y_test)
print('loss : ', loss)
print('mae : ', mae)

y_predict = model.predict(x_test)

from sklearn.metrics import mean_squared_error


def RMSE(y_test, y_predict):
    return np.sqrt(mean_squared_error(y_test, y_predict))


print("RMSE : ", RMSE(y_test, y_predict))

from sklearn.metrics import r2_score
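Example no. 24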
fig.savefig(f'plots/{model_name}.png', format='png', dpi=250, bbox_inches='tight')

gc.collect()

# Store model data to csv for analysis
filePath = 'ml-results.csv'

csvColumns = "Name,Val_Loss,Val_Mae,Epochs_Scheduled,Epochs_Ran,Training_Time(Mins),Input_Len,Output_Len,Batch_Size,Optimizer,Regularization,Dropout"
if not os.path.isfile(filePath):
    f = open(filePath, "a")
    f.write(csvColumns)
    f.close()

df = pd.read_csv(filePath)
df = df[csvColumns.split(',')]

score = model.evaluate(testX, testY, verbose=0)

csvRow = {
    'Name': model_name, 'Val_Loss': score[0], 'Val_Mae': score[1],
    'Epochs_Scheduled': EPOCHS, 'Epochs_Ran': len(history.history['loss']),
    'Training_Time(Mins)': sum(time_callback.times)/60, 'Input_Len': INPUT_LEN, 'Output_Len': OUTPUT_LEN,
    'Batch_Size': BATCH_SIZE, 'Optimizer': optimizer_str, 'Regularization': regularization_str,
    'Dropout': DROPOUT
    }

df = pd.concat([df, pd.DataFrame([csvRow])], ignore_index=True)  # df.append was removed in pandas 2.0

df.to_csv(path_or_buf=filePath, index=False)

print('ml-results.csv updated')
Example no. 25
x_train, x_test, y_train, y_test = train_test_split(x_dataset[:, :,
                                                              np.newaxis],
                                                    to_categorical(y_dataset),
                                                    test_size=0.2,
                                                    random_state=42)

hid_dim = 10

# Attach a Dense layer to the SimpleRNN and classify
model = Sequential()

model.add(SimpleRNN(hid_dim, input_shape=x_train.shape[1:])
          )  # input_shape=(sequence length T, dim of x_t), output_shape=(units(=hid_dim),)
model.add(Dense(y_train.shape[1], activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(x_train,
          y_train,
          epochs=50,
          batch_size=100,
          verbose=2,
          validation_split=0.2)

score = model.evaluate(x_test, y_test, verbose=0)
print('test_loss:', score[0])
print('test_acc:', score[1])

SVG(model_to_dot(model).create(prog='dot', format='svg'))
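# (model_to_dot requires the pydot package and Graphviz binaries to be installed)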
Example no. 26
model3.add(Dense(128, activation='relu'))
model3.add(Dense(300, activation='relu'))
model3.add(Dense(1024, activation='relu'))
model3.add(Dense(150, activation='relu'))
model3.add(Dense(70, activation='relu'))
model3.add(Dense(32, activation='relu'))
model3.add(Dense(1, activation='sigmoid'))  # binary classification: sigmoid -> output is 0 or 1, so one unit


# 3. Compile
model3.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])  # binary task with one sigmoid unit, so binary_crossentropy
model3.load_weights('./save/cancer_dnn_weights.h5')


#4. Evaluate, predict
result3 = model3.evaluate(x_test, y_test, batch_size=32)
print("======== weights saved =========")
print("loss : ", result3[0])
print("accuracy : ", result3[1])



'''
==== model & weights saved together =========
loss :  0.007307616528123617
accuracy :  1.0
6/6 [==============================] - 0s 1ms/step - loss: 0.1741 - accuracy: 0.9708
======= checkpoint saved =========
loss :  0.17407235503196716
accuracy :  0.9707602262496948
6/6 [==============================] - 0s 1ms/step - loss: 8.6444e-08 - acc: 1.0000
'''
Example no. 27
def trainCNNmodel(mfcc,
                  label,
                  gpu=0,
                  cpu=4,
                  niter=100,
                  nstep=10,
                  neur=16,
                  test=0.08,
                  num_classes=2,
                  epoch=30,
                  verb=0,
                  thr=0.85,
                  w=False):
    # Convolutional NN

    #    config = tf.ConfigProto(device_count={'GPU':gpu, 'CPU':cpu})
    #    sess = tf.Session(config=config)

    # Train the model
    for trial in range(niter):

        if trial % nstep == 0:
            x_train, y_train, x_test, y_test, scaler, normal = prepareDataSet(
                mfcc, label, size=test)
        shapedata = (x_train.shape[1], )
        x_train = np.reshape(
            x_train, (x_train.shape[0], mfcc.shape[1], mfcc.shape[2], 1),
            order='C')
        x_test = np.reshape(x_test,
                            (x_test.shape[0], mfcc.shape[1], mfcc.shape[2], 1),
                            order='C')

        # train the model
        batch_size = None
        nnn = neur

        model = Sequential()
        model.add(
            Conv2D(nnn,
                   kernel_size=(3, 3),
                   activation='linear',
                   input_shape=(mfcc.shape[1], mfcc.shape[2], 1),
                   padding='same'))
        model.add(LeakyReLU(alpha=0.1))
        model.add(MaxPooling2D((2, 2), padding='same'))
        model.add(Dropout(0.25))
        model.add(Conv2D(2 * nnn, (3, 3), activation='linear', padding='same'))
        model.add(LeakyReLU(alpha=0.1))
        model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
        model.add(Conv2D(4 * nnn, (3, 3), activation='linear', padding='same'))
        model.add(LeakyReLU(alpha=0.1))
        model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
        model.add(Dropout(0.4))
        model.add(Flatten())
        model.add(Dense(4 * nnn, activation='linear'))
        model.add(LeakyReLU(alpha=0.1))
        model.add(Dropout(0.3))
        model.add(Dense(num_classes, activation='softmax'))

        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])

        train = model.fit(x_train,
                          y_train,
                          epochs=epoch,
                          verbose=verb,
                          validation_data=(x_test, y_test))

        res = model.evaluate(x_test, y_test, verbose=0)
        print('loss ', res[0], 'accuracy ', res[1])
        if res[1] >= thr and w:
            print('found good match ', res[1].round(3))
            modelDump(model, x_train, y_train, x_test, y_test, scaler, normal,
                      res[1], train)


#    sess.close()
    return (model, x_train, y_train, x_test, y_test, scaler, normal, res[1],
            train)
Example no. 28
model.add(Dense(60))
model.add(Dense(60))
model.add(Dense(60))
model.add(Dense(1))

#3
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
model.fit(x_train,
          y_train,
          epochs=1000,
          batch_size=6,
          validation_split=0.2,
          verbose=1)

#4
loss, mae = model.evaluate(x_test, y_test, batch_size=6)
print('loss,mae : ', loss, mae)

y_predict = model.predict(x_test)

from sklearn.metrics import mean_squared_error


def RMSE(y_test, y_predict):
    return np.sqrt(mean_squared_error(y_test, y_predict))


print('RMSE : ', RMSE(y_test, y_predict))

from sklearn.metrics import r2_score
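Example no. 29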
model.add(
    Conv1D(filters=nb_filter, kernel_size=filter_length, activation='relu'))
model.add(MaxPooling1D(pool_size=2))

model.add(Flatten())

model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X_train, y_train, batch_size=64, epochs=5)

score = model.evaluate(X_test, y_test, verbose=1)
print('acc: ' + str(score[1]))

from sklearn.metrics import precision_recall_fscore_support, classification_report
y_pred = (model.predict(X_test) > 0.5).astype('int32').ravel()  # predict_classes was removed in newer Keras
end = time.time()
print('CNN Classification report:\n', classification_report(y_test, y_pred))
'''
model = load_model('/home/zhangyc/下载/paper/code/CNN/Models/Model_cnn_FR_2.h5')
model.name='Model_cnn_FR_2.h5'
'''

fpr, tpr, threshold = roc_curve(y_test, y_pred)  # compute the true and false positive rates
roc_auc = auc(fpr, tpr)  # compute the AUC value

plt.figure()
Example no. 30
               loss='categorical_crossentropy',
               metrics=['accuracy'])


###############################################################################

# Model Training

model.fit(X_train, y_train, batch_size=32,
          epochs=15, verbose=1,
          validation_data=(X_test, y_test))

###############################################################################

# Evaluate Your Model's Performance
score = model.evaluate(X_test, y_test, batch_size=32)


###############################################################################
# Prediction
model3.predict(X_test, batch_size=32)  # predict() takes inputs only, not labels
model3.predict_classes(X_test, batch_size=32)  # note: removed in newer Keras; use predict().argmax(axis=-1)

###############################################################################

# Save / Reload Models

from tensorflow.keras.models import load_model
model3.save('model3_file.h5')
my_model = load_model('model3_file.h5')
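Example no. 31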
    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=opt,
                  metrics=['accuracy'])

    callbacks = [
        # Horovod: broadcast initial variable states from rank 0 to all other processes.
        # This is necessary to ensure consistent initialization of all workers when
        # training is started with random weights or restored from a checkpoint.
        hvd.callbacks.BroadcastGlobalVariablesCallback(0),
    ]

    # Horovod: save checkpoints only on worker 0 to prevent other workers from corrupting them.
    if hvd.rank() == 0:
        callbacks.append(tf.keras.callbacks.ModelCheckpoint('./checkpoint-{epoch}.h5'))

    model.fit(x_train, y_train,
              batch_size=batch_size,
              callbacks=callbacks,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=0)

    print('Test loss:', score[0])
    print('Test accuracy:', score[1])

    # Horovod: Save model only on worker 0 (i.e. master)
    if hvd.rank() == 0:
        saved_model_path = tf.contrib.saved_model.save_keras_model(model, args.model_dir)
        print("Model successfully saved at: {}".format(saved_model_path))
Example no. 32
dataset = load_diabetes()
print(type(dataset))

# print(dataset["DESCR"])
print(dataset.keys())

X = dataset["data"]
y = dataset["target"]

# y = (y - np.mean(y)) / np.std(y)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)

model = Sequential([
    Dense(128, activation="relu", input_shape=(X_train.shape[1],)),
    Dense(128, activation="relu"),
    Dense(128, activation="relu"),
    Dense(128, activation="relu"),
    Dense(128, activation="relu"),
    Dense(128, activation="relu"),
    Dense(1)
])

print(model.summary())

model.compile(optimizer="adam",loss="mse",metrics=["mae"])
history=model.fit(X_train,y_train,epochs=1000,validation_split=0.15)

print(model.evaluate(X_test,y_test,verbose=2))