Example #1
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense


def fit_model(train_X, train_Y, window_size = 1):
    EPOCHS=10
    model = Sequential()

    model.add(LSTM(4,
                   input_shape = (1, window_size)))
    model.add(Dense(1))
    model.compile(loss = "mean_squared_error",
                  optimizer = "adam")
    model.fit(train_X,
              train_Y,
              epochs = EPOCHS,
              batch_size = 1,
              verbose = 2)

    return model
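
# A minimal usage sketch (assumed: the names and data here are illustrative,
# not from the original project). train_X must be shaped
# (samples, 1, window_size) to match the LSTM's input_shape above.
import numpy as np

train_X = np.random.rand(100, 1, 3)  # (samples, timesteps=1, features=window_size)
train_Y = np.random.rand(100)
model = fit_model(train_X, train_Y, window_size=3)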
Example #2
model.add(Activation('relu'))                            
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))

# A Dense(512) layer on a 784-dim input has 512 * 784 + 512 parameters (weights plus biases).

model.summary()

# compiling the sequential model
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')

# training the model and saving metrics in history
history = model.fit(X_train, Y_train,
          batch_size=128, epochs=20,
          verbose=2,
          validation_data=(X_test, Y_test))

# plotting the metrics
fig = plt.figure()
plt.subplot(2,1,1)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='lower right')

plt.subplot(2,1,2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
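
# The loss subplot is cut off in the original; a plausible completion that
# mirrors the accuracy subplot above (assumed):
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.tight_layout()
plt.show()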
Example #3
n_hidden = 300

model = Sequential()
model.add(
    LSTM(n_hidden,
         batch_input_shape=(None, length_of_sequence, in_out_neurons),
         return_sequences=False))
model.add(Dense(in_out_neurons))
model.add(Activation("linear"))
optimizer = Adam(lr=0.001)
model.compile(loss="mean_squared_error", optimizer=optimizer)

early_stopping = EarlyStopping(monitor='val_loss', mode='auto', patience=20)
model.fit(g,
          h,
          batch_size=300,
          epochs=100,
          validation_split=0.1,
          callbacks=[early_stopping])

# prediction
predicted = model.predict(g)

plt.figure()
plt.plot(range(25,
               len(predicted) + 25),
         predicted,
         color="r",
         label="predict_data")
plt.plot(range(0, len(f)), f, color="b", label="raw_data")
plt.legend()
plt.show()
Example #4
#     # layers.MaxPooling2D(),
#     # layers.Conv2D(128, 3, activation='relu', padding='SAME'),
#     layers.MaxPooling2D(),
#     # keras.layers.Dropout(0.3),
#     layers.Flatten(),
#     layers.Dense(32, activation='relu'),
#     # keras.layers.Dropout(0.4),
#     layers.Dense(num_classes, activation=tf.nn.softmax)
# ])
# model.compile(
#   optimizer='adam',
#   loss="binary_crossentropy",
#   metrics=['accuracy'])

history = model.fit(normalized_train,
                    validation_data=normalized_train,
                    epochs=50)
model.summary()  # model.summary() prints directly and returns None, so wrapping it in an f-string prints "None"

# train the model
# optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
# checkpoint = tf.train.Checkpoint(cnn=model)

# for epoch in range(TRAINING_EPOCHS):    # loop TRAINING_EPOCHS times
#     avg_loss = 0.
#     avg_train_acc = 0.
#     avg_test_acc = 0.
#     train_step = 0
#     test_step = 0
#
#     for images, labels in data_train:    # fetch BATCH_SIZE images and labels at a time
model.summary()

# 3. compile, train
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['acc'])
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
early_stopping = EarlyStopping(monitor='val_loss', patience=10, mode='auto')
modelpath = '../data/modelcheckpoint/k54_conv1d_fashion_checkpoint.hdf5'
cp = ModelCheckpoint(modelpath,
                     monitor='val_loss',
                     save_best_only=True,
                     mode='auto')
model.fit(x_train,
          y_train,
          batch_size=128,
          epochs=7,
          validation_split=0.2,
          callbacks=[early_stopping, cp])

# 4. evaluate, predict
loss, acc = model.evaluate(x_test, y_test, batch_size=128)
y_pred = model.predict(x_test[:-10])
y_recovery = np.argmax(y_pred, axis=1).reshape(-1, 1)
print(y_recovery)
print("y_test : ", y_test[:-10])
print("y_pred : ", y_recovery)
print("loss : ", loss)
print("acc : ", acc)

# Dropout applied, second conv2d node count 100
# loss :  0.4641130864620209
Example #6
model.add(Dense(1))
model.add(Activation('linear'))
model.summary()

writer = tf.summary.FileWriter("./log", sess.graph)  # TF1-style API; assumes a tf.compat.v1 Session `sess` exists

start = time.time()
model.compile(loss='mae', optimizer='adam')
print('compilation time : ', time.time() - start)

#########
# train #
#########
history = model.fit(x_train,
                    y_train,
                    batch_size=32,
                    epochs=10,
                    validation_data=(x_test, y_test))

###########
# predict #
###########
results = model.predict(x_test)
results = results.reshape(-1)

##################
# recover prices #
##################
# raw_prices = (y_test+1)*validateFirsts  # should match close_prices[-490:0]
# predicted_prices = (results+1)*validateFirsts
# predicted_prices = predicted_prices + raw_prices.mean() - predicted_prices.mean()
Example #7
File: myNN.py  Project: hooperw/aima-python
#model.add(keras.layers.Conv2D(filters=64,kernel_size=(5,5),strides=(2,2),activation='relu',padding='same'))
model.add(keras.layers.Conv2D(filters=64,kernel_size=(5,5),strides=(2,2),activation='relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2,2),strides=(2,2)))
model.add(keras.layers.Dropout(.2))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(512,activation='relu'))
#model.add(keras.layers.Dropout(.5))
#model.add(keras.layers.Dense(1000,activation='tanh'))
model.add(keras.layers.Dense(10))
model.add(Activation('softmax'))

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test),
          )
#model.save('Convolutional image classifier_Model4Reg.h5')

model.compile(loss=keras.losses.categorical_crossentropy,optimizer=keras.optimizers.Adam(lr=.001),metrics=['categorical_accuracy'])

#model.load_weights('Convolutional image classifier_Model4Reg.h5')


#https://www.youtube.com/watch?v=V23DmbdzMvg
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn import preprocessing

iris = load_iris()
X = preprocessing.scale(iris['data'])
Y = to_categorical(iris['target'])

#print(X)
#print(Y)

#training data and test data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)

#model
model = Sequential()
model.add(Dense(10, input_dim=4, activation='relu'))
model.add(Dense(10,  activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

#fitting the model
model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=200, batch_size=10)
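
# A short evaluation sketch (assumed continuation, not from the original):
loss, acc = model.evaluate(X_test, Y_test, verbose=0)
print("Test accuracy: %.4f" % acc)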
Example #9
model.add(MaxPool2D())

model.add(Conv2D(filters=64, kernel_size=5, padding='same'))
model.add(Activation("relu"))
model.add(Conv2D(filters=64, kernel_size=5, padding='same'))
model.add(Activation("relu"))
model.add(MaxPool2D())

model.add(Flatten())

model.add(Dense(units=num_classes))
model.add(Activation("softmax"))

# Compile and train (fit) the model, afterwards evaluate the model
model.summary()

model.compile(loss="categorical_crossentropy",
              optimizer=optimizer,
              metrics=["accuracy"])

model.fit(
    x=x_train,
    y=y_train,
    epochs=epochs,
    batch_size=batch_size,
    validation_data=(x_test, y_test),
)

score = model.evaluate(x_test, y_test, verbose=0)
print("Score: ", score)
Example #10
# one-hot encode the output values
from keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# ANN model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

model = Sequential()
model.add(Dense(16,input_dim=4, activation="relu"))
model.add(Dense(12,activation="relu"))
model.add(Dense(3,activation="softmax"))
model.summary()

# compile the model
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics= ["accuracy"])

# train the model
model.fit(X_train,y_train,validation_data=(X_test,y_test), epochs=500)
# report the averaged metrics
print(("Ortalama egitim kaybi: ", np.mean(model.history.history["loss"])))
print(("Ortalama Egitim Basarimi: ", np.mean(model.history.history["accuracy"])))
print(("Ortalama Dogrulama kaybi: ", np.mean(model.history.history["val_loss"])))
print(("Ortalama Dogrulama Basarimi: ", np.mean(model.history.history["val_accuracy"])))



tahmin = np.array([19.34,8.42,463.52,14.6]).reshape(1,4)
print(model.predict_classes(tahmin))
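
# Note: predict_classes was removed in recent Keras/TensorFlow releases; an
# equivalent for this softmax model would be (assumed replacement):
print(np.argmax(model.predict(tahmin), axis=1))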
Example #11
import tensorflow as tf  # updated: tensorflow==2.0.0-beta1
from tensorflow.keras import backend as k  # updated: tensorflow==2.0.0-beta1
from tensorflow.keras.models import Sequential  # updated: tensorflow==2.0.0-beta1
classificador = Sequential([  # updated: tensorflow==2.0.0-beta1
               tf.keras.layers.Dense(units=16, activation = 'relu', kernel_initializer = 'random_uniform', input_dim=30),
               tf.keras.layers.Dense(units=16, activation = 'relu', kernel_initializer = 'random_uniform'),
               tf.keras.layers.Dense(units=1, activation = 'sigmoid')])

otimizador = tf.keras.optimizers.Adam(lr = 0.001, decay = 0.0001, clipvalue = 0.5)  # updated: tensorflow==2.0.0-beta1
classificador.compile(optimizer = otimizador, loss = 'binary_crossentropy',
                      metrics = ['binary_accuracy'])

#classificador.compile(optimizer = 'adam', loss = 'binary_crossentropy',
#                      metrics = ['binary_accuracy'])
classificador.fit(previsores_treinamento, classe_treinamento,
                  batch_size = 10, epochs = 100)

pesos0 = classificador.layers[0].get_weights()
print(pesos0)
print(len(pesos0))
pesos1 = classificador.layers[1].get_weights()
pesos2 = classificador.layers[2].get_weights()

previsoes = classificador.predict(previsores_teste)
previsoes = (previsoes > 0.5)
from sklearn.metrics import confusion_matrix, accuracy_score
precisao = accuracy_score(classe_teste, previsoes)
matriz = confusion_matrix(classe_teste, previsoes)

resultado = classificador.evaluate(previsores_teste, classe_teste)
Example #12
model.add(GRU(16))
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

#Model compile and print summary
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
print(model.summary())

# Model train implement earlystopping callback
early_stopping = EarlyStopping('val_accuracy', patience=4)

history = model.fit(padded_text,
                    label,
                    epochs=5,
                    callbacks=[early_stopping],
                    validation_split=0.1)

#Model save
model.save('yelp_8k.h5')

#Model plots
epochs_ = len(history.history['loss'])

fig, axes = plt.subplots(2, 1)
axes[0].plot(range(epochs_), history.history['loss'], label='training')
axes[0].plot(range(epochs_), history.history['val_loss'], label='validation')
axes[0].legend()

axes[1].plot(range(epochs_), history.history['accuracy'], label='training')
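
# The plot is cut off in the original; the matching validation curve and
# legend would plausibly follow (assumed):
axes[1].plot(range(epochs_), history.history['val_accuracy'], label='validation')
axes[1].legend()
plt.show()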
Example #13
# Import the required libraries.
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Set the seeds so every run produces the same results.
np.random.seed(3)
tf.random.set_seed(3)

# Load the data.
dataset = np.loadtxt("../../dataset/pima-indians-diabetes.csv",
                     delimiter=",")
X = dataset[:, 0:8]
Y = dataset[:, 8]

# Define the model.
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

# Compile the model.
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Train the model.
model.fit(X, Y, epochs=200, batch_size=10)

# Print the result.
print("\n Accuracy: %.4f" % (model.evaluate(X, Y)[1]))
Example #14
    layers.Dense(128, activation='relu'),
    layers.Dense(num_classes)
])

# 2. loss, optimizer, metric
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])

model.summary()

# 4. model training
epochs = 10
# x = train_ds??
history = model.fit(train_ds, validation_data=val_ds, epochs=epochs)

# 5. model evaluation
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(epochs)

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
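
# A plausible completion of the standard two-panel training plot (assumed):
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.show()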
Example #15
model2.add(LSTM(input_shape = (window_size, 1),
                units = window_size,
                return_sequences = True))
model2.add(Dropout(0.5))
model2.add(LSTM(256))
model2.add(Dropout(0.5))
model2.add(Dense(1))
model2.add(Activation("linear"))
model2.compile(loss = "mse",
               optimizer = "adam")
print(model2.summary())

# Fit the model.
model2.fit(train_X,
           train_Y,
           batch_size = 512,
           epochs = 3,
           validation_split = 0.1)


pred_test = model2.predict(test_X)

# Apply inverse transformation to get back true values.
test_y_actual = scaler.inverse_transform(test_Y.values.reshape(test_Y.shape[0], 1))

print("MSE for predicted test set: %2f" % mean_squared_error(test_y_actual, pred_test))

plt.figure(figsize = (15, 5))
plt.plot(test_y_actual, label="True value")
plt.plot(pred_test, label="Predicted value")
plt.xlabel("x")
# Encoding testing dataset
encoding_test_y = to_categorical(test_y)

#print(encoding_train_y)

# Creating a model
model = Sequential()
model.add(Dense(10, input_dim=4, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10,  activation='relu'))
model.add(Dense(3, activation='softmax'))

# Compiling model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Training a model
model.fit(train_x, encoding_train_y, epochs=200, batch_size=10)

# Evaluate the model
scores = model.evaluate(test_x, encoding_test_y)
print("\nAccuracy: %.2f%%" % (scores[1]*100))

# serialize model to JSON
model_json = model.to_json()
with open("model_iris.json", "w") as json_file:
         json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model_iris.h5")


Example #17
    
    return Model(inputs=inputs, outputs=outputs)

model = build_model(x_train.shape[1:], 2)
print(x_train.shape[1:])    # (128, 862)

model.summary()

# compile, train
model.compile(optimizer='adam', loss="sparse_categorical_crossentropy", metrics=['acc'])
es = EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True, verbose=1)
lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=1)
path = 'C:/nmb/nmb_data/h5/new_Conv1D_mfcc.h5'
mc = ModelCheckpoint(path, monitor='val_loss', verbose=1, save_best_only=True)
tb = TensorBoard(log_dir='C:/nmb/nmb_data/graph',histogram_freq=0, write_graph=True, write_images=True)
history = model.fit(x_train, y_train, epochs=300, batch_size=16, validation_split=0.2, callbacks=[es, lr, mc, tb])

# visualization
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])

plt.title('loss & acc')
plt.xlabel('epoch')
plt.ylabel('loss & acc')
plt.legend(['train_loss', 'val_loss', 'train_acc', 'val_acc'])
plt.show()

# evaluate, predict
model.load_weights('C:/nmb/nmb_data/h5/new_Conv1D_mfcc.h5')
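
# An evaluation sketch after restoring the best checkpoint (assumed; x_test
# and y_test are not shown in the excerpt):
result = model.evaluate(x_test, y_test, batch_size=16)
print("loss, acc:", result)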
Example #18
model.add(LSTM(128, activation='tanh', input_shape=(train_x.shape[1:])))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))

model.add(Dense(2, activation="softmax"))

opt = tf.keras.optimizers.Adam(lr = 0.001, decay=1e-6)

model.compile(loss='sparse_categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

tensorboard = TensorBoard(log_dir=f"logs/{NAME}")

# Save the Models
filepath = "RNN_Final-{epoch:02d}-{val_acc:.3f}"  # unique file name that will include the epoch and the validation acc for that epoch
checkpoint = ModelCheckpoint("models/{}.model".format(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max'))  # saves only the best ones

history = model.fit(
    train_x, train_y,
    batch_size=BATCH_SIZE,
    epochs = EPOCHS,
    validation_data=(validation_x, validation_y),
    callbacks=[tensorboard, checkpoint])

# check tensorboard with: tensorboard --logdir=logs 
# need to be in directory where logs would be
Example #19
model.add(LSTM(150, input_shape=(7, 1), activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(3))

model.summary()

# compile
model.compile(loss='mse', optimizer='adam')

# EarlyStopping
from tensorflow.keras.callbacks import EarlyStopping
es = EarlyStopping(monitor='loss', patience=30, mode='min')

# fit
hist = model.fit(x_train,
                 y_train,
                 epochs=200,
                 batch_size=1,
                 verbose=1,
                 validation_split=0.2,
                 callbacks=[es])

# evaluate
evaluate = model.evaluate(x_test, y_test, batch_size=1, verbose=2)
print(evaluate)

# predict
y_pred = model.predict(x_pred)
print(y_pred)
Example #20
EPOCHS = 75

# Backpropagation
# compile the model using SGD as the optimizer and categorical
# cross-entropy loss (use binary_crossentropy for 2-class classification),
# where SGD is the library optimizer that performs backpropagation updates
print("[INFO] training network...")
opt = SGD(lr=INIT_LR)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])

# train the neural network
H = model.fit(trainX,
              trainY,
              validation_data=(testX, testY),
              epochs=EPOCHS,
              batch_size=32)

# evaluate the network
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=32)
print(
    classification_report(testY.argmax(axis=1),
                          predictions.argmax(axis=1),
                          target_names=lb.classes_))

# plot the training loss and accuracy
N = np.arange(0, EPOCHS)
plt.style.use("ggplot")
plt.figure()
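
# The snippet breaks off after plt.figure(); the usual continuation of this
# training-plot pattern would be (assumed):
plt.plot(N, H.history["loss"], label="train_loss")
plt.plot(N, H.history["val_loss"], label="val_loss")
plt.plot(N, H.history["accuracy"], label="train_acc")
plt.plot(N, H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.legend()
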
X_test = scale.transform(X_test)

# Creating the model and fitting it to the training data:

# This is a sequential neural network with 4 layers:
# Different types of activation functions are available: https://en.wikipedia.org/wiki/Activation_function
# we use here Rectified Linear Unit function.

model = Sequential()
model.add(Dense(15, activation='relu'))
model.add(Dense(15, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

model.compile(optimizer='adam', loss='binary_crossentropy')

model.fit(x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=200)

# Visualize the loss:
loss = pnd.DataFrame(model.history.history)
loss.plot()
plt.show()

# Testing the model:
print('Training Score: ', model.evaluate(X_train, y_train, verbose=0))
print('Testing Score: ', model.evaluate(X_test, y_test, verbose=0))

# Model evaluations:
predictions = model.predict_classes(X_test)
trainPredictions = model.predict_classes(X_train)
print('Training report: \n',
      metrics.classification_report(y_train, trainPredictions))
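
# Note: predict_classes was removed in recent Keras releases; for this binary
# (sigmoid) model an equivalent would be (assumed replacement):
predictions = (model.predict(X_test) > 0.5).astype("int32")
trainPredictions = (model.predict(X_train) > 0.5).astype("int32")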
Example #22

#create the model
model=Sequential()
model.add(Conv2D(6, kernel_size=(5,5), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(10, kernel_size=(5, 5), activation='relu'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(no_classes, activation='softmax'))

#compile the model
model.compile(loss=tf.keras.losses.categorical_crossentropy, optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])

#fit data to model
model.fit(input_train, target_train, batch_size=batch_size, epochs=no_epochs, verbose=verbosity, validation_split=validation_split)

#generate generalization metrics
score=model.evaluate(input_test, target_test, verbose=0)
print(f'Test loss: {score[0]} / Test accuracy: {score[1]}')



#keract visualizations
#for each layer
from keract import get_activations, display_activations
keract_inputs=input_test[:1]
keract_target=target_test[:1]
activations=get_activations(model, keract_inputs)
display_activations(activations, cmap='gray', save=False)
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))

model.summary()

from tensorflow.keras import optimizers

model.compile(optimizer='RMSprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])

history = model.fit(x_train,
                    y_train,
                    validation_split=0.2,
                    epochs=100,
                    batch_size=16)

history.history.keys()

history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']

epochs = range(100)
plt.plot(epochs, loss_values, 'bo', label='Training loss')
plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
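
# A plausible finish for this loss plot (assumed):
plt.legend()
plt.show()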
Example #24
model = Sequential()
model.add(
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=tf.keras.losses.categorical_crossentropy,
              optimizer=tf.keras.optimizers.Adam(),
              metrics=['accuracy'])

model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))

score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', ds.metric("loss", score[0]))
print('Test accuracy:', ds.metric("accuracy", score[1]))

if os.path.isdir("model"):
    shutil.rmtree("model", ignore_errors=True)

MODEL_DIR = "./model"
export_path = os.path.join(MODEL_DIR)

model.save(export_path)
Example #25
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.build(input_shape=(1,68,100,3))
model.summary()

# Define the Keras TensorBoard callback.
logdir="logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir,write_graph=True)

# Train
print("Training...")
epochs = 10
history=model.fit(
    trainDataset,
    validation_data=valiDataset,
    epochs=epochs,
    callbacks=[tensorboard_callback])
print("Training completed.")

# Save model
tf.saved_model.save(model,modelPath)
print("Model save to:%s" %modelPath)



# Visualize training results
acc=history.history['accuracy']
valAcc=history.history['val_accuracy']
loss=history.history['loss']
valLoss=history.history['val_loss']
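
# The visualization code stops after collecting the metrics; a standard
# two-panel plot would plausibly follow (assumed):
import matplotlib.pyplot as plt

epochsRange = range(epochs)
plt.subplot(1, 2, 1)
plt.plot(epochsRange, acc, label='train')
plt.plot(epochsRange, valAcc, label='validation')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochsRange, loss, label='train')
plt.plot(epochsRange, valLoss, label='validation')
plt.legend()
plt.show()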
Example #26
print("================================")

# neural network
model = Sequential()
model.add(Dense(units=256, activation='relu',
                input_dim=6))  #kernel_regularizer=regularizers.l2(0.1)
for i in range(2):
    model.add(Dense(units=256, activation='relu'))
#   model.add(tf.keras.layers.Dropout(0.5))
model.add(Dense(units=1, activation='linear'))
model.compile(optimizer='adam', loss='mse', metrics=['mape', 'mae'])

startTime = time.time()
history = model.fit(training_data,
                    training_label,
                    epochs=200,
                    batch_size=16,
                    verbose=1)  #
endTime = time.time()
print("================ time cost: ", endTime - startTime)
# validation_data=(test_data,test_label), validation_freq=1)
# print(model.summary())
print("===================================================")
print(model.predict(test_data))

loss_and_metrics = model.evaluate(test_data, test_label, batch_size=32)
fore_data = model.predict(test_data, batch_size=32)  # get the network's predictions via predict

# record
model.save("3s1c.h5")
model = Sequential()
model.add(t)
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Activation('relu'))
model.add(Dense(256))
model.add(Dense(10, activation='softmax'))


model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=100, batch_size=512)


# 4. evaluate
loss, accuracy = model.evaluate(x_test, y_test, batch_size=512)

print("==========cifar10_MobileNetV2==========")
model.summary()
print("loss: ", loss)
print("acc: ", accuracy)

'''
==========cifar10_MobileNetV2==========
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
(summary truncated in the original)
'''

X_train, y_train = np.array(X_train), np.array(y_train)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))

# Build the Model
model = Sequential()

model.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units=1))

model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, y_train, epochs=25, batch_size=32)

#### Test the Model Accuracy on Existing Data ####
test_start = datetime.datetime(2020, 1, 1)
test_end = datetime.datetime.now()

test_data = web.DataReader(company, 'yahoo', test_start, test_end)
actual_prices = test_data['Close'].values

total_dataset = pd.concat((data['Close'], test_data['Close']), axis=0)


model_inputs = total_dataset[len(total_dataset) - len(test_data) - prediction_days:].values
model_inputs = model_inputs.reshape(-1, 1)
model_inputs = scalar.transform(model_inputs)
model = Sequential()

model.add(Dense(units=9, activation='relu'))

model.add(Dense(units=15, activation='relu'))

model.add(Dense(units=7, activation='relu'))

model.add(Dense(units=1, activation='sigmoid'))

# For a binary classification problem
model.compile(loss='binary_crossentropy', optimizer='adam')
early_stop = EarlyStopping(monitor='val_loss',
                           mode='min',
                           verbose=1,
                           patience=25)

model.fit(x=X_train,
          y=y_train,
          epochs=600,
          validation_data=(X_test, y_test),
          verbose=1,
          callbacks=[early_stop])

model_loss = pd.DataFrame(model.history.history)
model_loss.plot()

predictions = (model.predict(X_test) > 0.5).astype("int32")
print(confusion_matrix(y_test, predictions))
Example #30
File: myNN.py  Project: hooperw/aima-python
                     -0.51, -0.14, -0.25, 1.03, 0.45, -0.05, 0.25, -0.1, -0.53,
                     -0.49, -0.15, 0.24, -0.51, -0.21, 0.77], dtype='float32'),
              array([[0.05, -0.51, -0.13, 0.09, -0.33, -0.23, -0.01],
                     [-0.12, -0.64, -0.54, 0.06, -0.37, 0.01, -0.02],
                     [0.24, -0.65, -0.19, -0.09, 0.12, -0.32, -0.14],
                     [-0.12, -0.52, -0.89, -0.09, -0.09, 0.07, -0.49],
                     [-0.46, -0.36, -0.4, 0.78, -0.33, -0.03, -0.35],
                     [-2.74, -0.43, 0.57, -0.45, 1.05, -1.28, 0.66]], dtype='float32'),
              array([-0.52, -0.43, -0.12, -0.02, -0.08, -0.01, -0.04, -0.08, -0.01,
                     -0.42, -0.16, -0.39, -0.45, -0.46, -0.43, -0.26, -0.44, -0.2,
                     -0.41, -0.02, -0.3, -0.18, -0.42, 0.02, -0.43, -0.38, -0.43,
                     -0.42, -0.23, -0.42, -0.09, -0.15, 0.12], dtype='float32')]

# test the model and your weights

firstmodel1.fit(chessboard, chessboardtarget, epochs=10)
firstmodel1.set_weights(myWeights1)
predict3 = firstmodel1.predict(chessboardtarget)
np.set_printoptions(suppress=True)
np.set_printoptions(precision=9)
print('prediction =', predict3)

model1.fit(chessboard, chessboardtarget, epochs=10)
model1.set_weights(myWeights1)
predict3 = model1.predict(chessboardtarget)
np.set_printoptions(suppress=True)
np.set_printoptions(precision=9)
print('prediction =', predict3)

bettermodel1.fit(chessboard, chessboardtarget, epochs=10)
model1.set_weights(myWeights1)
Example #31
model = Sequential()
model.add(Dense(10, input_dim=3))  # 3 inputs
model.add(Dense(5))
model.add(Dense(10))
model.add(Dense(500))
model.add(Dense(400))
# model.add(Dense(3))  # 3 output columns
model.add(Dense(1))  # 1 output

#--------------------- complete the rest
# 3. compile
model.compile(loss='mse', optimizer='adam', metrics=['mae'])

# train
model.fit(x_train, y_train, epochs=100, validation_split=0.3)

# 4. evaluate, predict
loss = model.evaluate(x_test, y_test)
print("loss : ", loss)

y_predict = model.predict(x_test)
print("결과물 : ", y_predict)

# user-defined RMSE function
import numpy as np
from sklearn.metrics import mean_squared_error


def RMSE(y_test, y_predict):
    return np.sqrt(mean_squared_error(y_test, y_predict))
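
# Usage sketch for the RMSE helper (assumed continuation):
print("RMSE : ", RMSE(y_test, y_predict))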
Example #32
def train(train_file_path, validate_file_path, model_file_path, rseed=None):
    """Train a neural network to detect variants.

    Parameters
    ----------
    train_file_path : str
        Input tabulated feature file for training.
    validate_file_path : str
        Input tabulated feature file for validation during training.
    model_file_path : str
        Output trained model.
    rseed : int
        Random seed to ensure reproducible results.  Set to zero for non-deterministic results.
    """
    if rseed:
        logging.info("************************************************************************************************")
        logging.info("NOTICE: setting the random seed also forces single-threaded execution to ensure reproducibility.")
        logging.info("************************************************************************************************")
        logging.debug("Setting random seed = %d" % rseed)
        random.seed(rseed)
        np.random.seed(rseed)
        tf.random.set_seed(rseed)  # TF2 name; the rest of this function already uses TF2 APIs

        # Limit operation to 1 thread for deterministic results.
        cores = 1
    else:
        logging.info("************************************************************************************************")
        logging.info("NOTICE: results are not reproducible when rseed is not set.")
        logging.info("************************************************************************************************")

        # Use all CPUs
        cores = psutil.cpu_count(logical=True)

    logging.info("Using %d CPUs", cores)
    tf.config.threading.set_inter_op_parallelism_threads(cores)
    tf.config.threading.set_intra_op_parallelism_threads(cores)

    logging.debug("Kyos train, version %s" % __version__)
    logging.debug("Loading data...")
    data, one_hot_labels = load_train_data(train_file_path, first_ftr_idx, last_ftr_idx)
    data_validation, one_hot_label_validation = load_train_data(validate_file_path, first_ftr_idx, last_ftr_idx)

    logging.debug("Defining model...")
    model = Sequential()
    model.add(Dense(40, input_dim=num_input_features))
    model.add(Activation("relu"))
    model.add(Dropout(0.2))

    model.add(Dense(30))
    model.add(Activation("relu"))
    model.add(Dropout(0.2))

    model.add(Dense(30))
    model.add(Activation("relu"))
    model.add(Dropout(0.2))

    model.add(Dense(30))
    model.add(Activation("relu"))

    model.add(Dense(9, activation='softmax'))

    optimizer = keras.optimizers.RMSprop(lr=0.0005)

    logging.debug("Compiling model...")
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    early_stopping_monitor = EarlyStopping(patience=10, restore_best_weights=True)
    callbacks = [early_stopping_monitor]

    logging.debug("Fitting model...")
    model.fit(data, one_hot_labels, validation_data=(data_validation, one_hot_label_validation), batch_size=100000, callbacks=callbacks, epochs=100, verbose=2)

    logging.debug("Saving model...")
    model.save(model_file_path)
    logging.debug("Training finished.")
predictors, label = input_sequences[:, :-1], input_sequences[:, -1]

label = ku.to_categorical(label, num_classes=total_words)

model = Sequential()
model.add(Embedding(total_words, 100, input_length=max_sequence_len-1))
model.add(Bidirectional(LSTM(150, return_sequences = True)))
model.add(Dropout(0.2))
model.add(LSTM(100))
model.add(Dense(total_words//2, activation='relu', kernel_regularizer=regularizers.l2(0.01)))
model.add(Dense(total_words, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())


history = model.fit(predictors, label, epochs=100, verbose=1)

import matplotlib.pyplot as plt
acc = history.history['accuracy']  # key matches metrics=['accuracy'] above
loss = history.history['loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'b', label='Training accuracy')
plt.title('Training accuracy')

plt.figure()

plt.plot(epochs, loss, 'b', label='Training Loss')
plt.title('Training loss')
plt.legend()
pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)

pickle_in = open("y.pickle","rb")
y = pickle.load(pickle_in)

X = X/255.0

model = Sequential()
model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors

model.add(Dense(64))

model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X, y, batch_size=32, epochs=3, validation_split=0.3)

Example #35
# 2. build the model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# from keras.layers import Dense

model = Sequential()
model.add(Dense(10, input_dim=5))
model.add(Dense(5))
model.add(Dense(5))
model.add(Dense(2))

# 3. compile, train
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
model.fit(x_train,
          y_train,
          epochs=100,
          batch_size=1,
          validation_split=0.2,
          verbose=3)
# verbose : controls how training progress is displayed
# 0 - silent; no progress output, which makes training slightly faster
# 1 - progress bar, the default
# 2 - one line per epoch, without the bar
# other values - only the epoch number is shown

# 4. evaluate, predict
loss, mae = model.evaluate(x_test, y_test)
print('loss :', loss)
print('mae :', mae)

y_predict = model.predict(x_test)
# print(y_predict)
Example #36
    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=opt,
                  metrics=['accuracy'])

    callbacks = [
        # Horovod: broadcast initial variable states from rank 0 to all other processes.
        # This is necessary to ensure consistent initialization of all workers when
        # training is started with random weights or restored from a checkpoint.
        hvd.callbacks.BroadcastGlobalVariablesCallback(0),
    ]

    # Horovod: save checkpoints only on worker 0 to prevent other workers from corrupting them.
    if hvd.rank() == 0:
        callbacks.append(tf.keras.callbacks.ModelCheckpoint('./checkpoint-{epoch}.h5'))

    model.fit(x_train, y_train,
              batch_size=batch_size,
              callbacks=callbacks,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=0)

    print('Test loss:', score[0])
    print('Test accuracy:', score[1])

    # Horovod: Save model only on worker 0 (i.e. master)
    if hvd.rank() == 0:
        saved_model_path = tf.contrib.saved_model.save_keras_model(model, args.model_dir)
        print("Model successfully saved at: {}".format(saved_model_path))
Example #37
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard

early_stopping = EarlyStopping(monitor='loss', patience=20, mode='auto')

# logs go into the 'graph' folder
# once the graph folder exists and is populated, TensorBoard is ready to use
# note: many logs can overlap in the display, so delete old logs if that happens
to_hist = TensorBoard(log_dir='graph', histogram_freq=0,
                      write_graph=True, write_images=True
)

model.compile(loss='categorical_crossentropy', 
              optimizer='adam', 
              metrics=['accuracy'])  # "mean_squared_error" (the full name also works)

model.fit(x_train, y_train, epochs=100, batch_size=32, validation_split=0.2, callbacks=[early_stopping])



# 4. evaluate, predict
# keep these names consistent with the ones used in fit

loss, accuracy = model.evaluate(x_test, y_test, batch_size=32)

print("======cifar100_DNN=======")
print("column: ", d)
# model.summary()


print("loss: ", loss)
print("acc: ", accuracy)
Example #38
File: test.py  Project: cottrell/notebooks
from tensorflow.keras.callbacks import TensorBoard

(X_train,y_train), (X_test, y_test) = mnist.load_data()

X_train = X_train.reshape(60000,28,28,1).astype('float32')
X_test = X_test.reshape(10000,28,28,1).astype('float32')

X_train /= 255
X_test /= 255

n_classes = 10
y_train = keras.utils.to_categorical(y_train, n_classes)
y_test = keras.utils.to_categorical(y_test, n_classes)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=(28,28,1)) )
model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

tensor_board = TensorBoard('./logs/LeNet-MNIST-1')

model.fit(X_train, y_train, batch_size=128, epochs=15, verbose=1, validation_data=(X_test,y_test), callbacks=[tensor_board])

Example #39
def Alex_net():
    if cbbtrain.value() == "Y":
        train_data_dir = data.train
        validation_data_dir = data.val
        test_data_dir = data.test
    else:
        train_data_dir = data2.train
        test_data_dir = data2.test
        validation_data_dir = data2.val
    epochs = Take_input()
    if K.image_data_format() == 'channels_first':
        input_shape = (3, img_width, img_height)
    else:
        input_shape = (img_width, img_height, 3)

    model = Sequential()
    # 1st Convolutional Layer
    model.add(
        Conv2D(filters=96,
               input_shape=input_shape,
               kernel_size=(11, 11),
               strides=(4, 4),
               padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same'))
    # 2nd Convolutional Layer
    model.add(
        Conv2D(filters=256, kernel_size=(5, 5), strides=(1, 1),
               padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same'))
    # 3rd Convolutional Layer
    model.add(
        Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1),
               padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # 4th Convolutional Layer
    model.add(
        Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1),
               padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # 5th Convolutional Layer
    model.add(
        Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),
               padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same'))
    # Fully Connected layer
    model.add(Flatten())
    # 1st Fully Connected Layer
    model.add(Dense(4096, input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # Add Dropout to prevent overfitting
    model.add(Dropout(0.5))
    # 2nd Fully Connected Layer
    model.add(Dense(4096))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # Add Dropout
    model.add(Dropout(0.5))
    # Output Layer
    model.add(Dense(2))
    model.add(BatchNormalization())
    model.add(Activation('softmax'))

    model.compile(loss="binary_crossentropy",
                  optimizer="rmsprop",
                  metrics=["accuracy"])

    train_datagen = ImageDataGenerator()

    validation_datagen = ImageDataGenerator()

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='binary')

    validation_generator = validation_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='binary')

    test_datagen = ImageDataGenerator()

    test_generator = test_datagen.flow_from_directory(test_data_dir,
                                                      target_size=(img_width,
                                                                   img_height),
                                                      batch_size=batch_size,
                                                      class_mode='binary')
    history = model.fit(train_generator,
                        steps_per_epoch=nb_train_samples // batch_size,
                        epochs=epochs,
                        validation_data=validation_generator,
                        validation_steps=nb_train_samples // batch_size)

    def summarize_diagnostics(history):
        # plot loss
        pyplot.subplot(211)
        pyplot.title('Cross Entropy Loss')
        pyplot.plot(history.history['loss'], color='blue', label='train')
        pyplot.plot(history.history['val_loss'], color='orange', label='test')
        # plot accuracy
        pyplot.subplot(212)
        pyplot.title('Classification Accuracy')
        pyplot.plot(history.history['accuracy'], color='blue', label='train')
        pyplot.plot(history.history['val_accuracy'],
                    color='orange',
                    label='test')
        # save plot to file
        filename = sys.argv[0].split('/')[-1]
        pyplot.savefig(filename + '_plot1.png')
        pyplot.close()

    model.save("model1.h5")
    print("Saved model to disk")

    score = model.evaluate_generator(test_generator,
                                     nb_test_samples / batch_size,
                                     workers=12)

    scores = model.predict_generator(test_generator,
                                     nb_test_samples / batch_size,
                                     workers=12)

    # print("Loss: ", score[0], "Accuracy: ", score[1])

    correct = 0
    for i, n in enumerate(test_generator.filenames):
        if n.startswith("Fake") and scores[i][0] <= 0.5:
            correct += 1
        if n.startswith("Live") and scores[i][0] > 0.5:
            correct += 1

    print("Correct:", correct, " Total: ", len(test_generator.filenames))
    print("Loss: ", score[0], "Accuracy: ", score[1])

    Y_pred = model.predict_generator(test_generator,
                                     nb_test_samples // batch_size)
    y_pred = np.argmax(Y_pred, axis=1)
    print('Confusion Matrix')
    print(confusion_matrix(test_generator.classes, y_pred))
    print('Classification Report')
    target_names = ['Live', 'Fake']
    print(
        classification_report(test_generator.classes,
                              y_pred,
                              target_names=target_names))

    summarize_diagnostics(history)
Example #40
File: myNN.py  Project: hooperw/aima-python
model.add(Dense(8,
                activation=keras.activations.sigmoid,
                ))
model.add(Dense(3,
                activation=keras.activations.sigmoid,
                ))

model.compile(
              optimizer=tf.train.AdamOptimizer(0.001),
              # loss=keras.losses.categorical_crossentropy,
              loss=keras.losses.mse,
              metrics=[keras.metrics.binary_accuracy]
              )

# This is the process I used to train my weights
model.fit(x_train, y_train, epochs=2000)
myWeights = model.get_weights()
np.set_printoptions(suppress=True)
np.set_printoptions(precision=2)
print('myWeights =', myWeights)

# These are the weights I got, pretty-printed
# myWeights = [
# #     # first layer, 7x8
#     array([[ 1.2 , -1.16, -1.97,  2.16,  0.97,  0.86, -1.2 ,  1.12],
#        [ 1.21, -1.17, -1.97,  2.16,  0.84,  0.76, -1.19,  1.22],
#        [ 1.19, -1.2 , -1.98,  2.15,  0.87,  0.84, -1.19,  1.13],
#        [ 1.21, -1.2 , -1.97,  2.15,  0.89,  0.8 , -1.2 ,  1.16],
#        [ 1.21, -1.12, -1.97,  2.16,  0.99,  0.8 , -1.21,  1.18],
#        [ 1.23, -1.09, -1.98,  2.15,  1.12,  0.81, -1.24,  1.13],
#        [ 1.24, -1.11, -1.99,  2.14,  1.  ,  0.77, -1.23,  1.17]],
Example #41
model.add(Activation("relu"))

model.add(Conv2D(256,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))

model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(64))

model.add(Dense(3))
model.add(Activation("softmax"))

model.compile(loss = "sparse_categorical_crossentropy",optimizer = "adam",metrics=['accuracy'])

model.fit(X,y,batch_size = 32,epochs = 10,validation_split = 0.2)
model.save('3r-classifier')
new_model = tf.keras.models.load_model('3r-classifier')  # only `tf` is imported in this snippet, so use tf.keras
import matplotlib.pyplot as plt
import cv2
import numpy as np
import os
import random
from tqdm import tqdm
import tensorflow as tf

df= "C:/Users/prajw/OneDrive/Desktop/fruitsandveggie"

c= ["raw_test","ripe_test","rotten_test"]

for C in c: