Example 1
from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.dl.optimizers import register_opt
from ztlearn.dl.layers import LSTM, Dense, Flatten
from ztlearn.datasets.digits import fetch_digits

data = fetch_digits()
train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.target, test_size=0.3, random_seed=15)

# plot samples of training data
plot_img_samples(train_data, train_label)

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.001)

# Model definition
model = Sequential()
model.add(LSTM(128, activation='tanh', input_shape=(8, 8)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))  # 10 digits classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('digits lstm')

model_epochs = 100
fit_stats = model.fit(train_data.reshape(-1, 8, 8),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 8, 8),
                                       one_hot(test_label)),
                      shuffle_data=True)
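
Example 1 stops after training. Assuming fit_stats exposes the same keys as in the later examples (train_loss, valid_loss, train_acc and valid_acc), the loss and accuracy curves can be plotted the same way:

# Sketch: plot the training curves for Example 1, assuming fit_stats
# carries the same keys used in Examples 2 and 3.
plot_metric('Loss', model_epochs, fit_stats['train_loss'],
            fit_stats['valid_loss'])
plot_metric('Accuracy', model_epochs, fit_stats['train_acc'],
            fit_stats['valid_acc'])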
Example 2
from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.dl.optimizers import register_opt
from ztlearn.dl.layers import LSTM, Flatten, Dense

with open('../../data/text/tinyshakespeare.txt') as corpus:
    text = corpus.read().lower()

# build fixed-length character windows (maxlen=30) and their one-hot targets
x, y, len_chars = gen_char_sequence_xtym(text, maxlen=30, step=1)
del text

train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.4)

opt = register_opt(optimizer_name='rmsprop', momentum=0.1, learning_rate=0.01)

# Model definition
model = Sequential()
model.add(LSTM(128, activation='tanh', input_shape=(30, len_chars)))
model.add(Flatten())
model.add(Dense(len_chars, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model_epochs = 2
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

plot_metric('Loss', model_epochs, fit_stats['train_loss'],
            fit_stats['valid_loss'])
plot_metric('Accuracy', model_epochs, fit_stats['train_acc'],
            fit_stats['valid_acc'])
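
Example 2 reads the corpus from a relative path that may not exist in every checkout. Below is a minimal sketch for fetching the commonly used char-rnn copy of tiny Shakespeare before running the example; the URL and destination path are assumptions, adjust them to your layout.

# Sketch: download the tiny Shakespeare corpus if it is missing.
# The URL points to the char-rnn copy of the dataset; the destination
# matches the relative path used in Example 2.
import os
from urllib.request import urlretrieve

corpus_path = '../../data/text/tinyshakespeare.txt'
if not os.path.isfile(corpus_path):
    os.makedirs(os.path.dirname(corpus_path), exist_ok=True)
    urlretrieve('https://raw.githubusercontent.com/karpathy/char-rnn/'
                'master/data/tinyshakespeare/input.txt', corpus_path)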
Example 3
from ztlearn.utils import *
from ztlearn.dl.layers import LSTM
from ztlearn.dl.models import Sequential
from ztlearn.dl.optimizers import register_opt


x, y, seq_len = gen_mult_sequence_xtyt(1000, 10, 10)
train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.4)

print_seq_samples(train_data, train_label)

opt = register_opt(optimizer_name='adagrad', momentum=0.01, learning_rate=0.01)

# Model definition
model = Sequential()
model.add(LSTM(10, activation='tanh', input_shape=(10, seq_len)))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model_epochs = 100
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=100,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

# compare predicted sequences against the expected test sequences
print_seq_results(model.predict(test_data, (0, 2, 1)),
                  test_label, test_data, unhot_axis=2)

plot_metric('Loss', model_epochs, fit_stats['train_loss'],
            fit_stats['valid_loss'])
plot_metric('Accuracy', model_epochs, fit_stats['train_acc'],
            fit_stats['valid_acc'])