Code Example #1
from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.optimizers import register_opt
from ztlearn.dl.layers import RNN, Dense, Flatten

text = open(
    '../../../ztlearn/datasets/text/tinyshakespeare_short.txt').read().lower()
x, y, len_chars = gen_char_sequence_xtym(text, maxlen=30, step=1)
del text

train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.4)

# optimizer definition
opt = register_opt(optimizer_name='rmsprop', momentum=0.1, learning_rate=0.01)

# model definition
model = Sequential()
model.add(
    RNN(128, activation='tanh', bptt_truncate=24, input_shape=(30, len_chars)))
model.add(Flatten())
model.add(Dense(len_chars, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('shakespeare rnn')

model_epochs = 20
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

model_name = model.model_name
# completion of the truncated plot_metric call (the fit_stats keys are assumed)
plot_metric('loss', model_epochs, fit_stats['train_loss'],
            fit_stats['valid_loss'], model_name=model_name)
Code Example #2
from sklearn import datasets

from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.optimizers import register_opt
from ztlearn.dl.layers import RNN, Dense, Flatten

data = datasets.load_digits()
train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.target, test_size=0.4, random_seed=5)

# plot samples of training data
plot_img_samples(train_data, train_label)

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.001)

# model definition
model = Sequential()
model.add(RNN(128, activation='tanh', bptt_truncate=5, input_shape=(8, 8)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))  # 10 digit classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary(model_name='digits rnn')

model_epochs = 100
fit_stats = model.fit(train_data.reshape(-1, 8, 8),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 8, 8),
                                       one_hot(test_label)),
                      shuffle_data=True)
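
# a possible continuation (a sketch, not in the original snippet): map the softmax
# outputs back to class indices with ztlearn's unhot, as Code Example #5 does,
# and score them against the held-out labels
predictions = unhot(model.predict(test_data.reshape(-1, 8, 8)))
print('accuracy: {:.4f}'.format((predictions == test_label).mean()))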
Code Example #3
from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.optimizers import register_opt
from ztlearn.dl.layers import RNN, Flatten, Dense

x, y, seq_len = gen_mult_sequence_xtym(3000, 10, 10)
train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.3)

# print samples of training data
print_seq_samples(train_data, train_label, 0)

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.01)

# model definition
model = Sequential()
model.add(RNN(5, activation='tanh', bptt_truncate=5, input_shape=(9, seq_len)))
model.add(Flatten())
model.add(Dense(seq_len, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('seq rnn')

model_epochs = 15
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=100,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

print_seq_results(model.predict(test_data), test_label, test_data)
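
# optionally visualize the loss curves, mirroring the plot_metric call that closes
# Code Example #1 (the fit_stats keys are assumed)
plot_metric('loss', model_epochs, fit_stats['train_loss'],
            fit_stats['valid_loss'], model_name=model.model_name)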
Code Example #4
import numpy as np

from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.optimizers import register_opt
from ztlearn.dl.layers import Embedding, RNN, Flatten, Dense

# text_list is assumed context from the original snippet: a list of 12 short
# sentences, one per entry in sentence_targets below
paragraph = ' '.join(text_list)
sentences_tokens, vocab_size, longest_sentence = get_sentence_tokens(paragraph)
sentence_targets = one_hot(np.array([1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1]))

train_data, test_data, train_label, test_label = train_test_split(sentences_tokens,
                                                                  sentence_targets,
                                                                  test_size   = 0.2,
                                                                  random_seed = 5)

# optimizer definition
opt = register_opt(optimizer_name = 'adamax', momentum = 0.01, learning_rate = 0.001)

model = Sequential()
model.add(Embedding(vocab_size, 2, input_length = longest_sentence))
model.add(RNN(5, activation = 'tanh', bptt_truncate = 2, input_shape = (2, longest_sentence)))
model.add(Flatten())
model.add(Dense(2, activation = 'softmax'))
model.compile(loss = 'bce', optimizer = opt)

model.summary('embedded sentences rnn')

"""
NOTE:
batch size should be equal the size of embedding
vectors and divisible  by the training  set size
"""

model_epochs = 500
# the call was truncated here; completed following the pattern of the other examples
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size      = batch_size,
                      epochs          = model_epochs,
                      validation_data = (test_data, test_label))
Code Example #5
from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.optimizers import register_opt
from ztlearn.dl.layers import RNN, Flatten, Dense

# assumed context: the snippet begins mid-call. The preserved trailing arguments
# imply a CIFAR-10 load and split along these lines; the fetch_cifar_10 helper
# and the test_size value are assumptions, only random_seed and cut_off come
# from the source
data = fetch_cifar_10()
train_data, test_data, train_label, test_label = train_test_split(data.data,
                                                                  data.target,
                                                                  test_size   = 0.3,
                                                                  random_seed = 5,
                                                                  cut_off     = 10000)

# plot samples of training data
plot_img_samples(train_data, train_label, dataset = 'cifar', channels = 3)

reshaped_image_dims = 3 * 1024 # ==> (channels * (height * width))
reshaped_train_data = z_score(train_data.reshape(train_data.shape[0], reshaped_image_dims).astype('float32'))
reshaped_test_data  = z_score(test_data.reshape(test_data.shape[0], reshaped_image_dims).astype('float32'))

# optimizer definition
opt = register_opt(optimizer_name = 'adam', momentum = 0.01, learning_rate = 0.0001)

# model definition
model = Sequential()
model.add(RNN(256, activation = 'tanh', bptt_truncate = 5, input_shape = (3, 1024)))
model.add(Flatten())
model.add(Dense(10, activation = 'softmax')) # 10 cifar-10 classes
model.compile(loss = 'categorical_crossentropy', optimizer = opt)

model.summary(model_name = 'cifar-10 rnn')

model_epochs = 100 # add more epochs
fit_stats = model.fit(reshaped_train_data.reshape(-1, 3, 1024),
                      one_hot(train_label),
                      batch_size      = 128,
                      epochs          = model_epochs,
                      validation_data = (reshaped_test_data.reshape(-1, 3, 1024), one_hot(test_label)),
                      shuffle_data    = True)

predictions = unhot(model.predict(reshaped_test_data.reshape(-1, 3, 1024), True))
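
# a sketch of scoring the predictions above against the held-out labels (not in
# the original snippet)
print('accuracy: {:.4f}'.format((predictions == test_label).mean()))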