Example #1
reshaped_image_dims = 3 * 1024 # ==> (channels * (height * width))
reshaped_train_data = z_score(train_data.reshape(train_data.shape[0], reshaped_image_dims).astype('float32'))
reshaped_test_data  = z_score(test_data.reshape(test_data.shape[0], reshaped_image_dims).astype('float32'))

# optimizer definition
opt = register_opt(optimizer_name = 'adam', momentum = 0.01, lr = 0.0001)

# model definition
model = Sequential()
model.add(RNN(256, activation = 'tanh', bptt_truncate = 5, input_shape = (3, 1024)))
model.add(Flatten())
model.add(Dense(10, activation = 'softmax')) # 10 CIFAR-10 classes
model.compile(loss = 'categorical_crossentropy', optimizer = opt)

model.summary(model_name = 'cifar-10 rnn')

model_epochs = 100 # add more epochs
fit_stats = model.fit(reshaped_train_data.reshape(-1, 3, 1024),
                      one_hot(train_label),
                      batch_size      = 128,
                      epochs          = model_epochs,
                      validation_data = (reshaped_test_data.reshape(-1, 3, 1024), one_hot(test_label)),
                      shuffle_data    = True)

predictions = unhot(model.predict(reshaped_test_data.reshape(-1, 3, 1024), True))
print_results(predictions, test_label)
plot_img_results(test_data, test_label, predictions, dataset = 'cifar', channels = 3)

model_name = model.model_name
plot_metric('loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name = model_name)
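These snippets all lean on a handful of ztlearn.utils helpers (z_score, one_hot, unhot, range_normalize) whose definitions never appear here. Below is a minimal NumPy sketch of the behaviour the examples appear to rely on; these are illustrative stand-ins, not the library's actual code:

import numpy as np

def z_score_sketch(data):
    # assumed: standardize each feature to zero mean and unit variance
    return (data - data.mean(axis=0)) / (data.std(axis=0) + 1e-8)

def one_hot_sketch(labels, num_classes=None):
    # assumed: integer labels -> one-hot rows
    num_classes = num_classes or int(labels.max()) + 1
    encoded = np.zeros((labels.shape[0], num_classes), dtype=np.float32)
    encoded[np.arange(labels.shape[0]), labels.astype(int)] = 1.0
    return encoded

def unhot_sketch(one_hot_preds):
    # assumed: one-hot / probability rows -> integer class labels
    return np.argmax(one_hot_preds, axis=-1)

def range_normalize_sketch(data, low=-1.0, high=1.0):
    # assumed: rescale values linearly into [low, high]; the default [-1, 1]
    # is inferred from the '# rescale to range [-1, 1]' comments below
    d_min, d_max = data.min(), data.max()
    return (data - d_min) / (d_max - d_min) * (high - low) + low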
Example #2
text = open(
    '../../../ztlearn/datasets/text/tinyshakespeare_short.txt').read().lower()
x, y, len_chars = gen_char_sequence_xtyt(text, maxlen=30, step=1)
del text

train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.4)

# optimizer definition
opt = register_opt(optimizer_name='rmsprop', momentum=0.1, learning_rate=0.01)

# model definition
model = Sequential()
model.add(GRU(128, activation='tanh', input_shape=(30, len_chars)))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('shakespeare gru')

model_epochs = 20
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

model_name = model.model_name
plot_metric('loss',
            model_epochs,
            fit_stats['train_loss'],
            fit_stats['valid_loss'],
            model_name=model_name)
plot_metric('accuracy',
            model_epochs,
            fit_stats['train_acc'],
            fit_stats['valid_acc'],
            model_name=model_name)
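A natural follow-up to a character model like this is sampling text from it. The sketch below assumes model.predict accepts a one-hot window of shape (1, maxlen, len_chars) and returns next-character scores; sample_text_sketch and index_to_char are hypothetical names, and ztlearn's exact predict semantics are not shown in these snippets:

import numpy as np

def sample_text_sketch(model, seed_window, index_to_char, length=200):
    # seed_window: (1, maxlen, len_chars) one-hot slice taken from the corpus
    window = seed_window.copy()
    generated = []
    for _ in range(length):
        scores = np.asarray(model.predict(window))[0]
        # take the last time step if the model emits per-step scores
        next_idx = int(np.argmax(scores[-1] if scores.ndim == 2 else scores))
        generated.append(index_to_char[next_idx])
        step = np.zeros((1, 1, window.shape[-1]), dtype=window.dtype)
        step[0, 0, next_idx] = 1.0
        window = np.concatenate([window[:, 1:, :], step], axis=1)  # slide window
    return ''.join(generated)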
Example #3
# plot samples of training data
plot_img_samples(train_data, train_label, dataset='mnist')

# optimizer definition
opt = register_opt(optimizer_name='rmsprop',
                   momentum=0.01,
                   learning_rate=0.001)

# model definition
model = Sequential()
model.add(GRU(128, activation='tanh', input_shape=(28, 28)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))  # 10 digit classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary(model_name='mnist gru')

model_epochs = 100
fit_stats = model.fit(train_data.reshape(-1, 28, 28),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 28, 28),
                                       one_hot(test_label)),
                      shuffle_data=True)

predictions = unhot(model.predict(test_data.reshape(-1, 28, 28), True))
print_results(predictions, test_label)
plot_img_results(test_data, test_label, predictions, dataset='mnist')

model_name = model.model_name
Example #4
    cut_off=2000)

# plot samples of training data
plot_img_samples(train_data[:40], train_label[:40], dataset='mnist')

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.001)

# model definition
model = Sequential()
model.add(RNN(128, activation='tanh', bptt_truncate=5, input_shape=(28, 28)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))  # 10 digit classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('mnist rnn')

model_epochs = 100
fit_stats = model.fit(train_data.reshape(-1, 28, 28),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 28, 28),
                                       one_hot(test_label)),
                      shuffle_data=True)

predictions = unhot(model.predict(test_data.reshape(-1, 28, 28), True))

print_results(predictions, test_label)
plot_img_results(test_data[:40], test_label[:40], predictions, dataset='mnist')
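The cut_off=2000 argument is never documented in these snippets; it is assumed to cap the number of samples taken before splitting, which keeps the RNN demos fast. A hypothetical equivalent:

import numpy as np

def cut_off_sketch(samples, targets, cut_off):
    # assumed behaviour: keep only the first cut_off samples before splitting
    return samples[:cut_off], targets[:cut_off]

x, y = np.random.rand(5000, 784), np.random.randint(0, 10, 5000)
x_small, y_small = cut_off_sketch(x, y, 2000)
print(x_small.shape, y_small.shape)  # (2000, 784) (2000,)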
Example #5
train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.target, test_size=0.4, random_seed=5)

# plot samples of training data
plot_img_samples(train_data, train_label)

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.001)

# model definition
model = Sequential()
model.add(RNN(128, activation='tanh', bptt_truncate=5, input_shape=(8, 8)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))  # 10 digit classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary(model_name='digits rnn')

model_epochs = 100
fit_stats = model.fit(train_data.reshape(-1, 8, 8),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 8, 8),
                                       one_hot(test_label)),
                      shuffle_data=True)

predictions = unhot(model.predict(test_data.reshape(-1, 8, 8), True))
print_results(predictions, test_label)
plot_img_results(test_data, test_label, predictions)

model_name = model.model_name
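The recurrent image examples treat each picture as a sequence: reshape(-1, 8, 8) turns a flattened 8x8 digit into 8 time steps of 8 pixel features, so the RNN scans the image row by row. A quick shape check:

import numpy as np

flat_digits = np.random.rand(4, 64)            # four flattened 8x8 digits
as_sequences = flat_digits.reshape(-1, 8, 8)   # (batch, time_steps, features)
print(as_sequences.shape)                      # (4, 8, 8): rows fed step by step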
Example #6
train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.target, test_size=0.3, random_seed=15)

# plot samples of training data
plot_img_samples(train_data, train_label)

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.01, lr=0.001)

# Model definition
model = Sequential()
model.add(LSTM(128, activation='tanh', input_shape=(8, 8)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))  # 10 digit classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('digits lstm')

model_epochs = 100
fit_stats = model.fit(train_data.reshape(-1, 8, 8),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 8, 8),
                                       one_hot(test_label)),
                      shuffle_data=True)

predictions = unhot(model.predict(test_data.reshape(-1, 8, 8), True))
print_results(predictions, test_label)
plot_img_results(test_data, test_label, predictions)

plot_metric('loss',
            model_epochs,
            fit_stats['train_loss'],
            fit_stats['valid_loss'],
            model_name=model.model_name)
Example #7
    cut_off=2000)

# plot samples of training data
plot_img_samples(train_data[:40], train_label[:40], dataset='mnist')

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.01, lr=0.001)

# model definition
model = Sequential()
model.add(RNN(128, activation='tanh', bptt_truncate=5, input_shape=(28, 28)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))  # 10 fashion classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('fashion mnist rnn')

model_epochs = 100
fit_stats = model.fit(train_data.reshape(-1, 28, 28),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 28, 28),
                                       one_hot(test_label)),
                      shuffle_data=True)

predictions = unhot(model.predict(test_data.reshape(-1, 28, 28), True))

print_results(predictions, test_label)
plot_img_results(test_data[:40], test_label[:40], predictions, dataset='mnist')

Example #8
encoder = stack_encoder_layers(init=init_type)
decoder = stack_decoder_layers(init=init_type)

opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.0001)

autoencoder = Sequential(init_method=init_type)
autoencoder.layers.extend(encoder.layers)
autoencoder.layers.extend(decoder.layers)
autoencoder.compile(loss='categorical_crossentropy', optimizer=opt)

encoder.summary('mnist encoder')
decoder.summary('mnist decoder')

autoencoder.summary('mnist autoencoder')

model_name = autoencoder.model_name

mnist = fetch_mnist()
images = range_normalize(mnist.data.astype(np.float32), 0,
                         1)  # rescale to range [0, 1]

train_data, test_data, train_label, test_label = train_test_split(
    images, images, test_size=0.2, random_seed=15, cut_off=2000)
plot_img_samples(train_data[:40], None, dataset='mnist')

model_epochs = 500
fit_stats = autoencoder.fit(train_data,
                            train_label,
                            batch_size=64,
                            epochs=model_epochs,
                            validation_data=(test_data, test_label))
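stack_encoder_layers and stack_decoder_layers are defined earlier in the original script; only the tail of a decoder builder survives in Example #12. A minimal sketch of what such builders might look like, where the layer sizes, latent width, and import paths are all assumptions:

from ztlearn.dl.models import Sequential   # import path assumed
from ztlearn.dl.layers import Dense        # import path assumed

def stack_encoder_layers_sketch(init='he_normal', img_dim=784, latent_dim=32):
    # hypothetical builder: compress img_dim pixels down to latent_dim codes
    model = Sequential(init_method=init)
    model.add(Dense(256, activation='relu', input_shape=(img_dim,)))
    model.add(Dense(latent_dim, activation='relu'))
    return model

def stack_decoder_layers_sketch(init='he_normal', img_dim=784, latent_dim=32):
    # hypothetical builder: reconstruct img_dim pixels from latent_dim codes
    model = Sequential(init_method=init)
    model.add(Dense(256, activation='relu', input_shape=(latent_dim,)))
    model.add(Dense(img_dim, activation='sigmoid'))  # matches the tail in Example #12
    return model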
Example #9

encoder = stack_encoder_layers(init=init_type)
decoder = stack_decoder_layers(init=init_type)

opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.001)

autoencoder = Sequential(init_method=init_type)
autoencoder.layers.extend(encoder.layers)
autoencoder.layers.extend(decoder.layers)
autoencoder.compile(loss='categorical_crossentropy', optimizer=opt)

encoder.summary('cifar-10 encoder')
decoder.summary('cifar-10 decoder')

autoencoder.summary('cifar-10 autoencoder')

data = fetch_cifar_10()
train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.data, test_size=0.2, random_seed=5, cut_off=2000)

# plot samples of training data
plot_img_samples(train_data, None, dataset='cifar', channels=3)

transformed_image_dims = img_dim
transformed_train_data = z_score(
    train_data.reshape(train_data.shape[0],
                       transformed_image_dims).astype(np.float32))
transformed_train_label = z_score(
    train_label.reshape(train_label.shape[0],
                        transformed_image_dims).astype(np.float32))
Example #10
text = open(
    '../../../ztlearn/datasets/text/tinyshakespeare_short.txt').read().lower()
x, y, len_chars = gen_char_sequence_xtyt(text, maxlen=30, step=1)
del text

train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.4)

# optimizer definition
opt = register_opt(optimizer_name='rmsprop', momentum=0.1, learning_rate=0.01)

# model definition
model = Sequential()
model.add(LSTM(128, activation='tanh', input_shape=(30, len_chars)))
model.add(Flatten())
model.add(Dense(len_chars, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('shakespeare lstm')

model_epochs = 20
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

model_name = model.model_name
plot_metric('loss',
            model_epochs,
            fit_stats['train_loss'],
            fit_stats['valid_loss'],
            model_name=model_name)
plot_metric('accuracy',
            model_epochs,
            fit_stats['train_acc'],
            fit_stats['valid_acc'],
            model_name=model_name)
Example #11
train_data, test_data, train_label, test_label = train_test_split(sentences_tokens,
                                                                  sentence_targets,
                                                                  test_size   = 0.2,
                                                                  random_seed = 5)

# optimizer definition
opt = register_opt(optimizer_name = 'adamax', momentum = 0.01, lr = 0.001)

model = Sequential()
model.add(Embedding(vocab_size, 2, input_length = longest_sentence))
model.add(RNN(5, activation = 'tanh', bptt_truncate = 2, input_shape = (2, longest_sentence)))
model.add(Flatten())
model.add(Dense(2, activation = 'softmax'))
model.compile(loss = 'bce', optimizer = opt)

model.summary('embedded sentences rnn')

"""
NOTE:
the batch size should equal the size of the embedding
vectors and should evenly divide the training set size
"""

model_epochs = 500
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size = 2,
                      epochs     = model_epochs,
                      validation_data = (test_data, test_label))

plot_metric('loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name = model.model_name)
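The Embedding layer above maps integer token ids to dense vectors: Embedding(vocab_size, 2, input_length=longest_sentence) gives every token a learned 2-dimensional vector, and the lookup itself is plain indexing, as this NumPy sketch shows (all sizes illustrative):

import numpy as np

vocab_size, embed_dim, longest_sentence = 50, 2, 8       # illustrative sizes
embedding_table = np.random.randn(vocab_size, embed_dim) # learned in practice
token_ids = np.random.randint(0, vocab_size, (4, longest_sentence))
embedded = embedding_table[token_ids]                    # plain index lookup
print(embedded.shape)                                    # (4, 8, 2)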
Example #12
    model.add(Dense(img_dim, activation='sigmoid'))

    return model


encoder = stack_encoder_layers(init=init_type)
decoder = stack_decoder_layers(init=init_type)

opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.0001)

autoencoder = Sequential(init_method=init_type)
autoencoder.layers.extend(encoder.layers)
autoencoder.layers.extend(decoder.layers)
autoencoder.compile(loss='categorical_crossentropy', optimizer=opt)

autoencoder.summary('digits autoencoder')

data = datasets.load_digits()
images = range_normalize(data.data.astype(np.float32), 0,
                         1)  # rescale to range [0, 1]
train_data, test_data, train_label, test_label = train_test_split(
    images, images, test_size=0.2, random_seed=15)
# plot samples of training data
plot_img_samples(train_data, None)

model_epochs = 500
fit_stats = autoencoder.fit(train_data,
                            train_label,
                            batch_size=64,
                            epochs=model_epochs,
                            validation_data=(test_data, test_label))
Example #13
    cut_off=2000)

# plot samples of training data
plot_img_samples(train_data, train_label, dataset='mnist')

# optimizer definition
opt = register_opt(optimizer_name='rmsprop', momentum=0.01, lr=0.001)

# model definition
model = Sequential()
model.add(GRU(128, activation='tanh', input_shape=(28, 28)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))  # 10 fashion classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary(model_name='fashion mnist gru')

model_epochs = 100
fit_stats = model.fit(train_data.reshape(-1, 28, 28),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 28, 28),
                                       one_hot(test_label)),
                      shuffle_data=True)

predictions = unhot(model.predict(test_data.reshape(-1, 28, 28), True))
print_results(predictions, test_label)
plot_img_results(test_data, test_label, predictions, dataset='mnist')

model_name = model.model_name
Example #14
train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.3)

# plot samples of training data
print_seq_samples(train_data, train_label, 0)

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.01)

# model definition
model = Sequential()
model.add(RNN(5, activation='tanh', bptt_truncate=5, input_shape=(9, seq_len)))
model.add(Flatten())
model.add(Dense(seq_len, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('seq rnn')

model_epochs = 15
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=100,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

print_seq_results(model.predict(test_data), test_label, test_data)

model_name = model.model_name
plot_metric('loss',
            model_epochs,
            fit_stats['train_loss'],
            fit_stats['valid_loss'],
            model_name=model_name)
Example #15
text = open('../../../datasets/text/nietzsche_short.txt').read().lower()
x, y, len_chars = gen_char_sequence_xtyt(text, maxlen=30, step=1)
del text

train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.4)

# optimizer definition
opt = register_opt(optimizer_name='rmsprop', momentum=0.1, lr=0.01)

# model definition
model = Sequential()
model.add(LSTM(128, activation='tanh', input_shape=(30, len_chars)))
model.add(Flatten())
model.add(Dense(len_chars, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('nietzsche lstm')

model_epochs = 20
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

plot_metric('loss',
            model_epochs,
            fit_stats['train_loss'],
            fit_stats['valid_loss'],
            model_name=model.model_name)
plot_metric('accuracy',
            model_epochs,
            fit_stats['train_acc'],
            fit_stats['valid_acc'],
            model_name=model.model_name)
Example #16
train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.4)

# optimizer definition
opt = register_opt(optimizer_name='rmsprop', momentum=0.1, lr=0.01)

# model definition
model = Sequential()
model.add(
    RNN(128, activation='tanh', bptt_truncate=24, input_shape=(30, len_chars)))
model.add(Flatten())
model.add(Dense(len_chars, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('shakespeare rnn')

model_epochs = 20
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

plot_metric('loss',
            model_epochs,
            fit_stats['train_loss'],
            fit_stats['valid_loss'],
            model_name=model.model_name)
plot_metric('accuracy',
            model_epochs,
            fit_stats['train_acc'],
            fit_stats['valid_acc'],
            model_name=model.model_name)
Example #17
           padding='same'))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(
    Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(10, activation='softmax'))  # 10 digit classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('digits cnn')

model_epochs = 12
fit_stats = model.fit(train_data.reshape(-1, 1, 8, 8),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 1, 8, 8),
                                       one_hot(test_label)),
                      shuffle_data=True)

predictions = unhot(model.predict(test_data.reshape(-1, 1, 8, 8), True))
print_results(predictions, test_label)
plot_img_results(test_data, test_label, predictions)

plot_metric('loss',
            model_epochs,
            fit_stats['train_loss'],
            fit_stats['valid_loss'],
            model_name=model.model_name)
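Note the channels-first (NCHW) layout in the CNN examples: reshape(-1, 1, 8, 8) is (batch, channels, height, width), unlike the (batch, time_steps, features) layout the recurrent examples use. A quick check:

import numpy as np

flat_digits = np.random.rand(4, 64)        # four flattened 8x8 grayscale digits
nchw = flat_digits.reshape(-1, 1, 8, 8)    # (batch, channels, height, width)
print(nchw.shape)                          # (4, 1, 8, 8)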
Example #18
           padding='same'))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(
    Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(10, activation='softmax'))  # 10 CIFAR-10 classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('cifar-10 cnn')

model_epochs = 12
fit_stats = model.fit(train_data.reshape(-1, 3, 32, 32),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 3, 32, 32),
                                       one_hot(test_label)),
                      shuffle_data=True)

predictions = unhot(model.predict(test_data.reshape(-1, 3, 32, 32), True))
print_results(predictions, test_label)
plot_img_results(test_data,
                 test_label,
                 predictions,
                 dataset='cifar',
                 channels=3)
Example #19

x, y, seq_len = gen_mult_sequence_xtyt(1000, 10, 10)
train_data, test_data, train_label, test_label = train_test_split(x, y, test_size = 0.4)

# plot samples of training data
print_seq_samples(train_data, train_label)

# optimizer definition
opt = register_opt(optimizer_name = 'adagrad', momentum = 0.01, learning_rate = 0.01)

# model definition
model = Sequential()
model.add(LSTM(10, activation = 'tanh', input_shape = (10, seq_len)))
model.compile(loss = 'categorical_crossentropy', optimizer = opt)

model.summary('seq lstm')

model_epochs = 100
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size      = 100,
                      epochs          = model_epochs,
                      validation_data = (test_data, test_label))

print_seq_results(model.predict(test_data, (0, 2, 1)), test_label, test_data, unhot_axis = 2)

model_name = model.model_name
plot_metric('loss',     model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name = model_name)
plot_metric('accuracy', model_epochs, fit_stats['train_acc'],  fit_stats['valid_acc'],  model_name = model_name)
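The (0, 2, 1) argument to model.predict and unhot_axis=2 are undocumented in these snippets; they are assumed to permute the prediction axes and then argmax along the class axis so that each time step gets one label. A NumPy sketch of that assumed post-processing:

import numpy as np

scores = np.random.rand(5, 10, 10)     # dummy (batch, classes, time_steps) scores
reordered = scores.transpose(0, 2, 1)  # assumed effect of the (0, 2, 1) argument
decoded = np.argmax(reordered, axis=2) # assumed effect of unhot_axis = 2
print(decoded.shape)                   # (5, 10): one label per time step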
Example #20
train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.4)

# optimizer definition
opt = register_opt(optimizer_name='rmsprop', momentum=0.1, lr=0.01)

# model definition
model = Sequential()
model.add(
    RNN(128, activation='tanh', bptt_truncate=24, input_shape=(30, len_chars)))
model.add(Flatten())
model.add(Dense(len_chars, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('nietzsche rnn')

model_epochs = 20
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

plot_metric('loss',
            model_epochs,
            fit_stats['train_loss'],
            fit_stats['valid_loss'],
            model_name=model.model_name)
plot_metric('accuracy',
            model_epochs,
            fit_stats['train_acc'],
            fit_stats['valid_acc'],
            model_name=model.model_name)
Example #21
# stack and compile the generator
generator = stack_generator_layers(init=init_type)
generator.compile(loss='cce', optimizer=g_opt)

# stack and compile the discriminator
discriminator = stack_discriminator_layers(init=init_type)
discriminator.compile(loss='cce', optimizer=d_opt)

# stack and compile the generator_discriminator
generator_discriminator = Sequential(init_method=init_type)
generator_discriminator.layers.extend(generator.layers)
generator_discriminator.layers.extend(discriminator.layers)
generator_discriminator.compile(loss='cce', optimizer=g_opt)

generator.summary('mnist generator')
discriminator.summary('mnist discriminator')

generator_discriminator.summary('mnist gan')
model_name = generator_discriminator.model_name

# rescale to range [-1, 1]
images = range_normalize(mnist_data.astype(np.float32))

for epoch_idx in range(model_epochs):

    # set the epoch id for print out
    print_epoch = epoch_idx + 1

    # set the discriminator to trainable
    discriminator.trainable = True

    for epoch_k in range(2):
Example #22
x, y, seq_len = gen_mult_sequence_xtyt(1000, 10, 10)
train_data, test_data, train_label, test_label = train_test_split(x, y, test_size = 0.4)

# plot samples of training data
print_seq_samples(train_data, train_label)

# optimizer definition
opt = register_opt(optimizer_name = 'rmsprop', momentum = 0.01, learning_rate = 0.01)
# opt = register_opt(optimizer_name = 'adadelta', momentum = 0.01, learning_rate = 1)

# model definition
model = Sequential()
model.add(GRU(10, activation = 'tanh', input_shape = (10, seq_len)))
model.compile(loss = 'categorical_crossentropy', optimizer = opt)

model.summary('seq gru')

model_epochs = 100
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size      = 100,
                      epochs          = model_epochs,
                      validation_data = (test_data, test_label))

print_seq_results(model.predict(test_data, (0, 2, 1)), test_label, test_data, unhot_axis = 2)

model_name = model.model_name
plot_metric('loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name = model_name)
plot_metric('accuracy', model_epochs, fit_stats['train_acc'], fit_stats['valid_acc'], model_name = model_name)
Example #23
# 1. model definition
model = Sequential(init_method = 'he_normal')
model.add(Dense(256, activation = 'relu', input_shape = (64,)))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(Dense(10, activation = 'relu')) # 10 digit classes
model.compile(loss = 'cce', optimizer = opt)

'''
# 2. model definition
model = Sequential()
model.add(Dense(256, activation = 'tanh', input_shape=(64,)))
model.add(Dense(10, activation = 'softmax')) # 10 digit classes
model.compile(loss = 'cce', optimizer = opt)
'''

model.summary(model_name = 'digits mlp')

model_epochs = 12
fit_stats = model.fit(train_data,
                      one_hot(train_label),
                      batch_size      = 128,
                      epochs          = model_epochs,
                      validation_data = (test_data, one_hot(test_label)),
                      shuffle_data    = True)

eval_stats  = model.evaluate(test_data, one_hot(test_label))
predictions = unhot(model.predict(test_data, True))
print_results(predictions, test_label)

plot_img_results(test_data, test_label, predictions)
Example #24
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='cce', optimizer=opt)

model.summary(model_name='cifar-10 mlp')

model_epochs = 200
fit_stats = model.fit(transformed_train_data,
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(transformed_test_data,
                                       one_hot(test_label)),
                      shuffle_data=True)

eval_stats = model.evaluate(transformed_test_data, one_hot(test_label))
predictions = unhot(model.predict(transformed_test_data, True))
print_results(predictions, test_label)

plot_img_results(test_data,
                 test_label,
                 predictions,
                 dataset='cifar',
                 channels=3)
Example #25
from ztlearn.datasets.iris import fetch_iris

data = fetch_iris()
train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.target, test_size=0.3, random_seed=5)

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.1, learning_rate=0.01)

# model definition
model = Sequential()
model.add(Dense(10, activation='sigmoid', input_shape=(train_data.shape[1], )))
model.add(Dense(3, activation='sigmoid'))  # 3 iris_classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('iris mlp')

model_epochs = 25
fit_stats = model.fit(train_data,
                      one_hot(train_label),
                      batch_size=10,
                      epochs=model_epochs,
                      validation_data=(test_data, one_hot(test_label)),
                      shuffle_data=True)

# eval_stats = model.evaluate(test_data, one_hot(test_label))
predictions = unhot(model.predict(test_data))
print_results(predictions, test_label)

model_name = model.model_name
plot_metric('loss',
            model_epochs,
            fit_stats['train_loss'],
            fit_stats['valid_loss'],
            model_name=model_name)
Example #26
                                                                  cut_off     = 2000)

# plot samples of training data
plot_img_samples(train_data[:40], train_label[:40], dataset = 'mnist')

# optimizer definition
opt = register_opt(optimizer_name = 'rmsprop', momentum = 0.01, learning_rate = 0.001)

# model definition
model = Sequential()
model.add(LSTM(128, activation = 'tanh', input_shape = (28, 28)))
model.add(Flatten())
model.add(Dense(10, activation = 'softmax')) # 10 fashion classes
model.compile(loss = 'categorical_crossentropy', optimizer = opt)

model.summary('fashion mnist lstm')

model_epochs = 100
fit_stats = model.fit(train_data.reshape(-1, 28, 28),
                      one_hot(train_label),
                      batch_size      = 128,
                      epochs          = model_epochs,
                      validation_data = (test_data.reshape(-1, 28, 28), one_hot(test_label)),
                      shuffle_data    = True)

predictions = unhot(model.predict(test_data.reshape(-1, 28, 28), True))
print_results(predictions, test_label)
plot_img_results(test_data[:40], test_label[:40], predictions, dataset = 'mnist')

model_name = model.model_name
plot_metric('loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name = model_name)
Example #27
# stack and compile the generator
generator = stack_generator_layers(init=init_type)
generator.compile(loss='cce', optimizer=g_opt)

# stack and compile the discriminator
discriminator = stack_discriminator_layers(init=init_type)
discriminator.compile(loss='cce', optimizer=d_opt)

# stack and compile the generator_discriminator
generator_discriminator = Sequential(init_method=init_type)
generator_discriminator.layers.extend(generator.layers)
generator_discriminator.layers.extend(discriminator.layers)
generator_discriminator.compile(loss='cce', optimizer=g_opt)

generator.summary('fashion mnist generator')
discriminator.summary('fashion mnist discriminator')

generator_discriminator.summary('fashion mnist dcgan')
model_name = generator_discriminator.model_name

# rescale to range [-1, 1]
images = range_normalize(
    mnist_data.reshape((-1, ) + img_dims).astype(np.float32))

for epoch_idx in range(model_epochs):

    # set the epoch id for print out
    print_epoch = epoch_idx + 1

    # set the discriminator to trainable
    discriminator.trainable = True

    for epoch_k in range(1):
Example #28
# stack and compile the generator
generator = stack_generator_layers(init=init_type)
generator.compile(loss='cce', optimizer=g_opt)

# stack and compile the discriminator
discriminator = stack_discriminator_layers(init=init_type)
discriminator.compile(loss='cce', optimizer=d_opt)

# stack and compile the generator_discriminator
generator_discriminator = Sequential(init_method=init_type)
generator_discriminator.layers.extend(generator.layers)
generator_discriminator.layers.extend(discriminator.layers)
generator_discriminator.compile(loss='cce', optimizer=g_opt)

generator_discriminator.summary('digits dcgan')
model_name = generator_discriminator.model_name

# rescale to range [-1, 1]
images = range_normalize(
    data.data.reshape((-1, ) + img_dims).astype(np.float32))

for epoch_idx in range(model_epochs):

    # set the epoch id for print out
    print_epoch = epoch_idx + 1

    # set the discriminator to trainable
    discriminator.trainable = True

    for epoch_k in range(1):
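Both GAN examples are cut off inside their training loops. The sketch below fills in the usual alternating schedule with self-contained placeholders; the batch size, noise dimension, labelling convention, and per-batch update calls are assumptions, not ztlearn API shown in these snippets:

import numpy as np

# stand-ins so the schedule runs as-is; in the real examples these are the
# compiled ztlearn models and the range-normalized images
images = np.random.uniform(-1.0, 1.0, (256, 784)).astype(np.float32)
noise_dim, half_batch, model_epochs = 100, 64, 3

def generate(noise_batch):
    # placeholder for generator.predict(noise_batch)
    return np.zeros((noise_batch.shape[0], images.shape[1]), dtype=np.float32)

def update_discriminator(real_batch, fake_batch):
    # placeholder for one discriminator step: real labelled 1, fake labelled 0
    return 0.0

def update_generator_through_stack(noise_batch):
    # placeholder for one generator step through generator_discriminator,
    # taken with the discriminator frozen (trainable = False)
    return 0.0

for epoch_idx in range(model_epochs):
    print_epoch = epoch_idx + 1
    # discriminator phase: k update steps on real vs. generated batches
    for epoch_k in range(1):
        real_batch = images[np.random.randint(0, images.shape[0], half_batch)]
        noise = np.random.normal(0.0, 1.0, (half_batch, noise_dim))
        d_loss = update_discriminator(real_batch, generate(noise))
    # generator phase: freeze the discriminator and train through the stack
    noise = np.random.normal(0.0, 1.0, (half_batch, noise_dim))
    g_loss = update_generator_through_stack(noise)
    print('epoch', print_epoch, 'd_loss', d_loss, 'g_loss', g_loss)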
Example #29
train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.target, test_size=0.33, random_seed=15)

# plot samples of training data
plot_img_samples(train_data, train_label)

# optimizer definition
opt = register_opt(optimizer_name='rmsprop', momentum=0.01, lr=0.001)

# model definition
model = Sequential()
model.add(GRU(128, activation='tanh', input_shape=(8, 8)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))  # 10 digit classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('digits gru')

model_epochs = 100
fit_stats = model.fit(train_data.reshape(-1, 8, 8),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 8, 8),
                                       one_hot(test_label)),
                      shuffle_data=True)

predictions = unhot(model.predict(test_data.reshape(-1, 8, 8), True))
print_results(predictions, test_label)
plot_img_results(test_data, test_label, predictions)

plot_metric('loss',
            model_epochs,
            fit_stats['train_loss'],
            fit_stats['valid_loss'],
            model_name=model.model_name)
Example #30
from ztlearn.optimizers import register_opt


text = open('../../../datasets/text/nietzsche_short.txt').read().lower()
x, y, len_chars = gen_char_sequence_xtyt(text, maxlen = 30, step = 1)
del text

train_data, test_data, train_label, test_label = train_test_split(x, y, test_size = 0.4)

# optimizer definition
opt = register_opt(optimizer_name = 'rmsprop', momentum = 0.1, learning_rate = 0.01)

# model definition
model = Sequential()
model.add(GRU(128, activation = 'tanh', input_shape = (30, len_chars)))
model.compile(loss = 'categorical_crossentropy', optimizer = opt)

model.summary('nietzsche gru')

model_epochs = 20
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size      = 128,
                      epochs          = model_epochs,
                      validation_data = (test_data, test_label),
                      verbose         = False)

model_name = model.model_name
plot_metric('loss',     model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name = model_name)
plot_metric('accuracy', model_epochs, fit_stats['train_acc'],  fit_stats['valid_acc'],  model_name = model_name)
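Nearly every example ends with plot_metric, whose implementation is not shown. A matplotlib sketch of the assumed behaviour, overlaying train and validation curves for one metric:

import numpy as np
import matplotlib.pyplot as plt

def plot_metric_sketch(metric, epochs, train_values, valid_values, model_name=''):
    # assumed behaviour: overlay train and validation curves for one metric
    axis = np.arange(1, epochs + 1)
    plt.plot(axis, train_values, label='train ' + metric)
    plt.plot(axis, valid_values, label='valid ' + metric)
    plt.title('{} {}'.format(model_name, metric).strip())
    plt.xlabel('epoch')
    plt.ylabel(metric)
    plt.legend()
    plt.show()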