Пример #1
0
def stack_decoder_layers(init):
    """Assemble the decoder network mapping latent codes back to images.

    Layer widths run latent_dim -> 256 -> 512 -> img_dim, with batch
    normalization after each hidden layer and a sigmoid output so pixel
    values land in [0, 1].
    """
    decoder = Sequential(init_method=init)
    stack = [
        Dense(256, activation='relu', input_shape=(latent_dim, )),
        BatchNormalization(),
        Dense(512, activation='relu'),
        BatchNormalization(),
        Dense(img_dim, activation='sigmoid'),
    ]
    for layer in stack:
        decoder.add(layer)

    return decoder
Пример #2
0
                                                                  data.target,
                                                                  test_size   = 0.3,
                                                                  random_seed = 3)
# NOTE(review): chunk starts mid-statement — the lines above are the trailing
# arguments of a train_test_split(...) call whose opening lies outside this chunk.

# plot samples of training data
plot_img_samples(train_data, train_label, dataset = 'cifar', channels = 3)

# flatten each 3x32x32 CIFAR image into one feature vector, then standardize
reshaped_image_dims = 3 * 32 * 32 # ==> (channels * height * width)
reshaped_train_data = z_score(train_data.reshape(train_data.shape[0], reshaped_image_dims).astype('float32'))
reshaped_test_data  = z_score(test_data.reshape(test_data.shape[0], reshaped_image_dims).astype('float32'))

# optimizer definition
opt = register_opt(optimizer_name = 'adam', momentum = 0.01, lr = 0.0001)

# MLP classifier: Dense -> ReLU -> Dropout -> BatchNorm blocks ending in a
# 100-way softmax (presumably CIFAR-100 labels — confirm against the dataset)
model = Sequential()
model.add(Dense(1024, input_shape = (3072, )))  # 3072 == reshaped_image_dims
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(100))
model.add(Activation('softmax'))
model.compile(loss = 'cce', optimizer = opt)
Пример #3
0
from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.dl.optimizers import register_opt
from ztlearn.dl.layers import LSTM, Flatten, Dense

# Character-level LSTM on the tiny-shakespeare corpus: each sample is a
# 30-character window and gen_char_sequence_xtym produces the matching targets.
# Fix: use a context manager so the file handle is closed promptly — the
# original `open(...).read()` left the handle dangling.
with open('../../data/text/tinyshakespeare.txt') as corpus:
    text = corpus.read().lower()
x, y, len_chars = gen_char_sequence_xtym(text, maxlen=30, step=1)
del text  # the raw corpus is no longer needed once sequences are built

train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.4)
opt = register_opt(optimizer_name='rmsprop', momentum=0.1, learning_rate=0.01)

# Model definition: LSTM encoder over (timesteps, vocab)-shaped windows,
# flattened into a per-character softmax head.
model = Sequential()
model.add(LSTM(128, activation='tanh', input_shape=(30, len_chars)))
model.add(Flatten())
model.add(Dense(len_chars, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model_epochs = 2
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

# plot training vs. validation curves returned by fit()
plot_metric('Loss', model_epochs, fit_stats['train_loss'],
            fit_stats['valid_loss'])
plot_metric('Accuracy', model_epochs, fit_stats['train_acc'],
            fit_stats['valid_acc'])
Пример #4
0
from ztlearn.datasets.digits import fetch_digits
from ztlearn.dl.layers import LSTM, Dense, Flatten

# 8x8 digits dataset; 70/30 train/test split with a fixed seed
data = fetch_digits()
train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.target, test_size=0.3, random_seed=15)

# plot samples of training data
plot_img_samples(train_data, train_label)

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.001)

# Model definition: treat each digit as an 8-step sequence of 8 features,
# encode with an LSTM, then classify with a 10-way softmax head.
model = Sequential()
model.add(LSTM(128, activation='tanh', input_shape=(8, 8)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))  # 10 digits classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('digits lstm')

model_epochs = 100
# flat 64-pixel rows reshaped to (samples, 8, 8); labels one-hot for cce loss
fit_stats = model.fit(train_data.reshape(-1, 8, 8),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 8, 8),
                                       one_hot(test_label)),
                      shuffle_data=True)
Пример #5
0
    fashion_mnist.data,
    fashion_mnist.target.astype('int'),
    test_size=0.33,
    random_seed=5,
    cut_off=None)
# NOTE(review): chunk starts mid-statement — the lines above are the trailing
# arguments of a train_test_split(...) call on the Fashion-MNIST dataset.

# plot samples of training data
plot_tiled_img_samples(train_data[:40], train_label[:40], dataset='mnist')

# optimizer definition
# opt = register_opt(optimizer_name = 'nestrov', momentum = 0.01, lr = 0.0001)
opt = register_opt(optimizer_name='adam', momentum=0.001, lr=0.001)

# model definition: one 512-unit hidden layer over 784 (= 28*28) flat pixels
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784, )))
model.add(Dropout(0.25))
model.add(BatchNormalization())
# NOTE(review): 'relu' on the output layer alongside 'cce' loss is unusual —
# sibling examples in this collection use 'softmax' here; looks like a bug,
# confirm before reuse.
model.add(Dense(10, activation='relu'))  # 10 digits classes
model.compile(loss='cce', optimizer=opt)

model.summary()

model_epochs = 5
fit_stats = model.fit(train_data,
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, one_hot(test_label)),
                      shuffle_data=True)
Пример #6
0
]  # NOTE(review): closes a text_list literal whose opening lies outside this chunk

# join the sentences into one paragraph and tokenize it
paragraph = ' '.join(text_list)
sentences_tokens, vocab_size, longest_sentence = get_sentence_tokens(paragraph)
# binary labels for the 12 sentences, one-hot encoded into 2 columns
sentence_targets = one_hot(np.array([1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1]))

train_data, test_data, train_label, test_label = train_test_split(sentences_tokens,
                                                                  sentence_targets,
                                                                  test_size   = 0.2,
                                                                  random_seed = 5)

# optimizer definition
opt = register_opt(optimizer_name = 'adamax', momentum = 0.01, lr = 0.001)

# embed tokens in 2 dimensions, run a truncated-BPTT RNN over the embedded
# sentence, then a 2-way softmax trained with binary cross-entropy
model = Sequential()
model.add(Embedding(vocab_size, 2, input_length = longest_sentence))
model.add(RNN(5, activation = 'tanh', bptt_truncate = 2, input_shape = (2, longest_sentence)))
model.add(Flatten())
model.add(Dense(2, activation = 'softmax'))
model.compile(loss = 'bce', optimizer = opt)

model.summary('embedded sentences rnn')

"""
NOTE:
batch size should be equal the size of embedding
vectors and divisible  by the training  set size
"""

model_epochs = 500
# NOTE(review): chunk ends mid-statement — this model.fit(...) call continues
# outside this chunk.
fit_stats = model.fit(train_data,
Пример #7
0
from ztlearn.dl.layers import RNN, Flatten, Dense

# character-level RNN on a short tiny-shakespeare corpus
# NOTE(review): open() without a context manager leaks the file handle;
# prefer `with open(...) as f:` — left unchanged in this annotation pass.
text = open(
    '../../../ztlearn/datasets/text/tinyshakespeare_short.txt').read().lower()
x, y, len_chars = gen_char_sequence_xtym(text, maxlen=30, step=1)
del text  # free the raw corpus once the training sequences are built

train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.4)

# optimizer definition
opt = register_opt(optimizer_name='rmsprop', momentum=0.1, learning_rate=0.01)

# model definition: truncated-BPTT RNN over 30-step windows, flattened into
# a per-character softmax head
model = Sequential()
model.add(
    RNN(128, activation='tanh', bptt_truncate=24, input_shape=(30, len_chars)))
model.add(Flatten())
model.add(Dense(len_chars, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('shakespeare rnn')

model_epochs = 20
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

model_name = model.model_name
# NOTE(review): chunk ends mid-statement — plot_metric(...) continues outside.
plot_metric('loss',
Пример #8
0
def stack_generator_layers(init):
    """Assemble the generator MLP: latent vector -> img_dim-sized sample.

    Three widening blocks (128, 256, 512 units), each followed by a
    leaky-ReLU activation and batch normalization, then a tanh output.
    """
    gen = Sequential(init_method=init)
    for width in (128, 256, 512):
        if width == 128:
            # the first layer declares the latent input shape
            gen.add(Dense(width, input_shape=(latent_dim, )))
        else:
            gen.add(Dense(width))
        gen.add(Activation('leaky_relu'))
        gen.add(BatchNormalization(momentum=0.8))
    gen.add(Dense(img_dim, activation='tanh'))

    return gen
Пример #9
0
def stack_discriminator_layers(init):
    """Assemble the discriminator MLP: img_dim input -> 2 sigmoid scores.

    Two shrinking Dense blocks (256 then 128 units), each with a
    leaky-ReLU (alpha=0.2) and 25% dropout, then a 2-unit sigmoid head.
    """
    disc = Sequential(init_method=init)
    add = disc.add  # local alias, purely cosmetic
    add(Dense(256, input_shape=(img_dim, )))
    add(Activation('leaky_relu', alpha=0.2))
    add(Dropout(0.25))
    add(Dense(128))
    add(Activation('leaky_relu', alpha=0.2))
    add(Dropout(0.25))
    add(Dense(2, activation='sigmoid'))

    return disc
Пример #10
0
def stack_generator_layers(init):
    """Assemble the convolutional (DCGAN-style) generator.

    Projects the latent vector to a 128x7x7 tensor, then upsamples twice
    through 5x5 'same'-padded convolutions, ending in img_channels feature
    maps squashed by tanh.
    """
    net = Sequential(init_method=init)
    pipeline = [
        Dense(128 * 7 * 7, input_shape=(latent_dim, )),
        Activation('leaky_relu'),
        BatchNormalization(momentum=0.8),
        Reshape((128, 7, 7)),
        UpSampling2D(),
        Conv2D(64, kernel_size=(5, 5), padding='same'),
        BatchNormalization(momentum=0.8),
        Activation('leaky_relu'),
        UpSampling2D(),
        Conv2D(img_channels, kernel_size=(5, 5), padding='same'),
        Activation('tanh'),
    ]
    for layer in pipeline:
        net.add(layer)

    return net
Пример #11
0
def stack_discriminator_layers(init):
    """Assemble the convolutional discriminator: image -> 2 sigmoid scores.

    Two 5x5 'same'-padded convolution blocks (64 then 128 filters), each
    with a leaky-ReLU and 25% dropout, flattened into a 2-unit sigmoid head.
    """
    critic = Sequential(init_method=init)
    add = critic.add  # shorthand only
    add(Conv2D(64, kernel_size=(5, 5), padding='same', input_shape=img_dims))
    add(Activation('leaky_relu'))
    add(Dropout(0.25))
    add(Conv2D(128, kernel_size=(5, 5), padding='same'))
    add(Activation('leaky_relu'))
    add(Dropout(0.25))
    add(Flatten())
    add(Dense(2))
    add(Activation('sigmoid'))

    return critic
Пример #12
0
from ztlearn.utils import *
from ztlearn.dl.layers import Dense
from ztlearn.dl.models import Sequential
from ztlearn.optimizers import register_opt
from ztlearn.datasets.iris import fetch_iris

# iris classification with a tiny two-layer MLP
data = fetch_iris()
train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.target, test_size=0.3, random_seed=5)

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.1, learning_rate=0.01)

# model definition: 10 sigmoid hidden units over the 4 iris features,
# then a 3-way sigmoid output trained with categorical cross-entropy
model = Sequential()
model.add(Dense(10, activation='sigmoid', input_shape=(train_data.shape[1], )))
model.add(Dense(3, activation='sigmoid'))  # 3 iris_classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('iris mlp')

model_epochs = 25
fit_stats = model.fit(train_data,
                      one_hot(train_label),
                      batch_size=10,
                      epochs=model_epochs,
                      validation_data=(test_data, one_hot(test_label)),
                      shuffle_data=True)

# NOTE(review): the commented-out evaluate pairs test_data with TRAIN labels —
# if re-enabled it should presumably use one_hot(test_label); confirm.
# eval_stats = model.evaluate(test_data, one_hot(train_label))
predictions = unhot(model.predict(test_data))  # class indices from one-hot rows
Пример #13
0
from ztlearn.optimizers import register_opt
from ztlearn.dl.layers import RNN, Dense, Flatten

# sklearn 8x8 digits treated as 8-step sequences of 8 features
data = datasets.load_digits()
train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.target, test_size=0.4, random_seed=5)

# plot samples of training data
plot_img_samples(train_data, train_label)

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.001)

# model definition: truncated-BPTT RNN encoder plus a 10-way softmax head
model = Sequential()
model.add(RNN(128, activation='tanh', bptt_truncate=5, input_shape=(8, 8)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))  # 10 digits classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary(model_name='digits rnn')

model_epochs = 100
# reshape flat 64-pixel rows to (samples, 8, 8); one-hot labels for cce loss
fit_stats = model.fit(train_data.reshape(-1, 8, 8),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 8, 8),
                                       one_hot(test_label)),
                      shuffle_data=True)
Пример #14
0
# -*- coding: utf-8 -*-

import numpy as np

from ztlearn.dl.layers import Embedding
from ztlearn.dl.models import Sequential
from ztlearn.utils import train_test_split
from ztlearn.dl.optimizers import register_opt


# minimal smoke test of the Embedding layer on random integer data
opt = register_opt(optimizer_name = 'sgd_momentum', momentum = 0.01, learning_rate = 0.001)
model = Sequential(init_method = 'he_normal')
model.add(Embedding(10, 2, activation = 'selu', input_shape = (1, 10)))
model.compile(loss = 'categorical_crossentropy', optimizer = opt)

# NOTE(review): labels are drawn from range(14) while the embedding input
# vocabulary is 10 — presumably fine for a smoke test, but confirm.
train_data = np.random.randint(10, size=(5, 1, 10))
train_label = np.random.randint(14, size=(5, 1, 10))

# hold out 10% of the 5 random samples
train_data, test_data, train_label, test_label = train_test_split(train_data,
                                                                  train_label,
                                                                  test_size = 0.1)

fit_stats = model.fit(train_data, train_label, batch_size = 4, epochs = 50)


# NOTE(review): the triple-quoted block below is cut off — its closing quotes
# lie outside this chunk.
"""
works

data = np.arange(0,100,1).reshape(10,1,10)
labels = np.arange(1,101,1).reshape(10,1,10)
Пример #15
0
from ztlearn.dl.optimizers import register_opt
from ztlearn.dl.layers import BatchNormalization, Conv2D
from ztlearn.dl.layers import Dropout, Dense, Flatten, MaxPooling2D

# small CNN on the sklearn 8x8 digits images
data = datasets.load_digits()
plot_digits_img_samples(data)

train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.target, test_size=0.33, random_seed=5)

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.001)

# two conv blocks (32 then 64 filters, 3x3, 'same' padding) with dropout and
# batch norm, one 2x2 max-pool, then a 256-unit dense layer and 10-way softmax
model = Sequential(init_method='he_uniform')
model.add(
    Conv2D(filters=32,
           kernel_size=(3, 3),
           activation='relu',
           input_shape=(1, 8, 8),
           padding='same'))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(
    Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(10, activation='softmax'))  # 10 digits classes
model.compile(loss='categorical_crossentropy', optimizer=opt)
Пример #16
0
from ztlearn.optimizers import register_opt

# synthetic multiplication-sequence task (xtyt variant: a target per timestep)
x, y, seq_len = gen_mult_sequence_xtyt(1000, 10, 10)
train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.4)

# plot samples of training data
print_seq_samples(train_data, train_label)

# optimizer definition
opt = register_opt(optimizer_name='rmsprop', momentum=0.01, learning_rate=0.01)
# opt = register_opt(optimizer_name = 'adadelta', momentum = 0.01, learning_rate = 1)

# model definition: a single GRU layer emits the full output sequence
model = Sequential()
model.add(GRU(10, activation='tanh', input_shape=(10, seq_len)))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('seq gru')

model_epochs = 100
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=100,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

# predictions transposed via (0, 2, 1) before un-hotting along axis 2 —
# presumably to match print_seq_results' expected layout; confirm.
print_seq_results(model.predict(test_data, (0, 2, 1)),
                  test_label,
                  test_data,
                  unhot_axis=2)
Пример #17
0
from ztlearn.optimizers import register_opt

# character-level GRU on a short Nietzsche corpus
# NOTE(review): open() without a context manager leaks the file handle;
# prefer `with open(...) as f:` — left unchanged in this annotation pass.
text = open(
    '../../../ztlearn/datasets/text/nietzsche_short.txt').read().lower()
x, y, len_chars = gen_char_sequence_xtyt(text, maxlen=30, step=1)
del text  # free the raw corpus once the training sequences are built

train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.4)

# optimizer definition
opt = register_opt(optimizer_name='rmsprop', momentum=0.1, learning_rate=0.01)

# model definition: a lone GRU layer trained with categorical cross-entropy
model = Sequential()
model.add(GRU(128, activation='tanh', input_shape=(30, len_chars)))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('nietzsche gru')

model_epochs = 20
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label),
                      verbose=False)

model_name = model.model_name
# NOTE(review): chunk ends mid-statement — plot_metric(...) continues outside.
plot_metric('loss',
            model_epochs,
Пример #18
0
from ztlearn.optimizers import register_opt
from ztlearn.dl.layers import RNN, Flatten, Dense

# synthetic multiplication-sequence task (xtym variant: one target sequence)
x, y, seq_len = gen_mult_sequence_xtym(3000, 10, 10)
train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.3)

# plot samples of training data
print_seq_samples(train_data, train_label, 0)

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.01)

# model definition: small truncated-BPTT RNN over 9-step inputs, flattened
# into a seq_len-way softmax head
model = Sequential()
model.add(RNN(5, activation='tanh', bptt_truncate=5, input_shape=(9, seq_len)))
model.add(Flatten())
model.add(Dense(seq_len, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('seq rnn')

model_epochs = 15
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=100,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

print_seq_results(model.predict(test_data), test_label, test_data)