Example #1
# imports restored for a runnable snippet (module paths assumed from ztlearn)
from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.optimizers import register_opt
from ztlearn.dl.layers import GRU, Flatten, Dense
from ztlearn.datasets.mnist import fetch_mnist

mnist = fetch_mnist()
train_data, test_data, train_label, test_label = train_test_split(
    mnist.data,
    mnist.target.astype('int'),
    test_size=0.3,
    random_seed=15,
    cut_off=2000)

# plot samples of training data
plot_img_samples(train_data, train_label, dataset='mnist')

# optimizer definition
opt = register_opt(optimizer_name='rmsprop', momentum=0.01, learning_rate=0.001)

# model definition
model = Sequential()
model.add(GRU(128, activation='tanh', input_shape=(28, 28)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))  # 10 digit classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary(model_name='mnist gru')

model_epochs = 100
fit_stats = model.fit(train_data.reshape(-1, 28, 28),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 28, 28),
                                       one_hot(test_label)),
                      shuffle_data=True)
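
# A possible evaluation step (not in the original snippet): predict the held-out
# digits and plot the results. unhot and plot_img_results are assumed helpers
# from ztlearn.utils, mirroring the plotting utilities used above.
predictions = unhot(model.predict(test_data.reshape(-1, 28, 28)))
plot_img_results(test_data, test_label, predictions, dataset='mnist')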
Example #2
from ztlearn.utils import *  # train_test_split, gen_char_sequence_xtym (assumed)
from ztlearn.dl.models import Sequential
from ztlearn.optimizers import register_opt
from ztlearn.dl.layers import LSTM, Flatten, Dense


text = open('../../../ztlearn/datasets/text/tinyshakespeare_short.txt').read().lower()
x, y, len_chars = gen_char_sequence_xtym(text, maxlen = 30, step = 1)
del text

train_data, test_data, train_label, test_label = train_test_split(x, y, test_size = 0.4)

# optimizer definition
opt = register_opt(optimizer_name = 'rmsprop', momentum = 0.1, learning_rate = 0.01)

# model definition
model = Sequential()
model.add(LSTM(128, activation = 'tanh', input_shape = (30, len_chars)))
model.add(Flatten())
model.add(Dense(len_chars, activation = 'softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer = opt)

model.summary('shakespeare lstm')

model_epochs = 20
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size      = 128,
                      epochs          = model_epochs,
                      validation_data = (test_data, test_label))

model_name = model.model_name
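
# The dangling model_name assignment above suggests the original example went on
# to plot the fitted metrics; a sketch assuming ztlearn's plot_metric helper and
# the fit_stats keys returned by model.fit:
plot_metric('loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name=model_name)
plot_metric('accuracy', model_epochs, fit_stats['train_acc'], fit_stats['valid_acc'], model_name=model_name)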
Example #3

# NOTE: the imports and data-loading lines of this CIFAR-100 example are missing
# from the listing; the fetch call and split below are assumptions modeled on
# Example #1.
from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.optimizers import register_opt
from ztlearn.dl.layers import RNN, Flatten, Dense
from ztlearn.datasets.cifar_100 import fetch_cifar_100

cifar_100 = fetch_cifar_100()
train_data, test_data, train_label, test_label = train_test_split(
    cifar_100.data,
    cifar_100.target.astype('int'),
    test_size=0.3,
    cut_off=2000)

# plot samples of training data
plot_img_samples(train_data, train_label, dataset='cifar', channels=3)

reshaped_image_dims = 3 * 1024  # ==> (channels * (height * width))
reshaped_train_data = z_score(
    train_data.reshape(train_data.shape[0],
                       reshaped_image_dims).astype('float32'))
reshaped_test_data = z_score(
    test_data.reshape(test_data.shape[0],
                      reshaped_image_dims).astype('float32'))

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.0001)

# model definition
model = Sequential()
model.add(RNN(256, activation='tanh', bptt_truncate=5, input_shape=(3, 1024)))
model.add(Flatten())
model.add(Dense(100, activation='softmax'))  # 100 CIFAR-100 classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary(model_name='cifar-100 rnn')

model_epochs = 10  # increase for better results
fit_stats = model.fit(reshaped_train_data.reshape(-1, 3, 1024),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(reshaped_test_data.reshape(-1, 3, 1024),
                                       one_hot(test_label)),
                      shuffle_data=True)
Example #4

# NOTE: the opening of this MNIST autoencoder example (imports and the
# init_type / img_dim / latent_dim settings) is missing from the listing;
# the values below are assumptions restored for completeness.
import numpy as np

from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.optimizers import register_opt
from ztlearn.dl.layers import Dense, BatchNormalization
from ztlearn.datasets.mnist import fetch_mnist

init_type  = 'he_uniform'  # assumed weight initialization scheme
img_dim    = 28 * 28       # flattened MNIST image
latent_dim = 32            # assumed size of the latent code


def stack_decoder_layers(init):
    model = Sequential(init_method=init)
    model.add(Dense(128, activation='relu', input_shape=(latent_dim, )))
    model.add(BatchNormalization())
    model.add(Dense(256, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dense(img_dim, activation='sigmoid'))

    return model


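# stack_encoder_layers is referenced below but missing from the listing; a
# minimal sketch mirroring the decoder in reverse (layer widths are assumptions):
def stack_encoder_layers(init):
    model = Sequential(init_method=init)
    model.add(Dense(256, activation='relu', input_shape=(img_dim,)))
    model.add(BatchNormalization())
    model.add(Dense(128, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dense(latent_dim, activation='relu'))

    return model
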
encoder = stack_encoder_layers(init=init_type)
decoder = stack_decoder_layers(init=init_type)

opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.0001)

autoencoder = Sequential(init_method=init_type)
autoencoder.layers.extend(encoder.layers)
autoencoder.layers.extend(decoder.layers)
autoencoder.compile(loss='categorical_crossentropy', optimizer=opt)

encoder.summary('mnist encoder')
decoder.summary('mnist decoder')

autoencoder.summary('mnist autoencoder')

mnist = fetch_mnist()
images = range_normalize(mnist.data.astype(np.float32), 0, 1)  # rescale to range [0, 1]

train_data, test_data, train_label, test_label = train_test_split(
    images, images, test_size=0.2, random_seed=15, cut_off=2000)
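
# The listing stops before training; a minimal fit call in the style of the
# other examples (batch size and epoch count are assumptions). The labels are
# the images themselves, as set up by the split above.
fit_stats = autoencoder.fit(train_data,
                            train_label,
                            batch_size=128,
                            epochs=20,
                            validation_data=(test_data, test_label))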
Example #6
# NOTE: the fetch and split opening of this example is missing from the listing;
# it is reconstructed below following Example #1 (imports and fetch assumed).
from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.optimizers import register_opt
from ztlearn.dl.layers import Conv2D, MaxPooling2D, Dropout, BatchNormalization, Flatten, Dense
from ztlearn.datasets.mnist import fetch_mnist

mnist = fetch_mnist()
train_data, test_data, train_label, test_label = train_test_split(
    mnist.data,
    mnist.target.astype('int'),
    test_size=0.33,
    random_seed=5,
    cut_off=2000)

# normalize to range [0, 1]
train_data = range_normalize(train_data.astype('float32'), 0, 1)
test_data = range_normalize(test_data.astype('float32'), 0, 1)

# plot samples of training data
plot_img_samples(train_data[:40], train_label[:40], dataset='mnist')

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.001)

# model definition
model = Sequential(init_method='he_uniform')
model.add(
    Conv2D(filters=32,
           kernel_size=(3, 3),
           activation='relu',
           input_shape=(1, 28, 28),
           padding='same'))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(
    Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(256, activation='relu'))
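
# The listing is truncated here; a plausible completion mirroring Example #1
# (output layer, compile, and fit settings below are assumptions):
model.add(Dense(10, activation='softmax'))  # 10 digit classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary(model_name='mnist cnn')

fit_stats = model.fit(train_data.reshape(-1, 1, 28, 28),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=12,
                      validation_data=(test_data.reshape(-1, 1, 28, 28),
                                       one_hot(test_label)),
                      shuffle_data=True)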
Example #7
# imports restored for a runnable snippet (module paths assumed from ztlearn)
from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.optimizers import register_opt
from ztlearn.dl.layers import Dense, Dropout, BatchNormalization
from ztlearn.datasets.digits import fetch_digits

# NOTE: check the random_seed value if the data appears improperly shuffled.
data = fetch_digits()
train_data, test_data, train_label, test_label = train_test_split(data.data,
                                                                  data.target,
                                                                  test_size   = 0.3,
                                                                  random_seed = 3)

# plot samples of training data
plot_tiled_img_samples(train_data, train_label)

# optimizer definition
opt = register_opt(optimizer_name = 'adam', momentum = 0.01, learning_rate = 0.001)
# opt = register_opt(optimizer_name = 'nestrov', momentum = 0.01, learning_rate = 0.0001)

# 1. model definition
model = Sequential(init_method = 'he_normal')
model.add(Dense(256, activation = 'relu', input_shape = (64,)))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(Dense(10, activation = 'softmax')) # 10 digit classes
model.compile(loss = 'cce', optimizer = opt)

'''
# 2. model definition
model = Sequential()
model.add(Dense(256, activation = 'tanh', input_shape=(64,)))
model.add(Dense(10, activation = 'softmax')) # 10 digits classes
model.compile(loss = 'cce', optimizer = opt)
'''

model.summary(model_name = 'digits mlp')
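
# The listing ends at the summary; a training call in the style of the other
# examples (epoch count assumed):
model_epochs = 100
fit_stats = model.fit(train_data,
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, one_hot(test_label)))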