Example #1
# -*- coding: utf-8 -*-

from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.dl.optimizers import register_opt
from ztlearn.dl.layers import LSTM, Flatten, Dense

# read the corpus and lowercase it to keep the character vocabulary small
with open('../../data/text/tinyshakespeare.txt') as corpus:
    text = corpus.read().lower()

# build one-hot encoded windows of 30 characters and their next-char targets
x, y, len_chars = gen_char_sequence_xtym(text, maxlen=30, step=1)
del text  # free the raw corpus

train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.4)

opt = register_opt(optimizer_name='rmsprop', momentum=0.1, learning_rate=0.01)

# Model definition
model = Sequential()
model.add(LSTM(128, activation='tanh', input_shape=(30, len_chars)))
model.add(Flatten())
model.add(Dense(len_chars, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model_epochs = 2
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

plot_metric('Loss', model_epochs, fit_stats['train_loss'],
            fit_stats['valid_loss'])
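
# --- hedged sketch, not part of the original example: greedy sampling of 100
# --- characters from the trained model; the `model.predict` usage and the
# --- seed handling below are assumptions, not from the source.
import numpy as np

seed = test_data[:1]                    # one one-hot window, shape (1, 30, len_chars)
sampled_indices = []
for _ in range(100):
    probs = model.predict(seed)[0]      # distribution over the character vocabulary
    next_index = int(np.argmax(probs))  # greedy choice of the next character
    sampled_indices.append(next_index)
    step = np.zeros((1, 1, len_chars))
    step[0, 0, next_index] = 1.         # one-hot encode the sampled character
    seed = np.concatenate([seed[:, 1:, :], step], axis=1)  # slide the window forward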
Example #2
from sklearn import datasets

from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.dl.optimizers import register_opt
from ztlearn.dl.layers import BatchNormalization, Conv2D
from ztlearn.dl.layers import Dropout, Dense, Flatten, MaxPooling2D

data = datasets.load_digits()
plot_digits_img_samples(data)

train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.target, test_size=0.33, random_seed=5)

opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.001)

model = Sequential(init_method='he_uniform')
model.add(
    Conv2D(filters=32,
           kernel_size=(3, 3),
           activation='relu',
           input_shape=(1, 8, 8),
           padding='same'))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(
    Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(BatchNormalization())
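
# --- hedged completion, not part of the original (truncated) snippet: the
# --- classifier head, loss, and epoch count below are assumptions.
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))  # ten digit classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model_epochs = 12
fit_stats = model.fit(train_data.reshape(-1, 1, 8, 8),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 1, 8, 8),
                                       one_hot(test_label)))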
Example #3
# imports restored so this snippet runs standalone (paths match the examples above)
from ztlearn.dl.models import Sequential
from ztlearn.dl.optimizers import register_opt
from ztlearn.dl.layers import Activation, BatchNormalization, Dense

latent_dim = 100
img_dim = 64  # flattened 8x8 image size; assumed, `img_dim` is undefined in the source
batch_size = 128
half_batch = int(batch_size * 0.5)

verbose = True
init_type = 'he_uniform'

model_epochs = 7500
model_stats = {
    'd_train_loss': [],
    'd_train_acc': [],
    'g_train_loss': [],
    'g_train_acc': []
}

# separate optimizers: the discriminator's learning rate is 10x the generator's
d_opt = register_opt(optimizer_name='adam', beta1=0.5, learning_rate=0.0001)
g_opt = register_opt(optimizer_name='adam', beta1=0.5, learning_rate=0.00001)


def stack_generator_layers(init):
    """Build the generator: map a latent vector to a flattened (tanh) image."""
    model = Sequential(init_method=init)
    model.add(Dense(128, input_shape=(latent_dim, )))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(img_dim, activation='tanh'))

    return model
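
# --- hedged usage sketch, not in the source: build and compile the generator;
# --- the loss string mirrors Example #1 and is an assumption.
generator = stack_generator_layers(init=init_type)
generator.compile(loss='categorical_crossentropy', optimizer=g_opt)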