示例#1
0
def stack_decoder_layers(init):
    """Build the decoder half of the autoencoder.

    Two ReLU dense layers (256 then 512 units), each followed by batch
    normalization, feeding a sigmoid output layer of width ``img_dim``.
    ``init`` is forwarded to ``Sequential`` as the weight-init scheme.
    """
    decoder = Sequential(init_method=init)
    layer_stack = [
        Dense(256, activation='relu', input_shape=(latent_dim, )),
        BatchNormalization(),
        Dense(512, activation='relu'),
        BatchNormalization(),
        Dense(img_dim, activation='sigmoid'),
    ]
    for layer in layer_stack:
        decoder.add(layer)

    return decoder
示例#2
0
def stack_discriminator_layers(init):
    """Assemble the dense discriminator.

    Two leaky-ReLU stages (256 then 128 units) with 25% dropout each,
    ending in a 2-unit sigmoid classifier. ``init`` selects the
    weight-initialization scheme passed to ``Sequential``.
    """
    disc = Sequential(init_method=init)
    for layer in (Dense(256, input_shape=(img_dim, )),
                  Activation('leaky_relu', alpha=0.2),
                  Dropout(0.25),
                  Dense(128),
                  Activation('leaky_relu', alpha=0.2),
                  Dropout(0.25),
                  Dense(2, activation='sigmoid')):
        disc.add(layer)

    return disc
示例#3
0
def stack_generator_layers(init):
    """Assemble the dense generator.

    Three dense/leaky-ReLU/batch-norm stages widening 128 -> 256 -> 512,
    capped by a tanh output layer of width ``img_dim``. ``init`` selects
    the weight-initialization scheme passed to ``Sequential``.
    """
    gen = Sequential(init_method=init)
    gen.add(Dense(128, input_shape=(latent_dim, )))
    gen.add(Activation('leaky_relu'))
    gen.add(BatchNormalization(momentum=0.8))
    # the two wider hidden stages repeat the same dense/act/norm pattern
    for width in (256, 512):
        gen.add(Dense(width))
        gen.add(Activation('leaky_relu'))
        gen.add(BatchNormalization(momentum=0.8))
    gen.add(Dense(img_dim, activation='tanh'))

    return gen
def stack_discriminator_layers(init):
    """Assemble the convolutional discriminator.

    Two 5x5 same-padded conv stages (64 then 128 filters), each with
    leaky-ReLU and 25% dropout, flattened into a 2-unit sigmoid
    classifier. ``init`` is the weight-init scheme for ``Sequential``.
    """
    disc = Sequential(init_method=init)
    stages = (
        Conv2D(64, kernel_size=(5, 5), padding='same', input_shape=img_dims),
        Activation('leaky_relu'),
        Dropout(0.25),
        Conv2D(128, kernel_size=(5, 5), padding='same'),
        Activation('leaky_relu'),
        Dropout(0.25),
        Flatten(),
        Dense(2),
        Activation('sigmoid'),
    )
    for stage in stages:
        disc.add(stage)

    return disc
def stack_generator_layers(init):
    """Assemble the convolutional generator.

    Projects the latent vector to a 128x7x7 feature map, then applies two
    upsample + 5x5 conv stages, ending with a tanh conv layer producing
    ``img_channels`` channels. ``init`` is the weight-init scheme.
    """
    gen = Sequential(init_method=init)
    # NOTE: the first conv stage applies batch norm before the activation,
    # matching the original layer order exactly.
    pipeline = [
        Dense(128 * 7 * 7, input_shape=(latent_dim, )),
        Activation('leaky_relu'),
        BatchNormalization(momentum=0.8),
        Reshape((128, 7, 7)),
        UpSampling2D(),
        Conv2D(64, kernel_size=(5, 5), padding='same'),
        BatchNormalization(momentum=0.8),
        Activation('leaky_relu'),
        UpSampling2D(),
        Conv2D(img_channels, kernel_size=(5, 5), padding='same'),
        Activation('tanh'),
    ]
    for layer in pipeline:
        gen.add(layer)

    return gen
示例#6
0
from ztlearn.dl.models import Sequential
from ztlearn.optimizers import register_opt

# Character-level language-modelling example: a single GRU layer learns to
# predict the next character of a short Nietzsche excerpt.
# Fix: use a context manager so the corpus file handle is closed promptly
# instead of leaking until garbage collection.
with open('../../../ztlearn/datasets/text/nietzsche_short.txt') as corpus:
    text = corpus.read().lower()
x, y, len_chars = gen_char_sequence_xtyt(text, maxlen=30, step=1)
del text  # raw corpus no longer needed once sequences are generated

# hold out 40% of the generated sequences for validation
train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.4)

# optimizer definition
opt = register_opt(optimizer_name='rmsprop', momentum=0.1, learning_rate=0.01)

# model definition
# input_shape is (sequence length, vocabulary size) — maxlen=30 above
model = Sequential()
model.add(GRU(128, activation='tanh', input_shape=(30, len_chars)))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('nietzsche gru')

model_epochs = 20
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label),
                      verbose=False)

model_name = model.model_name
plot_metric('loss',
示例#7
0
from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.dl.optimizers import register_opt
from ztlearn.dl.layers import BatchNormalization, Conv2D
from ztlearn.dl.layers import Dropout, Dense, Flatten, MaxPooling2D

# CNN classifier example on the 8x8 digits dataset.
# NOTE(review): `datasets` is not imported in the visible lines — presumably
# provided by the `from ztlearn.utils import *` above; confirm.
data = datasets.load_digits()
plot_digits_img_samples(data)

# hold out a third of the samples for testing (fixed seed for repeatability)
train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.target, test_size=0.33, random_seed=5)

opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.001)

# model definition: two conv stages (32 then 64 filters), each regularized
# with dropout + batch norm; input is a single-channel 8x8 image (1, 8, 8)
model = Sequential(init_method='he_uniform')
model.add(
    Conv2D(filters=32,
           kernel_size=(3, 3),
           activation='relu',
           input_shape=(1, 8, 8),
           padding='same'))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(
    Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(256, activation='relu'))
示例#8
0
    model = Sequential(init_method=init)
    model.add(Dense(256, activation='relu', input_shape=(latent_dim, )))
    model.add(BatchNormalization())
    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dense(img_dim, activation='sigmoid'))

    return model


# build the encoder/decoder halves with a shared weight-init scheme
encoder = stack_encoder_layers(init=init_type)
decoder = stack_decoder_layers(init=init_type)

# NOTE(review): other examples in this codebase pass `learning_rate=` here;
# confirm register_opt also accepts the shorter `lr` alias.
opt = register_opt(optimizer_name='adam', momentum=0.01, lr=0.001)

# chain encoder + decoder layers into a single autoencoder pipeline
autoencoder = Sequential(init_method=init_type)
autoencoder.layers.extend(encoder.layers)
autoencoder.layers.extend(decoder.layers)
autoencoder.compile(loss='categorical_crossentropy', optimizer=opt)

encoder.summary('cifar-10 encoder')
decoder.summary('cifar-10 decoder')

autoencoder.summary('cifar-10 autoencoder')

# reconstruction task: inputs and targets are both the image data
data = fetch_cifar_10()
train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.data, test_size=0.2, random_seed=5, cut_off=2000)

# plot samples of training data
plot_img_samples(train_data, None, dataset='cifar', channels=3)
示例#9
0
    model.add(Dropout(0.25))
    model.add(Dense(2, activation='sigmoid'))

    return model


# stack and compile the generator
generator = stack_generator_layers(init=init_type)
generator.compile(loss='cce', optimizer=g_opt)

# stack and compile the discriminator
discriminator = stack_discriminator_layers(init=init_type)
discriminator.compile(loss='cce', optimizer=d_opt)

# stack and compile the generator_discriminator
# (combined model: generator layers feed directly into discriminator layers,
# compiled with the generator's optimizer)
generator_discriminator = Sequential(init_method=init_type)
generator_discriminator.layers.extend(generator.layers)
generator_discriminator.layers.extend(discriminator.layers)
generator_discriminator.compile(loss='cce', optimizer=g_opt)

generator.summary('digits generator')
discriminator.summary('digits discriminator')

generator_discriminator.summary('digits gan')
model_name = generator_discriminator.model_name

# rescale to range [-1, 1]
images = range_normalize(data.data.astype(np.float32))
for epoch_idx in range(model_epochs):
示例#10
0
# MLP classifier example on the digits dataset.
# NOTE: Check the random_seed seeding for improperly shuffled data.
data = fetch_digits()
train_data, test_data, train_label, test_label = train_test_split(data.data,
                                                                  data.target,
                                                                  test_size   = 0.3,
                                                                  random_seed = 3)

# plot samples of training data
plot_tiled_img_samples(train_data, train_label)

# optimizer definition
opt = register_opt(optimizer_name = 'adam', momentum = 0.01, lr = 0.001)
# opt = register_opt(optimizer_name = 'nestrov', momentum = 0.01, lr = 0.0001)

# 1. model definition
model = Sequential(init_method = 'he_normal')
model.add(Dense(256, activation = 'relu', input_shape = (64,)))
model.add(Dropout(0.25))
model.add(BatchNormalization())
# NOTE(review): the commented-out variant below uses 'softmax' for the
# output layer; 'relu' here with a 'cce' loss looks suspect — confirm.
model.add(Dense(10, activation = 'relu')) # 10 digits classes
model.compile(loss = 'cce', optimizer = opt)

'''
# 2. model definition
model = Sequential()
model.add(Dense(256, activation = 'tanh', input_shape=(64,)))
model.add(Dense(10, activation = 'softmax')) # 10 digits classes
model.compile(loss = 'cce', optimizer = opt)
'''

model.summary(model_name = 'digits mlp')