Ejemplo n.º 1
0
# Load the corpus and lowercase it. A `with` block guarantees the file
# handle is closed (the original left it dangling via open().read()).
with open('../../../ztlearn/datasets/text/nietzsche_short.txt') as corpus:
    text = corpus.read().lower()

# Encode the text into one-hot character sequences: sliding windows of
# 30 characters, advancing one character at a time.
x, y, len_chars = gen_char_sequence_xtyt(text, maxlen=30, step=1)
del text  # the raw string is no longer needed; keep only the encoded tensors

train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.4)

# optimizer definition
opt = register_opt(optimizer_name='rmsprop', momentum=0.1, learning_rate=0.01)

# model definition: a single GRU layer over (timesteps=30, vocab=len_chars)
# windows — the input_shape must match the maxlen used above
model = Sequential()
model.add(GRU(128, activation='tanh', input_shape=(30, len_chars)))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('nietzsche gru')

model_epochs = 20
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label),
                      verbose=False)

model_name = model.model_name
plot_metric('loss',
            model_epochs,
            fit_stats['train_loss'],
Ejemplo n.º 2
0
# cifar-100 multi-layer perceptron: one wide input block, two identical
# 512-unit hidden blocks, then a 100-way softmax head.
model = Sequential()
model.add(Dense(1024, input_shape = (3072, )))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
for hidden_units in (512, 512):
    # relu -> dropout -> batch-norm after every hidden Dense layer
    model.add(Dense(hidden_units))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
model.add(Dense(100))
model.add(Activation('softmax'))
model.compile(loss = 'cce', optimizer = opt)

model.summary(model_name = 'cifar-100 mlp')

model_epochs = 12 # change to 200 epochs
fit_stats = model.fit(
    reshaped_train_data,
    one_hot(train_label),
    batch_size      = 128,
    epochs          = model_epochs,
    validation_data = (reshaped_test_data, one_hot(test_label)),
    shuffle_data    = True)

eval_stats  = model.evaluate(reshaped_test_data, one_hot(test_label))
predictions = unhot(model.predict(reshaped_test_data, True))
print_results(predictions, test_label)
Ejemplo n.º 3
0
    return model


# Assemble the two halves of the GAN, compiling each on its own optimizer.
generator = stack_generator_layers(init=init_type)
generator.compile(loss='cce', optimizer=g_opt)

discriminator = stack_discriminator_layers(init=init_type)
discriminator.compile(loss='cce', optimizer=d_opt)

# The combined model shares the very same layer objects as both halves,
# so training it updates the generator/discriminator weights directly.
generator_discriminator = Sequential(init_method=init_type)
for half in (generator, discriminator):
    generator_discriminator.layers.extend(half.layers)
generator_discriminator.compile(loss='cce', optimizer=g_opt)

generator.summary('digits generator')
discriminator.summary('digits discriminator')

generator_discriminator.summary('digits gan')
model_name = generator_discriminator.model_name

# rescale pixel values into the range [-1, 1]
images = range_normalize(data.data.astype(np.float32))

for epoch_idx in range(model_epochs):

    # set the epoch id for print out
    print_epoch = epoch_idx + 1
Ejemplo n.º 4
0
    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dense(img_dim, activation='sigmoid'))

    return model


# Build the encoder and decoder halves, then fuse them into one autoencoder.
encoder = stack_encoder_layers(init=init_type)
decoder = stack_decoder_layers(init=init_type)

# Keyword renamed from 'lr' to 'learning_rate' for consistency with the
# other register_opt call in this file, which passes learning_rate.
opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.001)

# The autoencoder shares the same layer objects as encoder and decoder,
# so training it updates both halves in place.
autoencoder = Sequential(init_method=init_type)
autoencoder.layers.extend(encoder.layers)
autoencoder.layers.extend(decoder.layers)
autoencoder.compile(loss='categorical_crossentropy', optimizer=opt)

encoder.summary('cifar-10 encoder')
decoder.summary('cifar-10 decoder')

autoencoder.summary('cifar-10 autoencoder')

# Autoencoder: inputs and targets are the same images (data.data twice).
data = fetch_cifar_10()
train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.data, test_size=0.2, random_seed=5, cut_off=2000)

# plot samples of training data
plot_img_samples(train_data, None, dataset='cifar', channels=3)

transformed_image_dims = img_dim
transformed_train_data = z_score(
Ejemplo n.º 5
0
# mnist multi-layer perceptron: split, visualize, train, and evaluate.
train_data, test_data, train_label, test_label = train_test_split(mnist.data,
                                                                  mnist.target.astype('int'),
                                                                  test_size   = 0.33,
                                                                  random_seed = 5,
                                                                  cut_off     = 2000)

# plot samples of training data
plot_tiled_img_samples(train_data[:40], train_label[:40], dataset = 'mnist')

# model definition
model = Sequential()
model.add(Dense(512, activation = 'relu', input_shape = (784,)))
model.add(Dropout(0.3))
model.add(BatchNormalization())
# softmax (not relu) on the output layer so the 10 class scores form a
# probability distribution, as categorical cross-entropy expects — this
# matches the softmax head used by the other classifier in this file.
model.add(Dense(10, activation = 'softmax')) # 10 digits classes
model.compile(loss = 'cce', optimizer = Adam())

model.summary()

model_epochs = 12
fit_stats = model.fit(train_data,
                      one_hot(train_label),
                      batch_size      = 128,
                      epochs          = model_epochs,
                      validation_data = (test_data, one_hot(test_label)),
                      shuffle_data    = True)

eval_stats = model.evaluate(test_data, one_hot(test_label))

predictions = unhot(model.predict(test_data, True))
print_results(predictions, test_label)