from ztlearn.objectives import ObjectiveFunction as objective

# --- Boston housing: univariate polynomial regression on the RM feature ---
data = datasets.load_boston()

# keep only feature column 5 (RM), standardized to zero mean / unit variance
boston_data = data['data']
input_data = z_score(boston_data[:, [5]])
input_label = data['target']

# hold out 30% of the samples for evaluation
train_data, test_data, train_label, test_label = train_test_split(
    input_data, input_label, test_size=0.3)

# SGD optimizer with a small momentum term
opt = register_opt(optimizer_name='sgd', momentum=0.01, learning_rate=0.001)

# degree-5 polynomial model regularized with an elastic-net penalty
model = PolynomialRegression(
    degree=5,
    epochs=100,
    optimizer=opt,
    penalty='elastic',
    penalty_weight=0.5,
    l1_ratio=0.3,
)

fit_stats = model.fit(train_data, train_label)

# reshape both to column vectors so the objective sees matching (n, 1) arrays
targets = np.expand_dims(test_label, axis=1)
predictions = np.expand_dims(model.predict(test_data), axis=1)
mse = objective('mean_squared_error').forward(predictions, targets)

print('Mean Squared Error: {:.2f}'.format(mse))
# ---------------------------------------------------------------- Example #2
# MNIST CNN example. Split the data, capping the working set at 2000 samples
# (cut_off) to keep the demo fast; labels are cast to int class indices.
# NOTE(review): `mnist` is assumed to be a fetched dataset object with `.data`
# and `.target` attributes — the fetch call is not shown in this excerpt.
train_data, test_data, train_label, test_label = train_test_split(
    mnist.data,
    mnist.target.astype('int'),
    test_size=0.33,
    random_seed=5,
    cut_off=2000)

# scale pixel values into the range [0, 1] as float32
train_data = range_normalize(train_data.astype('float32'), 0, 1)
test_data = range_normalize(test_data.astype('float32'), 0, 1)

# visual sanity check: plot the first 40 training images with their labels
plot_img_samples(train_data[:40], train_label[:40], dataset='mnist')

# optimizer definition: Adam with a small momentum term
opt = register_opt(optimizer_name='adam', momentum=0.01, lr=0.001)

# model definition: conv -> dropout -> batchnorm -> conv -> pool -> dropout
# input_shape is channels-first: (1, 28, 28) single-channel 28x28 images
model = Sequential(init_method='he_uniform')
model.add(
    Conv2D(filters=32,
           kernel_size=(3, 3),
           activation='relu',
           input_shape=(1, 28, 28),
           padding='same'))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(
    Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# NOTE(review): snippet is truncated here — the classifier head, compile and
# fit steps are missing from this excerpt.
from ztlearn.dl.layers import LSTM, Dense, Flatten
from ztlearn.datasets.fashion import fetch_fashion_mnist

# Fashion-MNIST LSTM classifier example.
fashion_mnist = fetch_fashion_mnist()
# 70/30 split, labels cast to int, working set capped at 2000 samples
train_data, test_data, train_label, test_label = train_test_split(
    fashion_mnist.data,
    fashion_mnist.target.astype('int'),
    test_size=0.3,
    random_seed=15,
    cut_off=2000)

# visual sanity check: plot the first 40 training images with their labels
plot_img_samples(train_data[:40], train_label[:40], dataset='mnist')

# optimizer definition: RMSprop with a small momentum term
opt = register_opt(optimizer_name='rmsprop', momentum=0.01, lr=0.001)

# model definition: each 28x28 image is fed to the LSTM as a length-28
# sequence of 28-dimensional rows, then flattened into a 10-way softmax
model = Sequential()
model.add(LSTM(128, activation='tanh', input_shape=(28, 28)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))  # 10 digits classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('fashion mnist lstm')

model_epochs = 100
# reshape flat samples to (n, 28, 28) sequences; labels are one-hot encoded
fit_stats = model.fit(train_data.reshape(-1, 28, 28),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      # NOTE(review): call truncated in this excerpt
# ---------------------------------------------------------------- Example #4
# MNIST GAN example: hyperparameters and the generator network builder.
latent_dim = 100            # dimensionality of the generator's input noise
batch_size = 128
half_batch = int(batch_size * 0.5)  # discriminator trains on half real / half fake

verbose   = True
init_type = 'he_uniform'

gen_epoch = 500             # how often (in epochs) to sample generated images
gen_noise = np.random.normal(0, 1, (36, latent_dim)) # 36 as batch size and is also the number of sample to be generated at the prediction stage

model_epochs = 8000
model_name   = 'mnist_gan'
# per-epoch training curves for both networks
model_stats  = {'d_train_loss': [], 'd_train_acc': [], 'g_train_loss': [], 'g_train_acc': []}

# separate Adam optimizers; the generator uses a 10x smaller learning rate
d_opt = register_opt(optimizer_name = 'adam', beta1 = 0.5, lr = 0.001)
g_opt = register_opt(optimizer_name = 'adam', beta1 = 0.5, lr = 0.0001)

def stack_generator_layers(init):
    # Build the generator: three widening Dense+ReLU+BatchNorm stages mapping
    # latent noise to a tanh-activated image vector of size img_dim.
    # NOTE(review): `img_dim` is defined outside this excerpt.
    model = Sequential(init_method = init)
    model.add(Dense(256, input_shape = (latent_dim,)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum = 0.8))
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum = 0.8))
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum = 0.8))
    model.add(Dense(img_dim, activation = 'tanh'))
    # NOTE(review): function truncated in this excerpt — presumably ends with
    # `return model`; confirm against the original source.
# ---------------------------------------------------------------- Example #5
# DCGAN example: hyperparameters and the convolutional generator builder.
verbose = True
init_type = 'he_uniform'

gen_epoch = 50              # how often (in epochs) to sample generated images
gen_noise = np.random.normal(
    0, 1, (36, latent_dim))  # for tiles 6 by 6 i.e (36) image generation

model_epochs = 600
# per-epoch training curves for both networks
model_stats = {
    'd_train_loss': [],
    'd_train_acc': [],
    'g_train_loss': [],
    'g_train_acc': []
}

# separate Adam optimizers for discriminator and generator (same settings)
d_opt = register_opt(optimizer_name='adam', beta1=0.5, learning_rate=0.0002)
g_opt = register_opt(optimizer_name='adam', beta1=0.5, learning_rate=0.0002)


def stack_generator_layers(init):
    # Build the generator: project noise to a 64x2x2 feature map, then
    # upsample through conv blocks to an img_channels-deep output image.
    # NOTE(review): `latent_dim` and `img_channels` are defined outside
    # this excerpt.
    model = Sequential(init_method=init)
    model.add(Dense(64 * 2 * 2, input_shape=(latent_dim, )))
    model.add(Activation('leaky_relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Reshape((64, 2, 2)))
    model.add(UpSampling2D())
    model.add(Conv2D(32, kernel_size=(3, 3), padding='same'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation('leaky_relu'))
    model.add(UpSampling2D())
    model.add(Conv2D(img_channels, kernel_size=(3, 3), padding='same'))
    # NOTE(review): function truncated in this excerpt — presumably ends with
    # a final activation and `return model`; confirm against the original.
# ---------------------------------------------------------------- Example #6
# -*- coding: utf-8 -*-

from ztlearn.utils import *
from ztlearn.dl.models import Sequential
from ztlearn.optimizers import register_opt
from ztlearn.dl.layers import LSTM, Flatten, Dense


# Character-level LSTM on a Tiny Shakespeare excerpt.
# NOTE(review): file handle from open() is never closed — in the full script
# a `with` block would be preferable; kept byte-identical here.
text = open('../../../ztlearn/datasets/text/tinyshakespeare_short.txt').read().lower()
# build (sequence, next-char) training pairs: windows of 30 chars, stride 1
x, y, len_chars = gen_char_sequence_xtym(text, maxlen = 30, step = 1)
del text  # free the raw corpus once sequences are built

train_data, test_data, train_label, test_label = train_test_split(x, y, test_size = 0.4)

# optimizer definition: RMSprop with momentum
opt = register_opt(optimizer_name = 'rmsprop', momentum = 0.1, learning_rate = 0.01)

# model definition: LSTM over (30, vocab) one-hot windows, softmax over vocab
model = Sequential()
model.add(LSTM(128, activation = 'tanh', input_shape = (30, len_chars)))
model.add(Flatten())
model.add(Dense(len_chars,  activation = 'softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer = opt)

model.summary('shakespeare lstm')

model_epochs = 20
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size      = 128,
                      epochs          = model_epochs,
                      # NOTE(review): call truncated in this excerpt