def stack_decoder_layers(init):
    """Assemble the autoencoder decoder: latent vector -> reconstructed image.

    :param init: weight initialization scheme passed to ``Sequential``.
    :return: the stacked (uncompiled) decoder model.
    """
    decoder = Sequential(init_method=init)
    # Layers are declared once and added in order; `latent_dim` and `img_dim`
    # are module-level globals defined elsewhere in this file.
    layer_stack = (
        Dense(256, activation='relu', input_shape=(latent_dim, )),
        BatchNormalization(),
        Dense(512, activation='relu'),
        BatchNormalization(),
        Dense(img_dim, activation='sigmoid'),  # pixel outputs in [0, 1]
    )
    for layer in layer_stack:
        decoder.add(layer)
    return decoder
def stack_discriminator_layers(init):
    """Assemble the GAN discriminator: flat image vector -> 2-way score.

    :param init: weight initialization scheme passed to ``Sequential``.
    :return: the stacked (uncompiled) discriminator model.
    """
    critic = Sequential(init_method=init)
    # `img_dim` is a module-level global defined elsewhere in this file.
    layer_stack = (
        Dense(256, input_shape=(img_dim, )),
        Activation('leaky_relu', alpha=0.2),
        Dropout(0.25),
        Dense(128),
        Activation('leaky_relu', alpha=0.2),
        Dropout(0.25),
        Dense(2, activation='sigmoid'),  # real/fake output pair
    )
    for layer in layer_stack:
        critic.add(layer)
    return critic
def stack_generator_layers(init):
    """Assemble the GAN generator: latent vector -> flat image vector.

    :param init: weight initialization scheme passed to ``Sequential``.
    :return: the stacked (uncompiled) generator model.
    """
    generator = Sequential(init_method=init)
    # First block carries the input shape; `latent_dim` / `img_dim` are
    # module-level globals defined elsewhere in this file.
    generator.add(Dense(128, input_shape=(latent_dim, )))
    generator.add(Activation('leaky_relu'))
    generator.add(BatchNormalization(momentum=0.8))
    # Two more Dense -> LeakyReLU -> BatchNorm blocks of growing width.
    for units in (256, 512):
        generator.add(Dense(units))
        generator.add(Activation('leaky_relu'))
        generator.add(BatchNormalization(momentum=0.8))
    generator.add(Dense(img_dim, activation='tanh'))  # outputs in [-1, 1]
    return generator
def stack_discriminator_layers(init):
    """Assemble the convolutional GAN discriminator: image -> 2-way score.

    :param init: weight initialization scheme passed to ``Sequential``.
    :return: the stacked (uncompiled) discriminator model.
    """
    critic = Sequential(init_method=init)
    # Two Conv -> LeakyReLU -> Dropout blocks; only the first declares the
    # input shape (`img_dims` is a module-level global defined elsewhere).
    critic.add(Conv2D(64, kernel_size=(5, 5), padding='same',
                      input_shape=img_dims))
    critic.add(Activation('leaky_relu'))
    critic.add(Dropout(0.25))
    critic.add(Conv2D(128, kernel_size=(5, 5), padding='same'))
    critic.add(Activation('leaky_relu'))
    critic.add(Dropout(0.25))
    # Classification head.
    critic.add(Flatten())
    critic.add(Dense(2))
    critic.add(Activation('sigmoid'))
    return critic
def stack_generator_layers(init):
    """Assemble the convolutional GAN generator: latent vector -> image.

    Projects the latent code to a 128x7x7 feature map, then upsamples
    twice (7 -> 14 -> 28) with 5x5 convolutions.

    :param init: weight initialization scheme passed to ``Sequential``.
    :return: the stacked (uncompiled) generator model.
    """
    generator = Sequential(init_method=init)
    # Layer order (including BatchNorm before the second activation) is
    # preserved exactly; `latent_dim` / `img_channels` are module globals.
    layer_stack = (
        Dense(128 * 7 * 7, input_shape=(latent_dim, )),
        Activation('leaky_relu'),
        BatchNormalization(momentum=0.8),
        Reshape((128, 7, 7)),
        UpSampling2D(),
        Conv2D(64, kernel_size=(5, 5), padding='same'),
        BatchNormalization(momentum=0.8),
        Activation('leaky_relu'),
        UpSampling2D(),
        Conv2D(img_channels, kernel_size=(5, 5), padding='same'),
        Activation('tanh'),  # outputs in [-1, 1]
    )
    for layer in layer_stack:
        generator.add(layer)
    return generator
# Train a small channels-first CNN classifier on 1x8x8 images.
# NOTE(review): `opt`, `train_data`, `train_label`, `test_data`, `test_label`,
# `one_hot` and `unhot` are defined outside this chunk — presumably earlier in
# the example file; verify before running in isolation.
model = Sequential(init_method='he_uniform')
model.add(
    Conv2D(filters=32, kernel_size=(3, 3), activation='relu',
           input_shape=(1, 8, 8),  # channels-first: 1 channel, 8x8 pixels
           padding='same'))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(
    Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(BatchNormalization())
# Classification head.
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(10, activation='softmax'))  # 10 digit classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model_epochs = 12
fit_stats = model.fit(train_data.reshape(-1, 1, 8, 8),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 1, 8, 8),
                                       one_hot(test_label)),
                      shuffle_data=True)

# Second positional argument — presumably a train/predict mode flag; confirm
# against the ztlearn `predict` signature.
predictions = unhot(model.predict(test_data.reshape(-1, 1, 8, 8), True))
# NOTE(review): the next line is the tail of a `train_test_split(...)` call
# whose opening (and the `train_data, ... =` assignment) lies outside this
# chunk — left exactly as found.
data.target, test_size = 0.3, random_seed = 3)

# plot samples of training data
plot_img_samples(train_data, train_label, dataset = 'cifar', channels = 3)

# Flatten each CIFAR image to a 3072-vector and standardize it.
reshaped_image_dims = 3 * 32 * 32 # ==> (channels * height * width)
reshaped_train_data = z_score(train_data.reshape(train_data.shape[0],
                                                 reshaped_image_dims).astype('float32'))
reshaped_test_data = z_score(test_data.reshape(test_data.shape[0],
                                               reshaped_image_dims).astype('float32'))

# optimizer definition
opt = register_opt(optimizer_name = 'adam', momentum = 0.01, lr = 0.0001)

# MLP classifier; the 100-way softmax head suggests CIFAR-100 — confirm
# against the dataset loaded above this chunk.
model = Sequential()
model.add(Dense(1024, input_shape = (3072, )))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(100))
model.add(Activation('softmax'))
model.compile(loss = 'cce', optimizer = opt)
from ztlearn.dl.models import Sequential
from ztlearn.dl.optimizers import register_opt
from ztlearn.dl.layers import LSTM, Flatten, Dense

# Character-level LSTM example on the Tiny Shakespeare corpus.
# NOTE(review): `gen_char_sequence_xtym`, `train_test_split` and `plot_metric`
# are not imported above — presumably brought in elsewhere (e.g. a wildcard
# `ztlearn.utils` import earlier in the file); confirm before running.

# Read and lowercase the corpus; the `with` block guarantees the file handle
# is closed (the original `open(...).read()` left it to the GC).
with open('../../data/text/tinyshakespeare.txt') as corpus:
    text = corpus.read().lower()

# Build fixed-length (maxlen=30) character windows and their targets.
x, y, len_chars = gen_char_sequence_xtym(text, maxlen=30, step=1)
del text  # free the raw corpus once sequences are built

train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.4)

opt = register_opt(optimizer_name='rmsprop', momentum=0.1, learning_rate=0.01)

# Model definition: LSTM over 30-step one-hot character windows, then a
# softmax over the character vocabulary.
model = Sequential()
model.add(LSTM(128, activation='tanh', input_shape=(30, len_chars)))
model.add(Flatten())
model.add(Dense(len_chars, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model_epochs = 2
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

plot_metric('Loss', model_epochs,
            fit_stats['train_loss'], fit_stats['valid_loss'])
plot_metric('Accuracy', model_epochs,
            fit_stats['train_acc'], fit_stats['valid_acc'])
# NOTE(review): the next line is the tail of a `train_test_split(...)` call
# whose opening lies outside this chunk — left exactly as found.
fashion_mnist.data,
    fashion_mnist.target.astype('int'),
    test_size=0.33,
    random_seed=5,
    cut_off=None)

# plot samples of training data
plot_tiled_img_samples(train_data[:40], train_label[:40], dataset='mnist')

# optimizer definition
# opt = register_opt(optimizer_name = 'nestrov', momentum = 0.01, lr = 0.0001)
opt = register_opt(optimizer_name='adam', momentum=0.001, lr=0.001)

# model definition: MLP over flattened 28x28 images.
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784, )))
model.add(Dropout(0.25))
model.add(BatchNormalization())
# NOTE(review): a 'relu' output layer paired with a 'cce' loss is unusual —
# 'softmax' is the conventional choice for a 10-way classifier; confirm intent.
model.add(Dense(10, activation='relu'))  # 10 fashion classes
model.compile(loss='cce', optimizer=opt)

model.summary()

model_epochs = 5
fit_stats = model.fit(train_data,
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, one_hot(test_label)),
                      shuffle_data=True)
from ztlearn.utils import *
from ztlearn.dl.layers import Dense
from ztlearn.dl.models import Sequential
from ztlearn.optimizers import register_opt
from ztlearn.datasets.iris import fetch_iris

# Iris MLP example: 4 features -> 10 hidden units -> 3 classes.
data = fetch_iris()
train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.target, test_size=0.3, random_seed=5)

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.1, learning_rate=0.01)

# model definition
model = Sequential()
model.add(Dense(10, activation='sigmoid', input_shape=(train_data.shape[1], )))
model.add(Dense(3, activation='sigmoid'))  # 3 iris classes
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('iris mlp')

model_epochs = 25
fit_stats = model.fit(train_data,
                      one_hot(train_label),
                      batch_size=10,
                      epochs=model_epochs,
                      validation_data=(test_data, one_hot(test_label)),
                      shuffle_data=True)

# NOTE(review): the original commented-out evaluate call paired `test_data`
# with `one_hot(train_label)` — corrected here to the matching test labels.
# eval_stats = model.evaluate(test_data, one_hot(test_label))
predictions = unhot(model.predict(test_data))
# Sentence-classification RNN over embedded token sequences.
# NOTE(review): `paragraph`, `get_sentence_tokens`, `one_hot`, `np`,
# `train_test_split`, `register_opt` and the layer classes come from outside
# this chunk; verify they are imported earlier in the file.
sentences_tokens, vocab_size, longest_sentence = get_sentence_tokens(paragraph)
# Hand-labelled binary targets, one per sentence in `paragraph`.
sentence_targets = one_hot(np.array([1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1]))

train_data, test_data, train_label, test_label = train_test_split(sentences_tokens,
                                                                  sentence_targets,
                                                                  test_size = 0.2,
                                                                  random_seed = 5)

# optimizer definition
opt = register_opt(optimizer_name = 'adamax', momentum = 0.01, lr = 0.001)

model = Sequential()
model.add(Embedding(vocab_size, 2, input_length = longest_sentence))
model.add(RNN(5, activation = 'tanh', bptt_truncate = 2, input_shape = (2, longest_sentence)))
model.add(Flatten())
model.add(Dense(2, activation = 'softmax'))
model.compile(loss = 'bce', optimizer = opt)

model.summary('embedded sentences rnn')

"""
NOTE: batch size should equal the size of the embedding vectors and be
divisible by the training set size
"""

model_epochs = 500
# NOTE(review): this call is truncated — the remaining keyword arguments
# continue outside this chunk; left exactly as found.
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size = 2,
                      epochs = model_epochs,
# RNN example on generated multiplication sequences.
# NOTE(review): `gen_mult_sequence_xtym`, `train_test_split`, the print/plot
# helpers and the layer classes come from outside this chunk; verify imports.
x, y, seq_len = gen_mult_sequence_xtym(3000, 10, 10)
train_data, test_data, train_label, test_label = train_test_split(
    x, y, test_size=0.3)

# plot samples of training data
print_seq_samples(train_data, train_label, 0)

# optimizer definition
opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.01)

# model definition
model = Sequential()
# input_shape is (9, seq_len) — presumably 9 timesteps per sample; confirm
# against gen_mult_sequence_xtym's output shape.
model.add(RNN(5, activation='tanh', bptt_truncate=5, input_shape=(9, seq_len)))
model.add(Flatten())
model.add(Dense(seq_len, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=opt)

model.summary('seq rnn')

model_epochs = 15
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=100,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

print_seq_results(model.predict(test_data), test_label, test_data)

model_name = model.model_name
# NOTE(review): this call is truncated — its remaining arguments continue
# outside this chunk; left exactly as found.
plot_metric('loss',
# plot samples of training data plot_img_samples(train_data, train_label, dataset = 'cifar', channels = 3) reshaped_image_dims = 3 * 1024 # ==> (channels * (height * width)) reshaped_train_data = z_score(train_data.reshape(train_data.shape[0], reshaped_image_dims).astype('float32')) reshaped_test_data = z_score(test_data.reshape(test_data.shape[0], reshaped_image_dims).astype('float32')) # optimizer definition opt = register_opt(optimizer_name = 'adam', momentum = 0.01, lr = 0.0001) # model definition model = Sequential() model.add(RNN(256, activation = 'tanh', bptt_truncate = 5, input_shape = (3, 1024))) model.add(Flatten()) model.add(Dense(100, activation = 'softmax')) # 10 digits classes model.compile(loss = 'categorical_crossentropy', optimizer = opt) model.summary(model_name = 'cifar-100 rnn') model_epochs = 10 # add more epochs fit_stats = model.fit(reshaped_train_data.reshape(-1, 3, 1024), one_hot(train_label), batch_size = 128, epochs = model_epochs, validation_data = (reshaped_test_data.reshape(-1, 3, 1024), one_hot(test_label)), shuffle_data = True) predictions = unhot(model.predict(reshaped_test_data.reshape(-1, 3, 1024), True)) print_results(predictions, test_label) plot_img_results(test_data, test_label, predictions, dataset = 'cifar', channels = 3)