# -- tail of the digits CNN example: remaining layers, training, reporting --
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(10, activation='softmax'))  # 10 digits classes

model.compile(loss='categorical_crossentropy', optimizer=opt)

model_epochs = 12

# the 8x8 digit images are fed to the network as single-channel tensors
fit_stats = model.fit(train_data.reshape(-1, 1, 8, 8),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 1, 8, 8), one_hot(test_label)),
                      shuffle_data=True)

predictions = unhot(model.predict(test_data.reshape(-1, 1, 8, 8), True))
print_results(predictions, test_label)
plot_digits_img_results(test_data, test_label, predictions)

plot_metric('Loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'])
plot_metric('Accuracy', model_epochs, fit_stats['train_acc'], fit_stats['valid_acc'])
# -- train the CIFAR autoencoder and visualise its reconstructions --
model_epochs = 500
fit_stats = autoencoder.fit(transformed_train_data,
                            transformed_train_label,
                            batch_size=128,
                            epochs=model_epochs,
                            validation_data=(transformed_test_data, transformed_test_label),
                            shuffle_data=True)

# regenerate the non-rescaled test labels for the generated-images plot
# (the split must reuse the same random_seed as the split made above)
_, _, _, test_label = train_test_split(data.data,
                                       data.target,
                                       test_size=0.2,
                                       random_seed=5)

predictions = autoencoder.predict(transformed_test_data).reshape((-1, channels, img_rows, img_cols))

model_name = autoencoder.model_name
plot_generated_img_samples(unhot(one_hot(test_label)),
                           predictions,
                           dataset='cifar',
                           channels=3,
                           to_save=False,
                           iteration=model_epochs,
                           model_name=model_name)

plot_metric('loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name=model_name)
# -- tail of the cifar-100 MLP example: head layers, training, reporting --
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(100))
model.add(Activation('softmax'))

model.compile(loss = 'cce', optimizer = opt)
model.summary(model_name = 'cifar-100 mlp')

model_epochs = 12 # change to 200 epochs
fit_stats = model.fit(reshaped_train_data,
                      one_hot(train_label),
                      batch_size = 128,
                      epochs = model_epochs,
                      validation_data = (reshaped_test_data, one_hot(test_label)),
                      shuffle_data = True)

eval_stats = model.evaluate(reshaped_test_data, one_hot(test_label))
predictions = unhot(model.predict(reshaped_test_data, True))
print_results(predictions, test_label)
plot_img_results(test_data, test_label, predictions, dataset = 'cifar', channels = 3)

# FIX: the evaluation plot below previously referenced a bare `model_name`
# that is never assigned in this script (NameError at runtime); bind it once
# from the model and use it for all three plots, as the sibling examples do.
model_name = model.model_name
plot_metric('loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name = model_name)
plot_metric('accuracy', model_epochs, fit_stats['train_acc'], fit_stats['valid_acc'], model_name = model_name)
plot_metric('evaluation',
            eval_stats['valid_batches'],
            eval_stats['valid_loss'],
            eval_stats['valid_acc'],
            model_name = model_name,
            legend = ['loss', 'acc'])
# -- tail of the cifar-10 CNN example: head layers, training, reporting --
model.add(BatchNormalization())
model.add(Dense(10, activation='softmax'))  # 10 digits classes

model.compile(loss='categorical_crossentropy', optimizer=opt)
model.summary('cifar-10 cnn')

model_epochs = 12  # change to 12 epochs
fit_stats = model.fit(train_data.reshape(-1, 3, 32, 32),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data.reshape(-1, 3, 32, 32), one_hot(test_label)),
                      shuffle_data=True)

predictions = unhot(model.predict(test_data.reshape(-1, 3, 32, 32), True))
print_results(predictions, test_label)
plot_img_results(test_data, test_label, predictions, dataset='cifar', channels=3)

model_name = model.model_name
plot_metric('loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name=model_name)
# FIX: this call was left unterminated in the original (syntax error);
# completed following the exact pattern used by the sibling examples.
plot_metric('accuracy', model_epochs, fit_stats['train_acc'], fit_stats['valid_acc'], model_name=model_name)
# FIX: the 10-way classifier output previously used activation='relu', which
# does not produce a probability distribution; categorical cross-entropy
# ('cce') expects softmax outputs, as every sibling example here uses.
model.add(Dense(10, activation='softmax'))  # 10 digits classes

model.compile(loss='cce', optimizer=opt)
model.summary()

model_epochs = 5
fit_stats = model.fit(train_data,
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(test_data, one_hot(test_label)),
                      shuffle_data=True)

eval_stats = model.evaluate(test_data, one_hot(test_label))
predictions = unhot(model.predict(test_data, True))
print_results(predictions, test_label)

# truncate to 40 samples; FIX: slice predictions too so all three arrays
# passed to the plot helper stay aligned in length.
plot_img_results(test_data[:40], test_label[:40], predictions[:40], dataset='mnist')

model_name = model.model_name
plot_metric('loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name=model_name)
plot_metric('accuracy', model_epochs, fit_stats['train_acc'], fit_stats['valid_acc'], model_name=model_name)
from ztlearn.utils import *
from ztlearn.dl.layers import LSTM
from ztlearn.dl.models import Sequential
from ztlearn.dl.optimizers import register_opt

# build a multiplication-sequence dataset and hold out 40% for validation
x, y, seq_len = gen_mult_sequence_xtyt(1000, 10, 10)
train_data, test_data, train_label, test_label = train_test_split(x, y, test_size = 0.4)
print_seq_samples(train_data, train_label)

opt = register_opt(optimizer_name = 'adagrad', momentum = 0.01, learning_rate = 0.01)

# Model definition: a single LSTM layer over the generated sequences
model = Sequential()
model.add(LSTM(10, activation = 'tanh', input_shape = (10, seq_len)))
model.compile(loss = 'categorical_crossentropy', optimizer = opt)

model_epochs = 100
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size = 100,
                      epochs = model_epochs,
                      validation_data = (test_data, test_label))

# (0, 2, 1) is presumably an axes permutation applied to the predictions
# before decoding along axis 2 — confirm against ztlearn's predict API
print_seq_results(model.predict(test_data, (0, 2, 1)), test_label, test_data, unhot_axis = 2)

plot_metric('Loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'])
plot_metric('Accuracy', model_epochs, fit_stats['train_acc'], fit_stats['valid_acc'])
# model definition: a single GRU layer trained on the generated sequences
model = Sequential()
model.add(GRU(10, activation='tanh', input_shape=(10, seq_len)))
model.compile(loss='categorical_crossentropy', optimizer=opt)
model.summary('seq gru')

model_epochs = 100
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=100,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

# (0, 2, 1) is presumably an axes permutation applied to the predictions
# before decoding along axis 2 — confirm against ztlearn's predict API
print_seq_results(model.predict(test_data, (0, 2, 1)), test_label, test_data, unhot_axis=2)

model_name = model.model_name
plot_metric('loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name=model_name)
plot_metric('accuracy', model_epochs, fit_stats['train_acc'], fit_stats['valid_acc'], model_name=model_name)
# stitch the encoder and decoder layer stacks into one autoencoder network
autoencoder = Sequential(init_method=init_type)
autoencoder.layers.extend(encoder.layers)
autoencoder.layers.extend(decoder.layers)
autoencoder.compile(loss='categorical_crossentropy', optimizer=opt)

images = range_normalize(data.data.astype(np.float32), 0, 1)  # rescale to range [0, 1]

# an autoencoder reconstructs its input, so the images serve as their own targets
train_data, test_data, train_label, test_label = train_test_split(images,
                                                                  images,
                                                                  test_size=0.2,
                                                                  random_seed=15)

model_epochs = 500
fit_stats = autoencoder.fit(train_data,
                            train_label,
                            batch_size=64,
                            epochs=model_epochs,
                            validation_data=(test_data, test_label),
                            shuffle_data=True)

# regenerate the non-rescaled test labels for the generated-digits plot
# (same random_seed as the split above so the sample ordering matches)
_, _, _, test_label = train_test_split(data.data, data.target, test_size=0.2, random_seed=15)

predictions = autoencoder.predict(test_data).reshape((-1, img_rows, img_cols))
plot_generated_digits_samples(unhot(one_hot(test_label)), predictions)

plot_metric('Loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'])
plot_metric('Accuracy', model_epochs, fit_stats['train_acc'], fit_stats['valid_acc'])
# iris classifier: two small sigmoid Dense layers
model = Sequential()
model.add(Dense(10, activation='sigmoid', input_shape=(train_data.shape[1], )))
model.add(Dense(3, activation='sigmoid'))  # 3 iris_classes
model.compile(loss='categorical_crossentropy', optimizer=opt)
model.summary('iris mlp')

model_epochs = 25
fit_stats = model.fit(train_data,
                      one_hot(train_label),
                      batch_size=10,
                      epochs=model_epochs,
                      validation_data=(test_data, one_hot(test_label)),
                      shuffle_data=True)

# NOTE(review): the disabled call below pairs test_data with train_label —
# if re-enabled, it should presumably use test_label instead.
# eval_stats = model.evaluate(test_data, one_hot(train_label))

predictions = unhot(model.predict(test_data))
print_results(predictions, test_label)

model_name = model.model_name
plot_metric('loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name=model_name)
plot_metric('accuracy', model_epochs, fit_stats['train_acc'], fit_stats['valid_acc'], model_name=model_name)
# -- tail of the cifar-10 MLP example: head layer, training, reporting --
model.add(Activation('softmax'))

model.compile(loss='cce', optimizer=opt)
model.summary(model_name='cifar-10 mlp')

model_epochs = 200  # change to 200 epochs
fit_stats = model.fit(transformed_train_data,
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(transformed_test_data, one_hot(test_label)),
                      shuffle_data=True)

eval_stats = model.evaluate(transformed_test_data, one_hot(test_label))
predictions = unhot(model.predict(transformed_test_data, True))
print_results(predictions, test_label)
plot_img_results(test_data, test_label, predictions, dataset='cifar', channels=3)

model_name = model.model_name
plot_metric('loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name=model_name)
# FIX: this call was left unterminated in the original (syntax error);
# completed following the exact pattern used by the sibling examples.
plot_metric('accuracy', model_epochs, fit_stats['train_acc'], fit_stats['valid_acc'], model_name=model_name)
opt = register_opt(optimizer_name = 'adamax', momentum = 0.01, lr = 0.001)

# sentence classifier: embedding -> RNN -> flatten -> 2-way softmax head
model = Sequential()
model.add(Embedding(vocab_size, 2, input_length = longest_sentence))
model.add(RNN(5, activation = 'tanh', bptt_truncate = 2, input_shape = (2, longest_sentence)))
model.add(Flatten())
model.add(Dense(2, activation = 'softmax'))
model.compile(loss = 'bce', optimizer = opt)
model.summary('embedded sentences rnn')

"""
NOTE: batch size should be equal the size of embedding vectors and
divisible by the training set size
"""
model_epochs = 500
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size = 2,
                      epochs = model_epochs,
                      validation_data = (test_data, test_label))

plot_metric('loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name = model.model_name)
plot_metric('accuracy', model_epochs, fit_stats['train_acc'], fit_stats['valid_acc'], model_name = model.model_name)

# test out with the first sentence - sentences_tokens[0]
output_array = model.predict(np.expand_dims(sentences_tokens[0], axis=0))
print(np.argmax(output_array))
# model definition: truncated-BPTT RNN over the generated sequences
model = Sequential()
model.add(RNN(5, activation='tanh', bptt_truncate=5, input_shape=(9, seq_len)))
model.add(Flatten())
model.add(Dense(seq_len, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=opt)
model.summary('seq rnn')

model_epochs = 15
fit_stats = model.fit(train_data,
                      train_label,
                      batch_size=100,
                      epochs=model_epochs,
                      validation_data=(test_data, test_label))

print_seq_results(model.predict(test_data), test_label, test_data)

model_name = model.model_name
plot_metric('loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name=model_name)
plot_metric('accuracy', model_epochs, fit_stats['train_acc'], fit_stats['valid_acc'], model_name=model_name)
# flatten each image to a vector and standardise with a z-score transform
reshaped_train_data = z_score(train_data.reshape(train_data.shape[0], reshaped_image_dims).astype('float32'))
reshaped_test_data = z_score(test_data.reshape(test_data.shape[0], reshaped_image_dims).astype('float32'))

# optimizer definition
opt = register_opt(optimizer_name = 'adam', momentum = 0.01, lr = 0.0001)

# model definition: RNN over the 3 colour channels (1024 pixels each)
model = Sequential()
model.add(RNN(256, activation = 'tanh', bptt_truncate = 5, input_shape = (3, 1024)))
model.add(Flatten())
model.add(Dense(10, activation = 'softmax'))  # 10 digits classes
model.compile(loss = 'categorical_crossentropy', optimizer = opt)
model.summary(model_name = 'cifar-10 rnn')

model_epochs = 100  # add more epochs
fit_stats = model.fit(reshaped_train_data.reshape(-1, 3, 1024),
                      one_hot(train_label),
                      batch_size = 128,
                      epochs = model_epochs,
                      validation_data = (reshaped_test_data.reshape(-1, 3, 1024), one_hot(test_label)),
                      shuffle_data = True)

predictions = unhot(model.predict(reshaped_test_data.reshape(-1, 3, 1024), True))
print_results(predictions, test_label)
plot_img_results(test_data, test_label, predictions, dataset = 'cifar', channels = 3)

model_name = model.model_name
plot_metric('loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name = model_name)
plot_metric('accuracy', model_epochs, fit_stats['train_acc'], fit_stats['valid_acc'], model_name = model_name)
# -- tail of the cifar-10 RNN example: head layer, training, reporting --
model.add(Dense(10, activation='softmax'))  # 10 digits classes

model.compile(loss='categorical_crossentropy', optimizer=opt)
model.summary(model_name='cifar-10 rnn')

model_epochs = 100  # add more epochs
fit_stats = model.fit(reshaped_train_data.reshape(-1, 3, 1024),
                      one_hot(train_label),
                      batch_size=128,
                      epochs=model_epochs,
                      validation_data=(reshaped_test_data.reshape(-1, 3, 1024), one_hot(test_label)),
                      shuffle_data=True)

predictions = unhot(model.predict(reshaped_test_data.reshape(-1, 3, 1024), True))
print_results(predictions, test_label)
plot_img_results(test_data, test_label, predictions, dataset='cifar', channels=3)

model_name = model.model_name
plot_metric('loss', model_epochs, fit_stats['train_loss'], fit_stats['valid_loss'], model_name=model_name)
# FIX: this call was left unterminated in the original (syntax error);
# completed following the exact pattern used by the sibling examples.
plot_metric('accuracy', model_epochs, fit_stats['train_acc'], fit_stats['valid_acc'], model_name=model_name)