def test_vbs1(self):
    with tf.Graph().as_default():
        # Data loading and preprocessing
        import zqtflearn.datasets.mnist as mnist
        X, Y, testX, testY = mnist.load_data(one_hot=True)
        X = X.reshape([-1, 28, 28, 1])
        testX = testX.reshape([-1, 28, 28, 1])
        X = X[:20, :, :, :]
        Y = Y[:20, :]
        testX = testX[:10, :, :, :]
        testY = testY[:10, :]

        # Building convolutional network
        network = input_data(shape=[None, 28, 28, 1], name='input')
        network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
        network = max_pool_2d(network, 2)
        network = local_response_normalization(network)
        network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
        network = max_pool_2d(network, 2)
        network = local_response_normalization(network)
        network = fully_connected(network, 128, activation='tanh')
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation='tanh')
        network = dropout(network, 0.8)
        network = fully_connected(network, 10, activation='softmax')
        network = regression(network, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy', name='target')

        # Training
        model = zqtflearn.DNN(network, tensorboard_verbose=3)
        model.fit({'input': X}, {'target': Y}, n_epoch=1,
                  batch_size=10,
                  validation_set=({'input': testX}, {'target': testY}),
                  validation_batch_size=5,
                  snapshot_step=10, show_metric=True,
                  run_id='convnet_mnist_vbs')

        self.assertEqual(model.train_ops[0].validation_batch_size, 5)
        self.assertEqual(model.train_ops[0].batch_size, 10)
""" K-Means Example """ from __future__ import division, print_function, absolute_import from zqtflearn.estimators import KMeans # Data loading and preprocessing import zqtflearn.datasets.mnist as mnist X, Y, testX, testY = mnist.load_data(one_hot=False) # K-Means training m = KMeans(n_clusters=10, distance='squared_euclidean') m.fit(X, display_step=10) # Testing print("Clusters center coordinates:") print(m.cluster_centers_vars) print("X[0] nearest cluster:") print(m.labels_[0]) print("Predicting testX[0] nearest cluster:") print(m.predict(testX[0])) print("Transforming testX[0] to a cluster-distance space:") print(m.transform(testX[0]))
Links:
    - [DCGAN Paper](https://arxiv.org/abs/1511.06434).

"""

from __future__ import division, print_function, absolute_import

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

import zqtflearn

# Data loading and preprocessing
import zqtflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data()
X = np.reshape(X, newshape=[-1, 28, 28, 1])

z_dim = 200  # Noise data points
total_samples = len(X)


# Generator
def generator(x, reuse=False):
    with tf.variable_scope('Generator', reuse=reuse):
        x = zqtflearn.fully_connected(x, n_units=7 * 7 * 128)
        x = zqtflearn.batch_normalization(x)
        x = tf.nn.tanh(x)
        x = tf.reshape(x, shape=[-1, 7, 7, 128])
        x = zqtflearn.upsample_2d(x, 2)
        x = zqtflearn.conv_2d(x, 64, 5, activation='tanh')
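        # NOTE: the excerpt above ends mid-generator. A minimal hedged
        # completion follows, modeled on the usual DCGAN-for-MNIST layout
        # (upsample back to 28x28, then project to one grayscale channel);
        # these last two layers are assumptions, not the original code.
        x = zqtflearn.upsample_2d(x, 2)
        x = zqtflearn.conv_2d(x, 1, 5, activation='sigmoid')
        return x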
- [VAE Paper](https://arxiv.org/abs/1312.6114)
- [MNIST Dataset](http://yann.lecun.com/exdb/mnist/)

"""

from __future__ import division, print_function, absolute_import

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import tensorflow as tf

import zqtflearn

# Data loading and preprocessing
import zqtflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=True)

# Params
original_dim = 784  # MNIST images are 28x28 pixels
hidden_dim = 256
latent_dim = 2

# Building the encoder
encoder = zqtflearn.input_data(shape=[None, 784], name='input_images')
encoder = zqtflearn.fully_connected(encoder, hidden_dim, activation='relu')
z_mean = zqtflearn.fully_connected(encoder, latent_dim)
z_std = zqtflearn.fully_connected(encoder, latent_dim)

# Sampler: Normal (gaussian) random distribution
eps = tf.random_normal(tf.shape(z_std), dtype=tf.float32, mean=0.,
                       stddev=1.0, name='epsilon')
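# NOTE: the excerpt ends at the noise tensor. A hedged sketch of the usual
# next step follows: the reparameterization trick, treating z_std as a
# log-variance (an assumption), plus a mirrored decoder whose layer sizes
# simply reuse hidden_dim and original_dim from above.
z = z_mean + tf.exp(z_std / 2) * eps

# Building the decoder (scoped so the layers could be reused for generation)
decoder = zqtflearn.fully_connected(z, hidden_dim, activation='relu',
                                    scope='decoder_h')
decoder = zqtflearn.fully_connected(decoder, original_dim,
                                    activation='sigmoid', scope='decoder_out')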
from __future__ import division, print_function, absolute_import

""" This tutorial will introduce how to combine TFLearn built-in ops with
any TensorFlow graph. """

import tensorflow as tf
import zqtflearn

# ----------------------------------
# Using TFLearn built-in ops example
# ----------------------------------

# Using MNIST Dataset
import zqtflearn.datasets.mnist as mnist
trainX, trainY, testX, testY = mnist.load_data(one_hot=True)

# User defined placeholders
with tf.Graph().as_default():
    # Model variables
    X = tf.placeholder("float", [None, 784])
    Y = tf.placeholder("float", [None, 10])

    W1 = tf.Variable(tf.random_normal([784, 256]))
    W2 = tf.Variable(tf.random_normal([256, 256]))
    W3 = tf.Variable(tf.random_normal([256, 10]))
    b1 = tf.Variable(tf.random_normal([256]))
    b2 = tf.Variable(tf.random_normal([256]))
    b3 = tf.Variable(tf.random_normal([10]))
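    # NOTE: the excerpt stops at the raw variables. A hedged sketch of how
    # they are typically wired to TFLearn built-in ops follows; the layer
    # sizes match the variables above, but the specific op choices
    # (categorical_crossentropy, accuracy_op, SGD) are assumptions.

    # Multilayer perceptron built from the raw TF variables
    def dnn(x):
        x = tf.tanh(tf.add(tf.matmul(x, W1), b1))
        x = tf.tanh(tf.add(tf.matmul(x, W2), b2))
        x = tf.add(tf.matmul(x, W3), b3)
        return x

    net = dnn(X)

    # TFLearn built-in ops plugged directly into the plain TF graph
    loss = zqtflearn.categorical_crossentropy(net, Y)
    acc = zqtflearn.metrics.accuracy_op(net, Y)
    optimizer = zqtflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=200)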
def test_vm1(self):
    with tf.Graph().as_default():
        # Data loading and preprocessing
        import zqtflearn.datasets.mnist as mnist
        X, Y, testX, testY = mnist.load_data(one_hot=True)
        X = X.reshape([-1, 28, 28, 1])
        testX = testX.reshape([-1, 28, 28, 1])
        X = X[:10, :, :, :]
        Y = Y[:10, :]

        # Building convolutional network
        network = input_data(shape=[None, 28, 28, 1], name='input')
        network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
        network = max_pool_2d(network, 2)
        network = local_response_normalization(network)
        network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
        network = max_pool_2d(network, 2)
        network = local_response_normalization(network)
        network = fully_connected(network, 128, activation='tanh')
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation='tanh')
        network = dropout(network, 0.8)

        # Construct two variables to add as additional "validation monitors".
        # These variables are evaluated each time validation happens (e.g. at
        # a snapshot), and the results are summarized and output to the
        # TensorBoard events file, together with the accuracy and loss plots.
        #
        # Here, we generate a dummy variable given by the sum over the current
        # network tensor, and a constant variable. In practice, the validation
        # monitor may present useful information, like confusion matrix
        # entries, or an AUC metric.
        with tf.name_scope('CustomMonitor'):
            test_var = tf.reduce_sum(tf.cast(network, tf.float32),
                                     name="test_var")
            test_const = tf.constant(32.0, name="custom_constant")
            print("network=%s, test_var=%s" % (network, test_var))

        network = fully_connected(network, 10, activation='softmax')
        network = regression(network, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy', name='target',
                             validation_monitors=[test_var, test_const])

        # Training
        model = zqtflearn.DNN(network, tensorboard_verbose=3)
        model.fit({'input': X}, {'target': Y}, n_epoch=1,
                  validation_set=({'input': testX}, {'target': testY}),
                  snapshot_step=10, show_metric=True, run_id='convnet_mnist')

        # Check for validation monitor variables: there should be four
        # variables being summarized: [loss, test_var, test_const, accuracy]
        ats = tf.get_collection("Adam_testing_summaries")
        print("ats=%s" % ats)
        self.assertEqual(len(ats), 4)

        session = model.session
        print("session=%s" % session)
        trainer = model.trainer
        print("train_ops = %s" % trainer.train_ops)
        top = trainer.train_ops[0]
        vmtset = top.validation_monitors_T
        print("validation_monitors_T = %s" % vmtset)
        with model.session.as_default():
            ats_var_val = zqtflearn.variables.get_value(vmtset[0])
            ats_const_val = zqtflearn.variables.get_value(vmtset[1])
            print("summary values: var=%s, const=%s"
                  % (ats_var_val, ats_const_val))
            # Test to make sure the constant made it through
            self.assertEqual(ats_const_val, 32)