    def test_vbs1(self):
        with tf.Graph().as_default():
            # Data loading and preprocessing
            import zqtflearn.datasets.mnist as mnist
            X, Y, testX, testY = mnist.load_data(one_hot=True)
            X = X.reshape([-1, 28, 28, 1])
            testX = testX.reshape([-1, 28, 28, 1])
            X = X[:20, :, :, :]
            Y = Y[:20, :]
            testX = testX[:10, :, :, :]
            testY = testY[:10, :]

            # Building convolutional network
            network = input_data(shape=[None, 28, 28, 1], name='input')
            network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
            network = max_pool_2d(network, 2)
            network = local_response_normalization(network)
            network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
            network = max_pool_2d(network, 2)
            network = local_response_normalization(network)
            network = fully_connected(network, 128, activation='tanh')
            network = dropout(network, 0.8)
            network = fully_connected(network, 256, activation='tanh')
            network = dropout(network, 0.8)
            network = fully_connected(network, 10, activation='softmax')
            network = regression(network, optimizer='adam', learning_rate=0.01,
                                 loss='categorical_crossentropy', name='target')

            # Training
            model = zqtflearn.DNN(network, tensorboard_verbose=3)
            model.fit({'input': X}, {'target': Y}, n_epoch=1,
                      batch_size=10,
                      validation_set=({'input': testX}, {'target': testY}),
                      validation_batch_size=5,
                      snapshot_step=10, show_metric=True,
                      run_id='convnet_mnist_vbs')

            self.assertEqual(model.train_ops[0].validation_batch_size, 5)
            self.assertEqual(model.train_ops[0].batch_size, 10)
    def test_vm1(self):
        with tf.Graph().as_default():
            # Data loading and preprocessing
            import zqtflearn.datasets.mnist as mnist
            X, Y, testX, testY = mnist.load_data(one_hot=True)
            X = X.reshape([-1, 28, 28, 1])
            testX = testX.reshape([-1, 28, 28, 1])
            X = X[:10, :, :, :]
            Y = Y[:10, :]

            # Building convolutional network
            network = input_data(shape=[None, 28, 28, 1], name='input')
            network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
            network = max_pool_2d(network, 2)
            network = local_response_normalization(network)
            network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
            network = max_pool_2d(network, 2)
            network = local_response_normalization(network)
            network = fully_connected(network, 128, activation='tanh')
            network = dropout(network, 0.8)
            network = fully_connected(network, 256, activation='tanh')
            network = dropout(network, 0.8)

            # Construct two variables to add as additional "validation monitors".
            # These variables are evaluated each time validation happens (e.g.
            # at a snapshot), and the results are summarized and output to the
            # tensorboard events file, together with the accuracy and loss plots.
            #
            # Here, we generate a dummy variable given by the sum over the current
            # network tensor, and a constant variable. In practice, the validation
            # monitor may present useful information, like confusion matrix
            # entries, or an AUC metric.
            with tf.name_scope('CustomMonitor'):
                test_var = tf.reduce_sum(tf.cast(network, tf.float32), name="test_var")
                test_const = tf.constant(32.0, name="custom_constant")

            print("network=%s, test_var=%s" % (network, test_var))

            network = fully_connected(network, 10, activation='softmax')
            network = regression(network, optimizer='adam', learning_rate=0.01,
                                 loss='categorical_crossentropy', name='target',
                                 validation_monitors=[test_var, test_const])

            # Training
            model = zqtflearn.DNN(network, tensorboard_verbose=3)
            model.fit({'input': X}, {'target': Y}, n_epoch=1,
                      validation_set=({'input': testX}, {'target': testY}),
                      snapshot_step=10, show_metric=True, run_id='convnet_mnist')

            # Check for validation monitor variables.
            ats = tf.get_collection("Adam_testing_summaries")
            print("ats=%s" % ats)
            # Four variables should be summarized: [loss, test_var, test_const, accuracy]
            self.assertTrue(len(ats) == 4)

            session = model.session
            print("session=%s" % session)
            trainer = model.trainer
            print("train_ops = %s" % trainer.train_ops)
            top = trainer.train_ops[0]
            vmtset = top.validation_monitors_T
            print("validation_monitors_T = %s" % vmtset)
            with model.session.as_default():
                ats_var_val = zqtflearn.variables.get_value(vmtset[0])
                ats_const_val = zqtflearn.variables.get_value(vmtset[1])
            print("summary values: var=%s, const=%s" % (ats_var_val, ats_const_val))
            # Test to make sure the constant made it through.
            self.assertTrue(ats_const_val == 32)
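# The comment in test_vm1 notes that real validation monitors often carry
# richer diagnostics, such as confusion-matrix entries or an AUC metric.
# Below is a minimal sketch of how such a monitor could be built, assuming
# TF1-style prediction and one-hot target tensors; `make_confusion_monitor`,
# `y_pred`, and `y_true` are hypothetical names, not part of the test suite.
def make_confusion_monitor(y_pred, y_true, num_classes=10):
    """Build a scalar validation monitor from confusion-matrix entries."""
    with tf.name_scope('ConfusionMonitor'):
        pred_labels = tf.argmax(y_pred, 1)  # predicted class per example
        true_labels = tf.argmax(y_true, 1)  # ground-truth class per example
        cm = tf.confusion_matrix(true_labels, pred_labels, num_classes=num_classes)
        # The trace counts correct predictions; a scalar tensor summarizes
        # cleanly in the tensorboard events file alongside loss and accuracy.
        return tf.reduce_sum(tf.diag_part(tf.cast(cm, tf.float32)), name='cm_trace')

# Usage sketch: pass the returned tensor to
# regression(..., validation_monitors=[make_confusion_monitor(net, targets)]).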
    if activation:
        if isinstance(activation, str):
            net = activations.get(activation)(net)
        elif hasattr(activation, '__call__'):
            net = activation(net)
        else:
            raise ValueError("Invalid Activation.")
    return net


X, Y = oxflower17.load_data(one_hot=True, resize_pics=(299, 299))

num_classes = 17
dropout_keep_prob = 0.8

network = input_data(shape=[None, 299, 299, 3])
conv1a_3_3 = relu(batch_normalization(
    conv_2d(network, 32, 3, strides=2, bias=False, padding='VALID',
            activation=None, name='Conv2d_1a_3x3')))
conv2a_3_3 = relu(batch_normalization(
    conv_2d(conv1a_3_3, 32, 3, bias=False, padding='VALID',
            activation=None, name='Conv2d_2a_3x3')))
""" from __future__ import division, print_function, absolute_import import zqtflearn from zqtflearn.layers.core import input_data, dropout, fully_connected from zqtflearn.layers.conv import conv_2d, max_pool_2d from zqtflearn.layers.estimator import regression # Data loading and preprocessing import zqtflearn.datasets.oxflower17 as oxflower17 X, Y = oxflower17.load_data(one_hot=True) # Building 'VGG Network' network = input_data(shape=[None, 224, 224, 3]) network = conv_2d(network, 64, 3, activation='relu') network = conv_2d(network, 64, 3, activation='relu') network = max_pool_2d(network, 2, strides=2) network = conv_2d(network, 128, 3, activation='relu') network = conv_2d(network, 128, 3, activation='relu') network = max_pool_2d(network, 2, strides=2) network = conv_2d(network, 256, 3, activation='relu') network = conv_2d(network, 256, 3, activation='relu') network = conv_2d(network, 256, 3, activation='relu') network = max_pool_2d(network, 2, strides=2) network = conv_2d(network, 512, 3, activation='relu')
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=100, value=0.)
testX = pad_sequences(testX, maxlen=100, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY)
testY = to_categorical(testY)

# Building convolutional network
network = input_data(shape=[None, 100], name='input')
network = zqtflearn.embedding(network, input_dim=10000, output_dim=128)
branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
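# The three parallel branches are typically merged and globally pooled
# before the classifier. A minimal sketch of the usual continuation of
# this text-CNN pattern (assumed, not recovered from this source; `merge`
# and `global_max_pool` would come from zqtflearn.layers.merge_ops and
# zqtflearn.layers.conv, and `tf` is the imported tensorflow module):
network = merge([branch1, branch2, branch3], mode='concat', axis=1)
network = tf.expand_dims(network, 2)
network = global_max_pool(network)
network = dropout(network, 0.5)
network = fully_connected(network, 2, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.001,
                     loss='categorical_crossentropy', name='target')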
from __future__ import division, print_function, absolute_import

import zqtflearn
from zqtflearn.layers.core import input_data, dropout, fully_connected
from zqtflearn.layers.conv import highway_conv_2d, max_pool_2d
from zqtflearn.layers.normalization import local_response_normalization, batch_normalization
from zqtflearn.layers.estimator import regression

# Data loading and preprocessing
import zqtflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=True)
X = X.reshape([-1, 28, 28, 1])
testX = testX.reshape([-1, 28, 28, 1])

# Building convolutional network
network = input_data(shape=[None, 28, 28, 1], name='input')

# Highway convolutions with pooling and dropout
for i in range(3):
    for j in [3, 2, 1]:
        network = highway_conv_2d(network, 16, j, activation='elu')
    network = max_pool_2d(network, 2)
    network = batch_normalization(network)

network = fully_connected(network, 128, activation='elu')
network = fully_connected(network, 256, activation='elu')
network = fully_connected(network, 10, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.01,
                     loss='categorical_crossentropy', name='target')
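# The script ends at the regression layer above; a typical training tail
# for this kind of example (parameter values assumed, mirroring the other
# MNIST scripts in this collection) would be:
model = zqtflearn.DNN(network, tensorboard_verbose=0)
model.fit({'input': X}, {'target': Y}, n_epoch=20,
          validation_set=({'input': testX}, {'target': testY}),
          show_metric=True, run_id='convnet_highway_mnist')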
Y = to_categorical(Y)
Y_test = to_categorical(Y_test)

# Real-time data preprocessing
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

# Real-time data augmentation
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)

# Convolutional network building
network = input_data(shape=[None, 32, 32, 3],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
network = conv_2d(network, 32, 3, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.5)
network = fully_connected(network, 10, activation='softmax')
network = regression(network, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

# Train using classifier
model = zqtflearn.DNN(network, tensorboard_verbose=0)
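# The script stops after constructing the DNN; a plausible training call
# (hyperparameters assumed, following the common CIFAR-10 example shape) is:
model.fit(X, Y, n_epoch=50, shuffle=True,
          validation_set=(X_test, Y_test),
          show_metric=True, batch_size=96,
          run_id='cifar10_cnn')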
from zqtflearn.layers.embedding_ops import embedding
from zqtflearn.layers.recurrent import bidirectional_rnn, BasicLSTMCell
from zqtflearn.layers.estimator import regression

# IMDB Dataset loading
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=200, value=0.)
testX = pad_sequences(testX, maxlen=200, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY)
testY = to_categorical(testY)

# Network building
net = input_data(shape=[None, 200])
net = embedding(net, input_dim=20000, output_dim=128)
net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
net = dropout(net, 0.5)
net = fully_connected(net, 2, activation='softmax')
net = regression(net, optimizer='adam', loss='categorical_crossentropy')

# Training
model = zqtflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=64)
""" Very Deep Convolutional Networks for Large-Scale Visual Recognition. Applying VGG 19-layers convolutional network to Imagenet classification task. References: Very Deep Convolutional Networks for Large-Scale Image Recognition. K. Simonyan, A. Zisserman. arXiv technical report, 2014. Links: http://arxiv.org/pdf/1409.1556 """ import zqtflearn from zqtflearn.layers.core import input_data, dropout, fully_connected from zqtflearn.layers.conv import conv_2d, max_pool_2d from zqtflearn.layers.estimator import regression # Building 'VGG Network' input_layer = input_data(shape=[None, 224, 224, 3]) block1_conv1 = conv_2d(input_layer, 64, 3, activation='relu', name='block1_conv1') block1_conv2 = conv_2d(block1_conv1, 64, 3, activation='relu', name='block1_conv2') block1_pool = max_pool_2d(block1_conv2, 2, strides=2, name='block1_pool') block2_conv1 = conv_2d(block1_pool, 128,
import zqtflearn
from zqtflearn.data_utils import shuffle, to_categorical
from zqtflearn.layers.core import input_data, dropout, flatten
from zqtflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d
from zqtflearn.layers.estimator import regression

# Data loading and preprocessing
from zqtflearn.datasets import cifar10
(X, Y), (X_test, Y_test) = cifar10.load_data()
X, Y = shuffle(X, Y)
Y = to_categorical(Y)
Y_test = to_categorical(Y_test)

# Building 'Network In Network'
network = input_data(shape=[None, 32, 32, 3])
network = conv_2d(network, 192, 5, activation='relu')
network = conv_2d(network, 160, 1, activation='relu')
network = conv_2d(network, 96, 1, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = dropout(network, 0.5)
network = conv_2d(network, 192, 5, activation='relu')
network = conv_2d(network, 192, 1, activation='relu')
network = conv_2d(network, 192, 1, activation='relu')
network = avg_pool_2d(network, 3, strides=2)
network = dropout(network, 0.5)
network = conv_2d(network, 192, 3, activation='relu')
network = conv_2d(network, 192, 1, activation='relu')
network = conv_2d(network, 10, 1, activation='relu')
network = avg_pool_2d(network, 8)
network = flatten(network)
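# The snippet ends at the flatten layer. Because the 10-filter conv head is
# not yet softmaxed, a softmax cross-entropy regression is the natural tail;
# the hyperparameters below are assumed, mirroring the CIFAR-10 example above:
network = regression(network, optimizer='adam',
                     loss='softmax_categorical_crossentropy',
                     learning_rate=0.001)

model = zqtflearn.DNN(network, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=50, shuffle=True,
          validation_set=(X_test, Y_test),
          show_metric=True, batch_size=128,
          run_id='nin_cifar10')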