Example #1
    def test_vbs1(self):

        with tf.Graph().as_default():
            # Data loading and preprocessing
            import zqtflearn.datasets.mnist as mnist
            X, Y, testX, testY = mnist.load_data(one_hot=True)
            X = X.reshape([-1, 28, 28, 1])
            testX = testX.reshape([-1, 28, 28, 1])
            X = X[:20, :, :, :]
            Y = Y[:20, :]
            testX = testX[:10, :, :, :]
            testY = testY[:10, :]

            # Building convolutional network
            network = input_data(shape=[None, 28, 28, 1], name='input')
            network = conv_2d(network,
                              32,
                              3,
                              activation='relu',
                              regularizer="L2")
            network = max_pool_2d(network, 2)
            network = local_response_normalization(network)
            network = conv_2d(network,
                              64,
                              3,
                              activation='relu',
                              regularizer="L2")
            network = max_pool_2d(network, 2)
            network = local_response_normalization(network)
            network = fully_connected(network, 128, activation='tanh')
            network = dropout(network, 0.8)
            network = fully_connected(network, 256, activation='tanh')
            network = dropout(network, 0.8)
            network = fully_connected(network, 10, activation='softmax')
            network = regression(network,
                                 optimizer='adam',
                                 learning_rate=0.01,
                                 loss='categorical_crossentropy',
                                 name='target')

            # Training
            model = zqtflearn.DNN(network, tensorboard_verbose=3)
            model.fit({'input': X}, {'target': Y},
                      n_epoch=1,
                      batch_size=10,
                      validation_set=({
                          'input': testX
                      }, {
                          'target': testY
                      }),
                      validation_batch_size=5,
                      snapshot_step=10,
                      show_metric=True,
                      run_id='convnet_mnist_vbs')

            self.assertEqual(model.train_ops[0].validation_batch_size, 5)
            self.assertEqual(model.train_ops[0].batch_size, 10)
Example #2
def make_core_network(network):
    network = zqtflearn.reshape(network, [-1, 28, 28, 1], name="reshape")
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    return network
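The helper above only builds the convolutional core; it still needs an input layer and a regression head before it can be trained. A minimal sketch of that wiring, assuming a flat 784-wide MNIST input and the same zqtflearn API used by the other examples on this page (the run_id is a placeholder):

import zqtflearn
from zqtflearn.layers.core import input_data
from zqtflearn.layers.estimator import regression

# Flat MNIST input; make_core_network() reshapes it to 28x28x1 internally.
net = input_data(shape=[None, 784], name="input")
net = make_core_network(net)
net = regression(net, optimizer='adam', learning_rate=0.01,
                 loss='categorical_crossentropy', name='target')

model = zqtflearn.DNN(net, tensorboard_verbose=0)
# model.fit({'input': X}, {'target': Y}, n_epoch=1, run_id='core_network_demo')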
Example #3
def block17(net, scale=1.0, activation="relu"):
    tower_conv = relu(
        batch_normalization(
            conv_2d(net,
                    192,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_1x1')))
    tower_conv_1_0 = relu(
        batch_normalization(
            conv_2d(net,
                    128,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_0a_1x1')))
    tower_conv_1_1 = relu(
        batch_normalization(
            conv_2d(tower_conv_1_0,
                    160, [1, 7],
                    bias=False,
                    activation=None,
                    name='Conv2d_0b_1x7')))
    tower_conv_1_2 = relu(
        batch_normalization(
            conv_2d(tower_conv_1_1,
                    192, [7, 1],
                    bias=False,
                    activation=None,
                    name='Conv2d_0c_7x1')))
    tower_mixed = merge([tower_conv, tower_conv_1_2], mode='concat', axis=3)
    tower_out = relu(
        batch_normalization(
            conv_2d(tower_mixed,
                    net.get_shape()[3],
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_1x1')))
    net += scale * tower_out
    if activation:
        if isinstance(activation, str):
            net = activations.get(activation)(net)
        elif hasattr(activation, '__call__'):
            net = activation(net)
        else:
            raise ValueError("Invalid Activation.")
    return net
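block17 is the 17x17 residual block from the Inception-ResNet-v2 example and is normally applied many times in a row. A hedged sketch of that usage, assuming zqtflearn exposes tflearn's repeat helper and using an illustrative 17x17x1088 placeholder in place of the real network stem:

import zqtflearn
from zqtflearn.layers.core import input_data

# Stand-in for the feature map produced by the Inception-ResNet-v2 stem.
net = input_data(shape=[None, 17, 17, 1088])
# Apply the residual block 20 times; repeat() forwards scale=0.1 to every call.
net = zqtflearn.repeat(net, 20, block17, scale=0.1)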
Example #4
    def test_vm1(self):

        with tf.Graph().as_default():
            # Data loading and preprocessing
            import zqtflearn.datasets.mnist as mnist
            X, Y, testX, testY = mnist.load_data(one_hot=True)
            X = X.reshape([-1, 28, 28, 1])
            testX = testX.reshape([-1, 28, 28, 1])
            X = X[:10, :, :, :]
            Y = Y[:10, :]

            # Building convolutional network
            network = input_data(shape=[None, 28, 28, 1], name='input')
            network = conv_2d(network,
                              32,
                              3,
                              activation='relu',
                              regularizer="L2")
            network = max_pool_2d(network, 2)
            network = local_response_normalization(network)
            network = conv_2d(network,
                              64,
                              3,
                              activation='relu',
                              regularizer="L2")
            network = max_pool_2d(network, 2)
            network = local_response_normalization(network)
            network = fully_connected(network, 128, activation='tanh')
            network = dropout(network, 0.8)
            network = fully_connected(network, 256, activation='tanh')
            network = dropout(network, 0.8)

            # Construct two variables to add as additional "validation monitors".
            # These variables are evaluated each time validation happens (e.g. at a snapshot),
            # and the results are summarized and written to the tensorboard events file,
            # together with the accuracy and loss plots.
            #
            # Here, we generate a dummy variable given by the sum over the current
            # network tensor, and a constant variable.  In practice, a validation
            # monitor may present useful information, like confusion matrix
            # entries or an AUC metric.
            with tf.name_scope('CustomMonitor'):
                test_var = tf.reduce_sum(tf.cast(network, tf.float32),
                                         name="test_var")
                test_const = tf.constant(32.0, name="custom_constant")

            print("network=%s, test_var=%s" % (network, test_var))
            network = fully_connected(network, 10, activation='softmax')
            network = regression(network,
                                 optimizer='adam',
                                 learning_rate=0.01,
                                 loss='categorical_crossentropy',
                                 name='target',
                                 validation_monitors=[test_var, test_const])

            # Training
            model = zqtflearn.DNN(network, tensorboard_verbose=3)
            model.fit({'input': X}, {'target': Y},
                      n_epoch=1,
                      validation_set=({
                          'input': testX
                      }, {
                          'target': testY
                      }),
                      snapshot_step=10,
                      show_metric=True,
                      run_id='convnet_mnist')

            # check for validation monitor variables
            ats = tf.get_collection("Adam_testing_summaries")
            print("ats=%s" % ats)
            self.assertEqual(
                len(ats), 4
            )  # four variables should be summarized: [loss, test_var, test_const, accuracy]

            session = model.session
            print("session=%s" % session)
            trainer = model.trainer
            print("train_ops = %s" % trainer.train_ops)
            top = trainer.train_ops[0]
            vmtset = top.validation_monitors_T
            print("validation_monitors_T = %s" % vmtset)
            with model.session.as_default():
                ats_var_val = zqtflearn.variables.get_value(vmtset[0])
                ats_const_val = zqtflearn.variables.get_value(vmtset[1])
            print("summary values: var=%s, const=%s" %
                  (ats_var_val, ats_const_val))
            self.assertEqual(
                ats_const_val,
                32)  # make sure the constant made it through


Example #5
X, Y = oxflower17.load_data(one_hot=True, resize_pics=(299, 299))

num_classes = 17
dropout_keep_prob = 0.8

network = input_data(shape=[None, 299, 299, 3])
conv1a_3_3 = relu(
    batch_normalization(
        conv_2d(network,
                32,
                3,
                strides=2,
                bias=False,
                padding='VALID',
                activation=None,
                name='Conv2d_1a_3x3')))
conv2a_3_3 = relu(
    batch_normalization(
        conv_2d(conv1a_3_3,
                32,
                3,
                bias=False,
                padding='VALID',
                activation=None,
                name='Conv2d_2a_3x3')))
conv2b_3_3 = relu(
    batch_normalization(
        conv_2d(conv2a_3_3,
                64,
                3,
                bias=False,
                activation=None,
                # remaining arguments completed to match the reference
                # Inception-ResNet-v2 stem; the original snippet was cut off here
                name='Conv2d_2b_3x3')))
Example #6
from __future__ import division, print_function, absolute_import

import zqtflearn
from zqtflearn.layers.core import input_data, dropout, fully_connected
from zqtflearn.layers.conv import conv_2d, max_pool_2d
from zqtflearn.layers.estimator import regression

# Data loading and preprocessing
import zqtflearn.datasets.oxflower17 as oxflower17
X, Y = oxflower17.load_data(one_hot=True)

# Building 'VGG Network'
network = input_data(shape=[None, 224, 224, 3])

network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)

network = conv_2d(network, 128, 3, activation='relu')
network = conv_2d(network, 128, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)

network = conv_2d(network, 256, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)

network = conv_2d(network, 512, 3, activation='relu')
network = conv_2d(network, 512, 3, activation='relu')
network = conv_2d(network, 512, 3, activation='relu')
Example #7
"""

from __future__ import division, print_function, absolute_import

import zqtflearn
from zqtflearn.layers.core import input_data, dropout, fully_connected
from zqtflearn.layers.conv import conv_2d, max_pool_2d
from zqtflearn.layers.normalization import local_response_normalization
from zqtflearn.layers.estimator import regression

import zqtflearn.datasets.oxflower17 as oxflower17
X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

# Building 'AlexNet'
network = input_data(shape=[None, 227, 227, 3])
network = conv_2d(network, 96, 11, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 17, activation='softmax')
Example #8
import zqtflearn
from zqtflearn.layers.core import input_data, dropout, fully_connected
from zqtflearn.layers.conv import conv_2d, max_pool_2d
from zqtflearn.layers.normalization import local_response_normalization
from zqtflearn.layers.estimator import regression

# Data loading and preprocessing
import zqtflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=True)
X = X.reshape([-1, 28, 28, 1])
testX = testX.reshape([-1, 28, 28, 1])

# Building convolutional network
network = input_data(shape=[None, 28, 28, 1], name='input')
network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = fully_connected(network, 128, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 10, activation='softmax')
network = regression(network,
                     optimizer='adam',
                     learning_rate=0.01,
                     loss='categorical_crossentropy',
                     name='target')
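Example #8 stops after the regression head. A training call in the same style as the other MNIST snippets on this page would look roughly like this; the epoch count, snapshot_step and run_id are placeholders:

model = zqtflearn.DNN(network, tensorboard_verbose=0)
model.fit({'input': X}, {'target': Y},
          n_epoch=20,
          validation_set=({'input': testX}, {'target': testY}),
          snapshot_step=100,
          show_metric=True,
          run_id='convnet_mnist')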
Example #9
# Real-time data preprocessing
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

# Real-time data augmentation
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)

# Convolutional network building
network = input_data(shape=[None, 32, 32, 3],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
network = conv_2d(network, 32, 3, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.5)
network = fully_connected(network, 10, activation='softmax')
network = regression(network, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

# Train using classifier
model = zqtflearn.DNN(network, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=50, shuffle=True, validation_set=(X_test, Y_test),
          show_metric=True, batch_size=96, run_id='cifar10_cnn')
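Once fit() returns, the same DNN object can be evaluated on the held-out set and its weights persisted; a short sketch using the standard DNN methods (the file name is only an illustration):

# Accuracy on the test split; evaluate() returns a list with one value per metric.
score = model.evaluate(X_test, Y_test, batch_size=96)
print("Test accuracy: %s" % score)

# Save the trained weights so they can be reloaded later with model.load().
model.save("cifar10_cnn.tflearn")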
Example #10
    K. Simonyan, A. Zisserman. arXiv technical report, 2014.
Links:
    http://arxiv.org/pdf/1409.1556
"""

import zqtflearn
from zqtflearn.layers.core import input_data, dropout, fully_connected
from zqtflearn.layers.conv import conv_2d, max_pool_2d
from zqtflearn.layers.estimator import regression

# Building 'VGG Network'
input_layer = input_data(shape=[None, 224, 224, 3])

block1_conv1 = conv_2d(input_layer,
                       64,
                       3,
                       activation='relu',
                       name='block1_conv1')
block1_conv2 = conv_2d(block1_conv1,
                       64,
                       3,
                       activation='relu',
                       name='block1_conv2')
block1_pool = max_pool_2d(block1_conv2, 2, strides=2, name='block1_pool')

block2_conv1 = conv_2d(block1_pool,
                       128,
                       3,
                       activation='relu',
                       name='block2_conv1')
block2_conv2 = conv_2d(block2_conv1,
                       128,
                       3,
                       activation='relu',
                       # closing arguments assumed to follow the block1/block2
                       # naming pattern; the original snippet was cut off here
                       name='block2_conv2')
Example #11
import zqtflearn
from zqtflearn.data_utils import shuffle, to_categorical
from zqtflearn.layers.core import input_data, dropout, flatten
from zqtflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d
from zqtflearn.layers.estimator import regression

# Data loading and preprocessing
from zqtflearn.datasets import cifar10
(X, Y), (X_test, Y_test) = cifar10.load_data()
X, Y = shuffle(X, Y)
Y = to_categorical(Y)
Y_test = to_categorical(Y_test)

# Building 'Network In Network'
network = input_data(shape=[None, 32, 32, 3])
network = conv_2d(network, 192, 5, activation='relu')
network = conv_2d(network, 160, 1, activation='relu')
network = conv_2d(network, 96, 1, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = dropout(network, 0.5)
network = conv_2d(network, 192, 5, activation='relu')
network = conv_2d(network, 192, 1, activation='relu')
network = conv_2d(network, 192, 1, activation='relu')
network = avg_pool_2d(network, 3, strides=2)
network = dropout(network, 0.5)
network = conv_2d(network, 192, 3, activation='relu')
network = conv_2d(network, 192, 1, activation='relu')
network = conv_2d(network, 10, 1, activation='relu')
network = avg_pool_2d(network, 8)
network = flatten(network)
network = regression(network,
                     optimizer='adam',
                     # loss and learning rate assumed from the reference
                     # Network-in-Network example; the snippet was cut off here
                     loss='softmax_categorical_crossentropy',
                     learning_rate=0.001)
Example #12
from __future__ import division, print_function, absolute_import

import zqtflearn
from zqtflearn.layers.core import input_data, dropout, fully_connected
from zqtflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d
from zqtflearn.layers.normalization import local_response_normalization
from zqtflearn.layers.merge_ops import merge
from zqtflearn.layers.estimator import regression

import zqtflearn.datasets.oxflower17 as oxflower17
X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

network = input_data(shape=[None, 227, 227, 3])
conv1_7_7 = conv_2d(network,
                    64,
                    7,
                    strides=2,
                    activation='relu',
                    name='conv1_7_7_s2')
pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
pool1_3_3 = local_response_normalization(pool1_3_3)
conv2_3_3_reduce = conv_2d(pool1_3_3,
                           64,
                           1,
                           activation='relu',
                           name='conv2_3_3_reduce')
conv2_3_3 = conv_2d(conv2_3_3_reduce,
                    192,
                    3,
                    activation='relu',
                    name='conv2_3_3')
conv2_3_3 = local_response_normalization(conv2_3_3)