예제 #1
0
    def test_vbs1(self):
        """Verify that `batch_size` and `validation_batch_size` passed to
        DNN.fit are propagated onto the first train op."""

        with tf.Graph().as_default():
            # Load MNIST and keep only tiny slices so the test stays fast.
            import zqtflearn.datasets.mnist as mnist
            X, Y, testX, testY = mnist.load_data(one_hot=True)
            X = X.reshape([-1, 28, 28, 1])[:20, :, :, :]
            Y = Y[:20, :]
            testX = testX.reshape([-1, 28, 28, 1])[:10, :, :, :]
            testY = testY[:10, :]

            # Small LeNet-style convolutional network.
            net = input_data(shape=[None, 28, 28, 1], name='input')
            net = conv_2d(net, 32, 3, activation='relu', regularizer="L2")
            net = max_pool_2d(net, 2)
            net = local_response_normalization(net)
            net = conv_2d(net, 64, 3, activation='relu', regularizer="L2")
            net = max_pool_2d(net, 2)
            net = local_response_normalization(net)
            net = fully_connected(net, 128, activation='tanh')
            net = dropout(net, 0.8)
            net = fully_connected(net, 256, activation='tanh')
            net = dropout(net, 0.8)
            net = fully_connected(net, 10, activation='softmax')
            net = regression(net,
                             optimizer='adam',
                             learning_rate=0.01,
                             loss='categorical_crossentropy',
                             name='target')

            # Train one epoch with distinct training/validation batch sizes.
            model = zqtflearn.DNN(net, tensorboard_verbose=3)
            model.fit({'input': X}, {'target': Y},
                      n_epoch=1,
                      batch_size=10,
                      validation_set=({'input': testX}, {'target': testY}),
                      validation_batch_size=5,
                      snapshot_step=10,
                      show_metric=True,
                      run_id='convnet_mnist_vbs')

            # Both sizes must land, unchanged, on the first train op.
            self.assertEqual(model.train_ops[0].validation_batch_size, 5)
            self.assertEqual(model.train_ops[0].batch_size, 10)
 def make_core_network(network):
     """Build the shared MNIST conv-net core on top of *network*.

     Reshapes the input to NHWC 28x28x1, applies two conv/pool/LRN stages
     (32 then 64 filters), then three fully connected layers ending in a
     10-way softmax. Returns the output tensor.
     """
     net = zqtflearn.reshape(network, [-1, 28, 28, 1], name="reshape")
     # Two identical conv stages, differing only in filter count.
     for n_filters in (32, 64):
         net = conv_2d(net, n_filters, 3, activation='relu', regularizer="L2")
         net = max_pool_2d(net, 2)
         net = local_response_normalization(net)
     # Classifier head: tanh FC layers with dropout, softmax output.
     net = fully_connected(net, 128, activation='tanh')
     net = dropout(net, 0.8)
     net = fully_connected(net, 256, activation='tanh')
     net = dropout(net, 0.8)
     net = fully_connected(net, 10, activation='softmax')
     return net
예제 #3
0
    def test_vm1(self):
        """Check that extra `validation_monitors` tensors passed to
        `regression` are summarized alongside loss/accuracy and are
        retrievable from the trainer after fitting."""

        with tf.Graph().as_default():
            # Data loading and preprocessing.
            # Only the training set is truncated to 10 samples; validation
            # below uses the full (reshaped) test set.
            import zqtflearn.datasets.mnist as mnist
            X, Y, testX, testY = mnist.load_data(one_hot=True)
            X = X.reshape([-1, 28, 28, 1])
            testX = testX.reshape([-1, 28, 28, 1])
            X = X[:10, :, :, :]
            Y = Y[:10, :]

            # Building convolutional network
            network = input_data(shape=[None, 28, 28, 1], name='input')
            network = conv_2d(network,
                              32,
                              3,
                              activation='relu',
                              regularizer="L2")
            network = max_pool_2d(network, 2)
            network = local_response_normalization(network)
            network = conv_2d(network,
                              64,
                              3,
                              activation='relu',
                              regularizer="L2")
            network = max_pool_2d(network, 2)
            network = local_response_normalization(network)
            network = fully_connected(network, 128, activation='tanh')
            network = dropout(network, 0.8)
            network = fully_connected(network, 256, activation='tanh')
            network = dropout(network, 0.8)

            # Construct two variables to add as additional "validation monitors".
            # These variables are evaluated each time validation happens (e.g. at
            # a snapshot) and the results are summarized and output to the
            # tensorboard events file, together with the accuracy and loss plots.
            #
            # Here, we generate a dummy variable given by the sum over the current
            # network tensor, and a constant variable.  In practice, the validation
            # monitor may present useful information, like confusion matrix
            # entries, or an AUC metric.
            with tf.name_scope('CustomMonitor'):
                test_var = tf.reduce_sum(tf.cast(network, tf.float32),
                                         name="test_var")
                test_const = tf.constant(32.0, name="custom_constant")

            print("network=%s, test_var=%s" % (network, test_var))
            network = fully_connected(network, 10, activation='softmax')
            network = regression(network,
                                 optimizer='adam',
                                 learning_rate=0.01,
                                 loss='categorical_crossentropy',
                                 name='target',
                                 validation_monitors=[test_var, test_const])

            # Training
            model = zqtflearn.DNN(network, tensorboard_verbose=3)
            model.fit({'input': X}, {'target': Y},
                      n_epoch=1,
                      validation_set=({
                          'input': testX
                      }, {
                          'target': testY
                      }),
                      snapshot_step=10,
                      show_metric=True,
                      run_id='convnet_mnist')

            # Check for validation monitor variables in the graph collection
            # that the trainer populated during setup.
            ats = tf.get_collection("Adam_testing_summaries")
            print("ats=%s" % ats)
            self.assertTrue(
                len(ats) == 4
            )  # should be four variables being summarized: [loss, test_var, test_const, accuracy]

            session = model.session
            print("session=%s" % session)
            trainer = model.trainer
            print("train_ops = %s" % trainer.train_ops)
            top = trainer.train_ops[0]
            # The monitor tensors should be attached to the first train op,
            # in the order they were passed to `regression` above.
            vmtset = top.validation_monitors_T
            print("validation_monitors_T = %s" % vmtset)
            with model.session.as_default():
                ats_var_val = zqtflearn.variables.get_value(vmtset[0])
                ats_const_val = zqtflearn.variables.get_value(vmtset[1])
            print("summary values: var=%s, const=%s" %
                  (ats_var_val, ats_const_val))
            self.assertTrue(
                ats_const_val ==
                32)  # test to make sure the constant made it through
                3,
                bias=False,
                padding='VALID',
                activation=None,
                name='Conv2d_2a_3x3')))
conv2b_3_3 = relu(
    batch_normalization(
        conv_2d(conv2a_3_3,
                64,
                3,
                bias=False,
                activation=None,
                name='Conv2d_2b_3x3')))
maxpool3a_3_3 = max_pool_2d(conv2b_3_3,
                            3,
                            strides=2,
                            padding='VALID',
                            name='MaxPool_3a_3x3')
conv3b_1_1 = relu(
    batch_normalization(
        conv_2d(maxpool3a_3_3,
                80,
                1,
                bias=False,
                padding='VALID',
                activation=None,
                name='Conv2d_3b_1x1')))
conv4a_3_3 = relu(
    batch_normalization(
        conv_2d(conv3b_1_1,
                192,
예제 #5
0
import zqtflearn
from zqtflearn.layers.core import input_data, dropout, fully_connected
from zqtflearn.layers.conv import conv_2d, max_pool_2d
from zqtflearn.layers.estimator import regression

# Data loading and preprocessing.
# oxflower17 is the 17-category Oxford Flowers dataset; one_hot=True returns
# one-hot encoded labels.
import zqtflearn.datasets.oxflower17 as oxflower17
X, Y = oxflower17.load_data(one_hot=True)

# Building 'VGG Network': repeated 3x3 convolutions, with a 2x2/stride-2
# max-pool after each stage and the filter count doubling per stage.
# NOTE(review): this example appears truncated here — the remaining stages,
# classifier head, and training code are not visible in this chunk.
network = input_data(shape=[None, 224, 224, 3])

# Stage 1: two 64-filter convs, then downsample 224 -> 112.
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)

# Stage 2: 128 filters, downsample 112 -> 56.
network = conv_2d(network, 128, 3, activation='relu')
network = conv_2d(network, 128, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)

# Stage 3: three 256-filter convs, downsample 56 -> 28.
network = conv_2d(network, 256, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)

# Stage 4: three 512-filter convs, downsample 28 -> 14.
network = conv_2d(network, 512, 3, activation='relu')
network = conv_2d(network, 512, 3, activation='relu')
network = conv_2d(network, 512, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)
예제 #6
0
from zqtflearn.layers.normalization import local_response_normalization, batch_normalization
from zqtflearn.layers.estimator import regression

# Data loading and preprocessing: flat 784-vector MNIST -> NHWC 28x28x1 images.
import zqtflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=True)
X = X.reshape([-1, 28, 28, 1])
testX = testX.reshape([-1, 28, 28, 1])

# Building convolutional network.
# NOTE(review): input_data / highway_conv_2d / max_pool_2d / fully_connected
# and zqtflearn.DNN are presumably imported in an unseen part of this file —
# confirm against the full source.
network = input_data(shape=[None, 28, 28, 1], name='input')
# Highway convolutions with pooling: three stages, each applying highway
# convs with shrinking kernel sizes (3, 2, 1) before pooling + batch norm.
for i in range(3):
    for j in [3, 2, 1]:
        network = highway_conv_2d(network, 16, j, activation='elu')
    network = max_pool_2d(network, 2)
    network = batch_normalization(network)

# Fully connected classifier head ending in a 10-way softmax.
network = fully_connected(network, 128, activation='elu')
network = fully_connected(network, 256, activation='elu')
network = fully_connected(network, 10, activation='softmax')
network = regression(network,
                     optimizer='adam',
                     learning_rate=0.01,
                     loss='categorical_crossentropy',
                     name='target')

# Training (the fit(...) call continues past this chunk).
model = zqtflearn.DNN(network, tensorboard_verbose=0)
model.fit(X,
          Y,
예제 #7
0
from zqtflearn.layers.estimator import regression

# Building 'VGG Network' with explicitly named layers (Keras-style
# block{N}_conv{M} / block{N}_pool names, useful for weight transfer).
# NOTE(review): input_data / conv_2d / max_pool_2d are presumably imported in
# an unseen part of this file; the example is also truncated mid-definition
# after block3_conv1 — confirm against the full source.
input_layer = input_data(shape=[None, 224, 224, 3])

# Block 1: two 64-filter 3x3 convs, then 2x2/stride-2 pooling.
block1_conv1 = conv_2d(input_layer,
                       64,
                       3,
                       activation='relu',
                       name='block1_conv1')
block1_conv2 = conv_2d(block1_conv1,
                       64,
                       3,
                       activation='relu',
                       name='block1_conv2')
block1_pool = max_pool_2d(block1_conv2, 2, strides=2, name='block1_pool')

# Block 2: two 128-filter 3x3 convs, then pooling.
block2_conv1 = conv_2d(block1_pool,
                       128,
                       3,
                       activation='relu',
                       name='block2_conv1')
block2_conv2 = conv_2d(block2_conv1,
                       128,
                       3,
                       activation='relu',
                       name='block2_conv2')
block2_pool = max_pool_2d(block2_conv2, 2, strides=2, name='block2_pool')

block3_conv1 = conv_2d(block2_pool,
                       256,
예제 #8
0
from zqtflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d
from zqtflearn.layers.estimator import regression

# Data loading and preprocessing.
# NOTE(review): shuffle / to_categorical / input_data / dropout / flatten are
# presumably imported in an unseen part of this file — confirm.
from zqtflearn.datasets import cifar10
(X, Y), (X_test, Y_test) = cifar10.load_data()
X, Y = shuffle(X, Y)
Y = to_categorical(Y)
Y_test = to_categorical(Y_test)

# Building 'Network In Network': each spatial conv is followed by 1x1 convs
# (acting as per-pixel fully connected layers), and the classifier uses
# average pooling over the final 10-channel maps instead of dense layers.
network = input_data(shape=[None, 32, 32, 3])
network = conv_2d(network, 192, 5, activation='relu')
network = conv_2d(network, 160, 1, activation='relu')
network = conv_2d(network, 96, 1, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = dropout(network, 0.5)
network = conv_2d(network, 192, 5, activation='relu')
network = conv_2d(network, 192, 1, activation='relu')
network = conv_2d(network, 192, 1, activation='relu')
network = avg_pool_2d(network, 3, strides=2)
network = dropout(network, 0.5)
network = conv_2d(network, 192, 3, activation='relu')
network = conv_2d(network, 192, 1, activation='relu')
network = conv_2d(network, 10, 1, activation='relu')  # 10 = CIFAR-10 classes
network = avg_pool_2d(network, 8)  # pool each class map down to a scalar
network = flatten(network)
network = regression(network,
                     optimizer='adam',
                     loss='softmax_categorical_crossentropy',
                     learning_rate=0.001)
예제 #9
0
from zqtflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d
from zqtflearn.layers.normalization import local_response_normalization
from zqtflearn.layers.merge_ops import merge
from zqtflearn.layers.estimator import regression

# Oxford Flowers-17, resized to 227x227 RGB with one-hot labels.
import zqtflearn.datasets.oxflower17 as oxflower17
X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

# Stem of a GoogLeNet-style network (layer names match the Inception paper's
# conv1/conv2 stage).  NOTE(review): input_data is presumably imported in an
# unseen part of this file, and the example is truncated mid-call after
# pool2_3_3 — confirm against the full source.
network = input_data(shape=[None, 227, 227, 3])
conv1_7_7 = conv_2d(network,
                    64,
                    7,
                    strides=2,
                    activation='relu',
                    name='conv1_7_7_s2')
pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
pool1_3_3 = local_response_normalization(pool1_3_3)
# 1x1 reduction before the 3x3 convolution.
conv2_3_3_reduce = conv_2d(pool1_3_3,
                           64,
                           1,
                           activation='relu',
                           name='conv2_3_3_reduce')
conv2_3_3 = conv_2d(conv2_3_3_reduce,
                    192,
                    3,
                    activation='relu',
                    name='conv2_3_3')
conv2_3_3 = local_response_normalization(conv2_3_3)
pool2_3_3 = max_pool_2d(conv2_3_3,
                        kernel_size=3,
                        strides=2,