def test_conv_layers(self):
    X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
    Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]
    with tf.Graph().as_default():
        g = zqtflearn.input_data(shape=[None, 4])
        g = zqtflearn.reshape(g, new_shape=[-1, 2, 2, 1])
        g = zqtflearn.conv_2d(g, 4, 2, activation='relu')
        g = zqtflearn.max_pool_2d(g, 2)
        g = zqtflearn.fully_connected(g, 2, activation='softmax')
        g = zqtflearn.regression(g, optimizer='sgd', learning_rate=1.)

        m = zqtflearn.DNN(g)
        m.fit(X, Y, n_epoch=100, snapshot_epoch=False)
        # TODO: Fix test
        #self.assertGreater(m.predict([[1., 0., 0., 0.]])[0][0], 0.5)

    # Bulk Tests
    with tf.Graph().as_default():
        g = zqtflearn.input_data(shape=[None, 4])
        g = zqtflearn.reshape(g, new_shape=[-1, 2, 2, 1])
        g = zqtflearn.conv_2d(g, 4, 2)
        g = zqtflearn.conv_2d(g, 4, 1)
        g = zqtflearn.conv_2d_transpose(g, 4, 2, [2, 2])
        g = zqtflearn.max_pool_2d(g, 2)
def discriminator(x, reuse=False):
    with tf.variable_scope('Discriminator', reuse=reuse):
        x = zqtflearn.conv_2d(x, 64, 5, activation='tanh')
        x = zqtflearn.avg_pool_2d(x, 2)
        x = zqtflearn.conv_2d(x, 128, 5, activation='tanh')
        x = zqtflearn.avg_pool_2d(x, 2)
        x = zqtflearn.fully_connected(x, 1024, activation='tanh')
        x = zqtflearn.fully_connected(x, 2)
        x = tf.nn.softmax(x)
        return x
def generator(x, reuse=False):
    with tf.variable_scope('Generator', reuse=reuse):
        x = zqtflearn.fully_connected(x, n_units=7 * 7 * 128)
        x = zqtflearn.batch_normalization(x)
        x = tf.nn.tanh(x)
        x = tf.reshape(x, shape=[-1, 7, 7, 128])
        x = zqtflearn.upsample_2d(x, 2)
        x = zqtflearn.conv_2d(x, 64, 5, activation='tanh')
        x = zqtflearn.upsample_2d(x, 2)
        x = zqtflearn.conv_2d(x, 1, 5, activation='sigmoid')
        return x
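
# A minimal sketch of wiring the two networks above into an adversarial
# training graph. Everything below is illustrative: z_dim, the learning
# rate, and the convention that softmax column 0 means "real" are
# assumptions, not taken from the original snippet.
import tensorflow as tf
import zqtflearn

z_dim = 100  # assumed latent noise size

with tf.Graph().as_default():
    gen_input = tf.placeholder(tf.float32, [None, z_dim])
    real_input = tf.placeholder(tf.float32, [None, 28, 28, 1])

    gen_sample = generator(gen_input)
    disc_real = discriminator(real_input)              # creates 'Discriminator' variables
    disc_fake = discriminator(gen_sample, reuse=True)  # shares those variables

    # Non-saturating GAN losses on the assumed "real" probability (column 0).
    disc_loss = -tf.reduce_mean(tf.log(disc_real[:, 0] + 1e-8) +
                                tf.log(disc_fake[:, 1] + 1e-8))
    gen_loss = -tf.reduce_mean(tf.log(disc_fake[:, 0] + 1e-8))

    # Each optimizer updates only its own network's variables.
    gen_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Generator')
    disc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Discriminator')
    train_gen = tf.train.AdamOptimizer(2e-4).minimize(gen_loss, var_list=gen_vars)
    train_disc = tf.train.AdamOptimizer(2e-4).minimize(disc_loss, var_list=disc_vars)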
def build_dqn(num_actions, action_repeat):
    """ Building a DQN. """
    inputs = tf.placeholder(tf.float32, [None, action_repeat, 84, 84])
    # Inputs shape: [batch, channel, height, width] need to be changed into
    # shape [batch, height, width, channel]
    net = tf.transpose(inputs, [0, 2, 3, 1])
    net = zqtflearn.conv_2d(net, 32, 8, strides=4, activation='relu')
    net = zqtflearn.conv_2d(net, 64, 4, strides=2, activation='relu')
    net = zqtflearn.fully_connected(net, 256, activation='relu')
    q_values = zqtflearn.fully_connected(net, num_actions)
    return inputs, q_values
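
# A minimal sketch of using the Q-network above for epsilon-greedy action
# selection. The epsilon value, the frame stack of 4, and the zero-filled
# dummy state are illustrative assumptions.
import numpy as np
import tensorflow as tf

num_actions, action_repeat = 4, 4
epsilon = 0.1

with tf.Graph().as_default():
    inputs, q_values = build_dqn(num_actions, action_repeat)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        state = np.zeros((1, action_repeat, 84, 84), dtype=np.float32)
        if np.random.rand() < epsilon:
            action = np.random.randint(num_actions)  # explore
        else:
            q = sess.run(q_values, feed_dict={inputs: state})
            action = int(np.argmax(q[0]))            # exploit the greedy action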
def test_feed_dict_no_None(self):
    X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
    Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]
    with tf.Graph().as_default():
        g = zqtflearn.input_data(shape=[None, 4], name="X_in")
        g = zqtflearn.reshape(g, new_shape=[-1, 2, 2, 1])
        g = zqtflearn.conv_2d(g, 4, 2)
        g = zqtflearn.conv_2d(g, 4, 1)
        g = zqtflearn.max_pool_2d(g, 2)
        g = zqtflearn.fully_connected(g, 2, activation='softmax')
        g = zqtflearn.regression(g, optimizer='sgd', learning_rate=1.)

        m = zqtflearn.DNN(g)

        def do_fit():
            m.fit({"X_in": X, 'non_existent': X}, Y, n_epoch=30,
                  snapshot_epoch=False)
        self.assertRaisesRegexp(
            Exception,
            "Feed dict asks for variable named 'non_existent' but no "
            "such variable is known to exist",
            do_fit)
# Using MNIST Dataset
import zqtflearn.datasets.mnist as mnist
mnist_data = mnist.read_data_sets(one_hot=True)

# User defined placeholders
with tf.Graph().as_default():
    # Placeholders for data and labels
    X = tf.placeholder(shape=(None, 784), dtype=tf.float32)
    Y = tf.placeholder(shape=(None, 10), dtype=tf.float32)

    net = tf.reshape(X, [-1, 28, 28, 1])

    # Using TFLearn wrappers for network building
    net = zqtflearn.conv_2d(net, 32, 3, activation='relu')
    net = zqtflearn.max_pool_2d(net, 2)
    net = zqtflearn.local_response_normalization(net)
    net = zqtflearn.dropout(net, 0.8)
    net = zqtflearn.conv_2d(net, 64, 3, activation='relu')
    net = zqtflearn.max_pool_2d(net, 2)
    net = zqtflearn.local_response_normalization(net)
    net = zqtflearn.dropout(net, 0.8)
    net = zqtflearn.fully_connected(net, 128, activation='tanh')
    net = zqtflearn.dropout(net, 0.8)
    net = zqtflearn.fully_connected(net, 256, activation='tanh')
    net = zqtflearn.dropout(net, 0.8)
    net = zqtflearn.fully_connected(net, 10, activation='linear')

    # Defining other ops using Tensorflow: softmax cross-entropy
    # between the linear logits and the one-hot labels
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=Y))
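
    # A minimal sketch of finishing the raw-TF training setup above; the
    # optimizer choice, learning rate, step count, and batch size are
    # illustrative assumptions, not from the original.
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(net, 1), tf.argmax(Y, 1)), tf.float32))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(1000):
            batch_x, batch_y = mnist_data.train.next_batch(128)
            _, acc = sess.run([optimizer, accuracy],
                              feed_dict={X: batch_x, Y: batch_y})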
def vgg16(input, num_class):
    x = zqtflearn.conv_2d(input, 64, 3, activation='relu', scope='conv1_1')
    x = zqtflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = zqtflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')

    x = zqtflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = zqtflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = zqtflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')

    x = zqtflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = zqtflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = zqtflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = zqtflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')

    x = zqtflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = zqtflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = zqtflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = zqtflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')

    x = zqtflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = zqtflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = zqtflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = zqtflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')

    x = zqtflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = zqtflearn.dropout(x, 0.5, name='dropout1')
    x = zqtflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = zqtflearn.dropout(x, 0.5, name='dropout2')
    x = zqtflearn.fully_connected(x, num_class, activation='softmax',
                                  scope='fc8', restore=False)
    return x
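
# A minimal sketch of fine-tuning the VGG16 definition above on a new task.
# The input size, class count, checkpoint filename, and hyperparameters are
# illustrative assumptions; restore=False on fc8 (and on the regression op
# below) keeps the freshly initialized output layer out of weight loading.
import zqtflearn

num_classes = 10  # hypothetical target task
x = zqtflearn.input_data(shape=[None, 224, 224, 3], name='input')
softmax = vgg16(x, num_classes)
net = zqtflearn.regression(softmax, optimizer='adam',
                           loss='categorical_crossentropy',
                           learning_rate=0.001, restore=False)
model = zqtflearn.DNN(net)
model.load('vgg16.tflearn', weights_only=True)  # hypothetical checkpoint path
# model.fit(X, Y, n_epoch=10, batch_size=32)    # X, Y: task-specific data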
from __future__ import division, print_function, absolute_import

import zqtflearn
import zqtflearn.data_utils as du

# Data loading and preprocessing
import zqtflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=True)
X = X.reshape([-1, 28, 28, 1])
testX = testX.reshape([-1, 28, 28, 1])
X, mean = du.featurewise_zero_center(X)
testX = du.featurewise_zero_center(testX, mean)

# Building Residual Network
net = zqtflearn.input_data(shape=[None, 28, 28, 1])
net = zqtflearn.conv_2d(net, 64, 3, activation='relu', bias=False)
# Residual blocks
net = zqtflearn.residual_bottleneck(net, 3, 16, 64)
net = zqtflearn.residual_bottleneck(net, 1, 32, 128, downsample=True)
net = zqtflearn.residual_bottleneck(net, 2, 32, 128)
net = zqtflearn.residual_bottleneck(net, 1, 64, 256, downsample=True)
net = zqtflearn.residual_bottleneck(net, 2, 64, 256)
net = zqtflearn.batch_normalization(net)
net = zqtflearn.activation(net, 'relu')
net = zqtflearn.global_avg_pool(net)
# Regression
net = zqtflearn.fully_connected(net, 10, activation='softmax')
net = zqtflearn.regression(net, optimizer='momentum',
                           loss='categorical_crossentropy',
                           learning_rate=0.1)
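
# The snippet stops after defining the regression op; a minimal training
# call via the DNN wrapper might look like this (checkpoint path, epoch
# count, and batch size are illustrative assumptions):
model = zqtflearn.DNN(net, checkpoint_path='model_resnet_mnist',
                      max_checkpoints=10, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=100, validation_set=(testX, testY),
          show_metric=True, batch_size=256, run_id='resnet_mnist')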
testY = zqtflearn.data_utils.to_categorical(testY)

# Real-time data preprocessing
img_prep = zqtflearn.ImagePreprocessing()
img_prep.add_featurewise_zero_center(per_channel=True)

# Real-time data augmentation
img_aug = zqtflearn.ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_crop([32, 32], padding=4)

# Building Residual Network
# n: number of residual blocks per stage, assumed to be defined earlier
# in the script (e.g. n = 5 yields a 32-layer CIFAR-10 ResNet)
net = zqtflearn.input_data(shape=[None, 32, 32, 3],
                           data_preprocessing=img_prep,
                           data_augmentation=img_aug)
net = zqtflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
net = zqtflearn.residual_block(net, n, 16)
net = zqtflearn.residual_block(net, 1, 32, downsample=True)
net = zqtflearn.residual_block(net, n - 1, 32)
net = zqtflearn.residual_block(net, 1, 64, downsample=True)
net = zqtflearn.residual_block(net, n - 1, 64)
net = zqtflearn.batch_normalization(net)
net = zqtflearn.activation(net, 'relu')
net = zqtflearn.global_avg_pool(net)
# Regression
net = zqtflearn.fully_connected(net, 10, activation='softmax')
mom = zqtflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
net = zqtflearn.regression(net, optimizer=mom,
                           loss='categorical_crossentropy')
# Training
model = zqtflearn.DNN(net, checkpoint_path='model_resnet_cifar10',
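                      # remaining constructor arguments and the fit()
                      # hyperparameters below are illustrative assumptions
                      max_checkpoints=10, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=200, validation_set=(testX, testY),
          snapshot_epoch=False, snapshot_step=500, show_metric=True,
          batch_size=128, run_id='resnet_cifar10')  # X, Y: training data loaded earlier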