Example 1
def train_a_teacher_network():
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    x_image = tf.reshape(x, [-1,28,28,1])
    net = ops.conv2d(x_image, 32, [5, 5], scope='conv1', stddev=0.1, bias=0.1)
    net = ops.max_pool(net, [2, 2], scope='pool1')
    net = ops.conv2d(net, 64, [5, 5], scope='conv2', stddev=0.1, bias=0.1)
    net = ops.max_pool(net, [2, 2], scope='pool2')
    net = ops.flatten(net, scope='pool2_flat')
    net = ops.fc(net, 1024, scope='fc1', stddev=0.1, bias=0.1)
    net = ops.fc(net, 10, activation=None, scope='fc2', stddev=0.1, bias=0.1)
    y_conv = tf.nn.softmax(net)
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), axis=[1]))
    model = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('loss', cross_entropy)
    tf.summary.scalar('acc', accuracy)
    merged = tf.summary.merge_all()
    saver = tf.train.Saver()
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        writer = tf.summary.FileWriter('./logs', sess.graph)
        sess.run(tf.global_variables_initializer())
        print('Teacher Network...')
        for i in range(MAX_ITER):
            batch = mnist.train.next_batch(BATCH_SIZE)
            sess.run(model, feed_dict={x: batch[0], y_: batch[1]})
            # saver.save(sess, './my-model', global_step=TEST_ITER)
            if i % 100 == 0:
                summary_str, acc = sess.run([merged, accuracy], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
                writer.add_summary(summary_str, i)
                print('[Iter: {}] Validation Accuracy : {:.4f}'.format(i,acc))
                saver.save(sess, './my-model', global_step=TEST_ITER)
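The training examples in this collection rely on a few globals (mnist, MAX_ITER, BATCH_SIZE, TEST_ITER) and on a slim-style ops module, none of which the snippets define. A minimal, hypothetical setup consistent with how they are used under TensorFlow 1.x:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Assumed values; the original code defines these elsewhere.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
MAX_ITER = 10000
BATCH_SIZE = 50
TEST_ITER = 100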
Example 2
 def testCreateFCWithoutWD(self):
     height, width = 3, 3
     with self.test_session():
         inputs = tf.random_uniform((5, height * width * 3), seed=1)
         ops.fc(inputs, 32, weight_decay=0)
         self.assertEquals(
             tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), [])
Example 3
def train_a_teacher_network():
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    x_image = tf.reshape(x, [-1,28,28,1])
    net = ops.conv2d(x_image, 32, [5, 5], scope='conv1', stddev=0.1, bias=0.1)
    net = ops.max_pool(net, [2, 2], scope='pool1')
    net = ops.conv2d(net, 64, [5, 5], scope='conv2', stddev=0.1, bias=0.1)
    net = ops.max_pool(net, [2, 2], scope='pool2')
    net = ops.flatten(net, scope='pool2_flat')
    net = ops.fc(net, 1024, scope='fc1', stddev=0.1, bias=0.1)
    net = ops.fc(net, 10, activation=None, scope='fc2', stddev=0.1, bias=0.1)
    y_conv = tf.nn.softmax(net)
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), axis=[1]))
    model = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('loss', cross_entropy)
    tf.summary.scalar('acc', accuracy)
    merged = tf.summary.merge_all()
    saver = tf.train.Saver()
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        writer = tf.summary.FileWriter('./logs', sess.graph)
        sess.run(tf.global_variables_initializer())
        print('Teacher Network...')
        for i in range(MAX_ITER):
            batch = mnist.train.next_batch(BATCH_SIZE)
            sess.run(model, feed_dict={x: batch[0], y_: batch[1]})
            if i % 100 == 0:
                summary_str, acc = sess.run([merged, accuracy], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
                writer.add_summary(summary_str, i)
                print('[Iter: {}] Validation Accuracy : {:.4f}'.format(i, acc))
                saver.save(sess, 'my-model', global_step=TEST_ITER)
Example 4
def train_a_student_network_wider():
    new_width_conv = 128
    new_w1, new_b1, new_w2, new_b2 = tf_net2wider(MODEL, WEIGHT, 'conv1',
                                                  'conv2', new_width_conv)
    with tf.Graph().as_default():
        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True)) as sess:
            x = tf.placeholder(tf.float32, shape=[None, 784])
            y_ = tf.placeholder(tf.float32, shape=[None, 10])
            x_image = tf.reshape(x, [-1, 28, 28, 1])
            net = ops.conv2d(x_image,
                             new_width_conv, [5, 5],
                             scope='conv1',
                             initializer='constant',
                             weights=new_w1,
                             bias=new_b1,
                             restore=False)
            net = ops.max_pool(net, [2, 2], scope='pool1')
            net = ops.conv2d(net,
                             64, [5, 5],
                             scope='conv2',
                             initializer='constant',
                             weights=new_w2,
                             bias=new_b2,
                             restore=False)
            net = ops.max_pool(net, [2, 2], scope='pool2')
            net = ops.flatten(net, scope='pool2_flat')
            net = ops.fc(net, 1024, scope='fc1')
            net = ops.fc(net, 10, activation=None, scope='fc2')
            y_conv = tf.nn.softmax(net)
            cross_entropy = tf.reduce_mean(
                -tf.reduce_sum(y_ * tf.log(y_conv), axis=[1]))
            model = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
            correct_prediction = tf.equal(tf.argmax(y_conv, 1),
                                          tf.argmax(y_, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            tf.summary.scalar('loss', cross_entropy)
            tf.summary.scalar('acc', accuracy)
            merged = tf.summary.merge_all()
            saver = tf.train.Saver()
            writer = tf.summary.FileWriter('./logs-wider', sess.graph)
            sess.run(tf.global_variables_initializer())
            variables_to_restore = tf.get_collection(
                variables.VARIABLES_TO_RESTORE)
            saver = tf.train.Saver(variables_to_restore)
            saver.restore(sess, WEIGHT)
            print('Net2Wider...')
            for i in range(MAX_ITER):
                batch = mnist.train.next_batch(BATCH_SIZE)
                sess.run(model, feed_dict={x: batch[0], y_: batch[1]})
                if i % 100 == 0:
                    summary_str, acc = sess.run([merged, accuracy],
                                                feed_dict={
                                                    x: mnist.test.images,
                                                    y_: mnist.test.labels
                                                })
                    writer.add_summary(summary_str, i)
                    print('[Iter: {}] Validation Accuracy : {:.4f}'.format(
                        i, acc))
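tf_net2wider is not shown in these snippets. As a reference point, here is a minimal NumPy sketch of the function-preserving Net2Wider transformation (Chen et al., Net2Net) for a pair of fully connected layers; the conv version used above follows the same recipe along the channel axis. Names and signature are illustrative, not the actual helper:

import numpy as np

def net2wider_fc(w1, b1, w2, new_width):
    # Keep every original unit, then pick random units to replicate.
    old_width = w1.shape[1]
    extra = np.random.randint(0, old_width, new_width - old_width)
    mapping = np.concatenate([np.arange(old_width), extra])
    counts = np.bincount(mapping, minlength=old_width)
    new_w1 = w1[:, mapping]          # widened first layer: [in, new_width]
    new_b1 = b1[mapping]
    # Divide each replicated unit's outgoing weights by its replication
    # count so the widened network computes exactly the same function.
    new_w2 = w2[mapping, :] / counts[mapping][:, None]
    return new_w1, new_b1, new_w2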
Example 5
 def testNonReuseVars(self):
     height, width = 3, 3
     inputs = tf.random_uniform((5, height * width * 3), seed=1)
     with self.test_session():
         ops.fc(inputs, 32)
         self.assertEquals(len(variables.get_variables('FC')), 2)
         ops.fc(inputs, 32)
         self.assertEquals(len(variables.get_variables('FC')), 4)
Example 6
 def testReuseVars(self):
     height, width = 3, 3
     inputs = tf.random_uniform((5, height * width * 3), seed=1)
     with self.test_session():
         ops.fc(inputs, 32, scope='fc1')
         self.assertEquals(len(variables.get_variables('fc1')), 2)
         ops.fc(inputs, 32, scope='fc1', reuse=True)
         self.assertEquals(len(variables.get_variables('fc1')), 2)
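Together with the previous test, this pins down the scoping behavior: calling ops.fc twice without a shared scope creates fresh variables each time (FC, FC_1, ... in the test above), while reuse=True inside the same scope shares the existing weights and biases, leaving the variable count unchanged.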
Example 7
 def testReuseFCWithBatchNorm(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height * width * 3), seed=1)
         with scopes.arg_scope([ops.fc], batch_norm_params={'decay': 0.9}):
             net = ops.fc(images, 27, scope='fc1')
             net = ops.fc(net, 27, scope='fc1', reuse=True)
         self.assertEquals(len(variables.get_variables()), 4)
         self.assertEquals(len(variables.get_variables('fc1/BatchNorm')), 3)
Example 8
 def testCreateFcCreatesWeightsAndBiasesVars(self):
     height, width = 3, 3
     inputs = tf.random_uniform((5, height * width * 3), seed=1)
     with self.test_session():
         self.assertFalse(variables.get_variables('fc1/weights'))
         self.assertFalse(variables.get_variables('fc1/biases'))
         ops.fc(inputs, 32, scope='fc1')
         self.assertTrue(variables.get_variables('fc1/weights'))
         self.assertTrue(variables.get_variables('fc1/biases'))
Example 9
 def testCreateFCWithWD(self):
     height, width = 3, 3
     with self.test_session() as sess:
         inputs = tf.random_uniform((5, height * width * 3), seed=1)
         ops.fc(inputs, 32, weight_decay=0.01)
         wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
         self.assertEquals(wd.op.name,
                           'FC/weights/Regularizer/L2Regularizer/value')
         sess.run(tf.global_variables_initializer())
         self.assertTrue(sess.run(wd) <= 0.01)
Example 10
 def testFCWithBatchNorm(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height * width * 3), seed=1)
         with scopes.arg_scope([ops.fc], batch_norm_params={}):
             net = ops.fc(images, 27)
             net = ops.fc(net, 27)
         self.assertEquals(len(variables.get_variables()), 8)
         self.assertEquals(len(variables.get_variables('FC/BatchNorm')), 3)
         self.assertEquals(len(variables.get_variables('FC_1/BatchNorm')),
                           3)
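The counts follow from how ops.fc composes with batch normalization: with batch_norm_params set, each layer appears to create one weights matrix plus three BatchNorm variables (beta, moving_mean, moving_variance) and no separate biases, presumably because BatchNorm's beta already plays that role; hence four variables per layer and eight in total.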
Example 11
 def testReuseFCWithWD(self):
     height, width = 3, 3
     with self.test_session():
         inputs = tf.random_uniform((5, height * width * 3), seed=1)
         ops.fc(inputs, 32, weight_decay=0.01, scope='fc')
         self.assertEquals(len(variables.get_variables()), 2)
         self.assertEquals(
             len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
         ops.fc(inputs, 32, weight_decay=0.01, scope='fc', reuse=True)
         self.assertEquals(len(variables.get_variables()), 2)
         self.assertEquals(
             len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
Example 12
def three_fc(x, num_units_out, *args, **kwargs):
    # Apply a shared fc layer along the last axis of a 3-D tensor:
    # flatten the leading axes, run fc, then restore the original shape.
    in_s = [y.value for y in x.get_shape()]
    flat_x = tf.reshape(x, [-1, in_s[-1]])
    o = fc(flat_x, num_units_out=num_units_out, *args, **kwargs)
    out = tf.reshape(o, [-1, in_s[1], num_units_out])
    out.set_shape(in_s[0:-1] + [num_units_out])
    return out
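A hypothetical call, to make the shape contract concrete:

x = tf.random_uniform((4, 100, 16))   # [batch, points, features]
y = three_fc(x, num_units_out=32)     # -> [4, 100, 32]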
Example 13
 def testCreateFC(self):
     height, width = 3, 3
     with self.test_session():
         inputs = tf.random_uniform((5, height * width * 3), seed=1)
         output = ops.fc(inputs, 32)
         self.assertEquals(output.op.name, 'FC/Relu')
         self.assertListEqual(output.get_shape().as_list(), [5, 32])
Example 14
def cppn_func(inp, context, z):
    with arg_scope([fc], batch_norm_params=batch_norm_params, stddev=0.02):
        z = z*2 - 1
        #n = 64
        n = 32
        h = inp[:, :, 0:1]
        w = inp[:, :, 1:2]

        r_h = sin_bank(h, 64, length=3)
        fc_h = three_fc(r_h, num_units_out=n)

        r_w = sin_bank(w, 64, length=3)
        fc_w = three_fc(r_w, num_units_out=n)

        d = tf.sqrt((h-0.5)**2 + (w-0.5)**2)
        r_d = sin_bank(d, 64, length=3)
        fc_d = three_fc(r_d, num_units_out=n)

        #fc_inp = three_fc(inp-0.5, num_units_out=n)

        pi = 3.1415 / 2.0
        # Rotate the (h, w) coordinates by a fixed angle.
        wh = tf.cos(pi) * h - tf.sin(pi) * w
        r_wh = sin_bank(wh, 64, length=3)
        fc_wh = three_fc(r_wh, num_units_out=n)


        context_proc = fc(flatten(context), num_units_out=n)
        context_proc = tf.expand_dims(context_proc, 1)

        z_comb = fc(z, num_units_out=n)
        z_comb = tf.expand_dims(z_comb, 1)

        #res = (fc_h + fc_w + fc_d) * context_proc + z_comb
        res = (fc_h + fc_w + fc_d + fc_wh) + z_comb
        #res = (fc_h + fc_w + fc_d) + z_comb
        #res = (fc_h + fc_w + fc_d) + z_comb
        #res = fc_h + fc_w
        z_mul = fc(z, num_units_out=n)
        z_mul = tf.expand_dims(z_mul, 1)

        #res *= z_mul

        h = three_fc(res, num_units_out=n)
        h2 = three_fc(h, num_units_out=n)
        h3 = three_fc(h2, num_units_out=n)
        return three_fc(h3, num_units_out=1, batch_norm_params=None)
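sin_bank is used throughout the CPPN examples but never defined in them. A plausible minimal reconstruction, consistent with how it is called (a [batch, points, 1] coordinate tensor in, a [batch, points, n] bank of sinusoids out, with length controlling the frequency scale); the actual helper may differ:

def sin_bank(x, n, length=3.0):
    # Hypothetical: project a coordinate onto n sinusoids with learned
    # frequencies and phases; a fresh uniquified scope per call.
    with tf.variable_scope(None, default_name='sin_bank'):
        freqs = tf.get_variable(
            'freqs', [n],
            initializer=tf.random_uniform_initializer(0.0, length))
        phases = tf.get_variable(
            'phases', [n],
            initializer=tf.random_uniform_initializer(0.0, 2 * 3.1415))
        return tf.sin(x * freqs + phases)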
Example 15
def discrim16(inp):
  with arg_scope([conv2d], batch_norm_params=batch_norm_params, stddev=0.02, activation=lrelu, weight_decay=1e-5):
    o = conv2d(inp, num_filters_out=32, kernel_size=(3, 3), stride=1)
    o = conv2d(o, num_filters_out=32, kernel_size=(3, 3), stride=2)
    o = conv2d(o, num_filters_out=32, kernel_size=(3, 3), stride=1)
    o = conv2d(o, num_filters_out=32, kernel_size=(3, 3), stride=2)
    o = conv2d(o, num_filters_out=32, kernel_size=(3, 3), stride=1)
    return fc(flatten(o), num_units_out=1, activation=tf.nn.sigmoid)
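lrelu is likewise assumed rather than defined; the standard leaky ReLU it presumably refers to:

def lrelu(x, leak=0.2):
    return tf.maximum(x, leak * x)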
Example 16
def top_layers(inputs):

    with tf.variable_scope('top_layers'):
        out = ops.conv2d(inputs, 96, [4,4], stride=2, scope='top_conv1')
        _,fm_size,fm_size,_ = out.get_shape()
        out = ops.max_pool(out, [fm_size,fm_size], stride=1, scope='top_gpool')

        out = ops.flatten(out, scope='top_flatten')
        out = ops.fc(out, 10, activation=None, bias=0.0, scope='top_logits')

    return out
Example 17
def top_layers(inputs):

    with tf.variable_scope('top_layers'):
        out = ops.conv2d(inputs, 96, [4,4], stride=2, padding='VALID', scope='top_conv1')
        _,fm_size,fm_size,_ = out.get_shape()
        out = ops.max_pool(out, [fm_size,fm_size], stride=1, scope='top_gpool')

        out = ops.flatten(out, scope='top_flatten')
        #out = ops.fc(out, 10, activation=None, bias=0.0, batch_norm_params=None, scope='top_logits')
        bn_out = {'decay': 0.99, 'epsilon': 0.001, 'scale':True}
        out = ops.fc(out, 10, activation=None, batch_norm_params=bn_out, scope='top_logits')

    return out
Example 18
def encoder(inp, z_dim):
    n = 32
    with arg_scope([conv2d], batch_norm_params=batch_norm_params, stddev=0.02, activation=lrelu, weight_decay=1e-5):
        inp = inp-0.5
        o = conv2d(inp, num_filters_out=n, kernel_size=(3, 3), stride=1)
        o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=2)
        o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=2)
        o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=2)
        o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=1)
        flat = flatten(o)
        #flat = flatten(avg_pool(o, kernel_size=3))
        # Map the tanh output from (-1, 1) into (0, 1).
        z = fc(flat, num_units_out=z_dim, activation=tf.nn.tanh) / 2 + .5
        return z
Example 19
def generator_context(z):
    n = 32
    with arg_scope([conv2d, conv2d_transpose], batch_norm_params=batch_norm_params, stddev=0.02):
        z = z*2-1
        d = 8
        z = fc(z, num_units_out=d*d*32, batch_norm_params=batch_norm_params)
        c = z.get_shape()[1].value // (d*d)  # integer division: reshape needs an int
        z = tf.reshape(z, (-1, d, d, c))
        o = conv2d_transpose(z, n, (3, 3), stride=(2, 2))
        o = conv2d_transpose(o, n, (3, 3), stride=(2, 2))
        o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=1)
        o = conv2d(o, num_filters_out=4, kernel_size=(3, 3), stride=1)
        attended = o
        return attended
Example 20
def generator(z):
    n = 32
    with arg_scope([conv2d, conv2d_transpose], batch_norm_params=batch_norm_params, stddev=0.02):
        z = z*2-1
        d = 8
        z = fc(z, num_units_out=d*d*32, batch_norm_params=batch_norm_params)
        c = z.get_shape()[1].value // (d*d)  # integer division: reshape needs an int
        z = tf.reshape(z, (-1, d, d, c))
        o = conv2d_transpose(z, n, (3, 3), stride=(2, 2))
        o = conv2d_transpose(o, n, (3, 3), stride=(2, 2))
        o = conv2d(o, num_filters_out=n*2, kernel_size=(3, 3), stride=1)
        o = conv2d(o, num_filters_out=1, kernel_size=(3, 3), stride=1, padding="VALID", batch_norm_params=None)
        # The VALID conv leaves a 30x30 map; crop the border to 28x28 (MNIST-sized).
        out = o[:, 1:29, 1:29, :]
        return out
Example 21
def train_a_student_network_deeper():
    new_w1, new_b1 = tf_net2deeper(MODEL, WEIGHT, 'conv1')
    with tf.Graph().as_default():
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            x = tf.placeholder(tf.float32, shape=[None, 784])
            y_ = tf.placeholder(tf.float32, shape=[None, 10])
            x_image = tf.reshape(x, [-1,28,28,1])
            net = ops.conv2d(x_image, 32, [5, 5], scope='conv1')
            net = ops.conv2d(net, 32, [5, 5], scope='conv1_new', initializer='constant', weights=new_w1, bias=new_b1, restore=False)
            net = ops.max_pool(net, [2, 2], scope='pool1')
            net = ops.conv2d(net, 64, [5, 5], scope='conv2')
            net = ops.max_pool(net, [2, 2], scope='pool2')
            net = ops.flatten(net, scope='pool2_flat')
            net = ops.fc(net, 1024, scope='fc1')
            net = ops.fc(net, 10, activation=None, scope='fc2')
            y_conv = tf.nn.softmax(net)
            cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), axis=[1]))
            model = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
            correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            tf.summary.scalar('loss', cross_entropy)
            tf.summary.scalar('acc', accuracy)
            merged = tf.summary.merge_all()
            saver = tf.train.Saver()
            writer = tf.summary.FileWriter('./logs-deeper', sess.graph)
            sess.run(tf.global_variables_initializer())
            variables_to_restore = tf.get_collection(variables.VARIABLES_TO_RESTORE)
            saver = tf.train.Saver(variables_to_restore)
            saver.restore(sess, WEIGHT)
            print('Net2Deeper...')
            for i in range(MAX_ITER):
                batch = mnist.train.next_batch(BATCH_SIZE)
                sess.run(model, feed_dict={x: batch[0], y_: batch[1]})
                if i % 100 == 0:
                    summary_str, acc = sess.run([merged, accuracy], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
                    writer.add_summary(summary_str, i)
                    print('[Iter: {}] Validation Accuracy : {:.4f}'.format(i,acc))
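As with tf_net2wider, tf_net2deeper is external to these snippets. A minimal NumPy sketch of the initialization it presumably performs for a conv layer: the inserted layer starts as an identity convolution, so the deepened network initially computes the same function (the ReLU is idempotent on the non-negative activations it receives). Names are illustrative:

import numpy as np

def net2deeper_conv(channels, ksize=5):
    # Identity kernel: output channel i copies input channel i.
    w = np.zeros((ksize, ksize, channels, channels), dtype=np.float32)
    center = ksize // 2
    for i in range(channels):
        w[center, center, i, i] = 1.0
    b = np.zeros(channels, dtype=np.float32)
    return w, b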
Example 22
def encoder(inp, z_dim):
    #n = 32
    with arg_scope([conv2d, fc], batch_norm_params=batch_norm_params, stddev=0.02, activation=lrelu, weight_decay=1e-5):
        with tf.device("/gpu:%d"%FLAGS.gpu_num):
            inp = inp-0.5
            o = conv2d(inp, num_filters_out=32, kernel_size=(3, 3), stride=1)
            o = conv2d(o, num_filters_out=32, kernel_size=(3, 3), stride=2)
            o = conv2d(o, num_filters_out=64, kernel_size=(3, 3), stride=2)
            o = conv2d(o, num_filters_out=64, kernel_size=(3, 3), stride=1)
            o = conv2d(o, num_filters_out=128, kernel_size=(3, 3), stride=2)
            o = conv2d(o, num_filters_out=128, kernel_size=(3, 3), stride=1)
            flat = flatten(o)
            z = fc(flat, num_units_out=z_dim, activation=None)
            # normalized between -2 and 2 because of batchnorm
            return tf.nn.sigmoid(z * 2)
Example 23
def train_a_student_network_deeper_rand_init():
    with tf.Graph().as_default():
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            x = tf.placeholder(tf.float32, shape=[None, 784])
            y_ = tf.placeholder(tf.float32, shape=[None, 10])
            x_image = tf.reshape(x, [-1,28,28,1])
            net = ops.conv2d(x_image, 32, [5, 5], scope='conv1', stddev=0.1, bias=0.1)
            net = ops.conv2d(net, 32, [5, 5], scope='conv1_new', stddev=0.1, bias=0.1, restore=False)
            net = ops.max_pool(net, [2, 2], scope='pool1')
            net = ops.conv2d(net, 64, [5, 5], scope='conv2', stddev=0.1, bias=0.1)
            net = ops.max_pool(net, [2, 2], scope='pool2')
            net = ops.flatten(net, scope='pool2_flat')
            net = ops.fc(net, 1024, scope='fc1', stddev=0.1, bias=0.1)
            net = ops.fc(net, 10, activation=None, scope='fc2', stddev=0.1, bias=0.1)
            y_conv = tf.nn.softmax(net)
            cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), axis=[1]))
            model = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
            correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            tf.summary.scalar('loss', cross_entropy)
            tf.summary.scalar('acc', accuracy)
            merged = tf.summary.merge_all()
            saver = tf.train.Saver()
            writer = tf.summary.FileWriter('./logs-deeper-rand', sess.graph)
            sess.run(tf.global_variables_initializer())
            variables_to_restore = tf.get_collection(variables.VARIABLES_TO_RESTORE)
            saver = tf.train.Saver(variables_to_restore)
            saver.restore(sess, WEIGHT)
            print('Net2Deeper Baseline (Rand init)...')
            for i in range(MAX_ITER):
                batch = mnist.train.next_batch(BATCH_SIZE)
                sess.run(model, feed_dict={x: batch[0], y_: batch[1]})
                if i % 100 == 0:
                    summary_str, acc = sess.run([merged, accuracy], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
                    writer.add_summary(summary_str, i)
                    print('[Iter: {}] Validation Accuracy : {:.4f}'.format(i, acc))
Example 24
def build_a_new_graph():
    with tf.Graph().as_default() as graph:
        x = tf.placeholder(tf.float32, shape=[None, 784])
        tf.add_to_collection('input', x)
        y_ = tf.placeholder(tf.float32, shape=[None, 10])
        tf.add_to_collection('label', y_)
        x_image = tf.reshape(x, [-1,28,28,1])
        net = ops.conv2d(x_image, 64, [5, 5], scope='conv1')
        net = ops.max_pool(net, [2, 2], scope='pool1')
        net = ops.conv2d(net, 64, [5, 5], scope='conv2')
        net = ops.conv2d(net, 64, [5, 5], scope='conv2_new', stddev=0.1, bias=0.1)
        net = ops.max_pool(net, [2, 2], scope='pool2')
        net = ops.flatten(net, scope='pool2_flat')
        net = ops.fc(net, 1024, scope='fc1')
        net = ops.fc(net, 1024, scope='fc1_new')
        net = ops.fc(net, 10, activation=None, scope='fc2')
        y_conv = tf.nn.softmax(net)
        cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), axis=[1]))
        model = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
        tf.add_to_collection('objective', model)
        correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.add_to_collection('accuracy', accuracy)
        return graph
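A hypothetical way to consume this graph, pulling the tensors back out of the named collections:

graph = build_a_new_graph()
with graph.as_default():
    x = tf.get_collection('input')[0]
    y_ = tf.get_collection('label')[0]
    train_op = tf.get_collection('objective')[0]
    accuracy = tf.get_collection('accuracy')[0]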
Example 25
def discriminator(inp):
    n = 32
    with arg_scope([conv2d], batch_norm_params=batch_norm_params, stddev=0.02, activation=lrelu, weight_decay=1e-5):
        inp = inp-0.5
        o = conv2d(inp, num_filters_out=n, kernel_size=(3, 3), stride=1)
        o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=2)
        o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=1)
        o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=2)
        o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=1)
        o = conv2d(o, num_filters_out=n*2, kernel_size=(3, 3), stride=2)
        o = conv2d(o, num_filters_out=n*2, kernel_size=(3, 3), stride=1)
        flat = flatten(o)
        #flat = flatten(avg_pool(o, kernel_size=3))
        prob = fc(flat, num_units_out=1, activation=tf.nn.sigmoid)
        #prob = tf.Print(prob, [prob])
        return prob
Example 26
def encoder(inp, z_dim):
    #n = 32
    with arg_scope([conv2d], batch_norm_params=batch_norm_params, stddev=0.02, activation=lrelu, weight_decay=1e-5):
        with tf.device("/gpu:%d"%FLAGS.gpu_num):
            inp = inp-0.5
            n = 64*4
            o = conv2d(inp, num_filters_out=n, kernel_size=(3, 3), stride=1)
            o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=2)
            o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=2)
            o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=1)
            n = 128*4
            o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=2)
            o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=1)
            flat = flatten(o)
            #flat = flatten(avg_pool(o, kernel_size=3))
            #z = fc(flat, num_units_out=z_dim, activation=tf.nn.tanh)/2+.5
            z = fc(flat, num_units_out=z_dim, activation=tf.nn.sigmoid)
            return z
Example 27
def cppn_func(inp, z):
    with arg_scope([fc],
                #batch_norm_params=batch_norm_params,
                stddev=0.02):
        z = z*2 - 1
        #n = 32
        n = 128

        length = 20
        h = inp[:, :, 0:1]
        w = inp[:, :, 1:2]

        r_h = sin_bank(h, 64, length=length)
        fc_h = three_fc(r_h, num_units_out=n)

        r_w = sin_bank(w, 64, length=length)
        fc_w = three_fc(r_w, num_units_out=n)

        d = tf.sqrt((h-0.5)**2 + (w-0.5)**2)
        r_d = sin_bank(d, 64, length=length)
        fc_d = three_fc(r_d, num_units_out=n)

        pi = 3.1415
        n_angles = 64
        length = 20
        theta = tf.get_variable("rotations", dtype=tf.float32, shape=[n_angles,],
                        initializer=tf.random_uniform_initializer(0.0, pi*2))
        wh = tf.cos(theta) * h - tf.sin(theta)*w
        r_wh = sin_bank(wh, n_angles, length=length)
        fc_wh = three_fc(r_wh, num_units_out=n)

        length = 50
        n_angles = 64
        theta = tf.get_variable("rotations2", dtype=tf.float32, shape=[n_angles,],
                        initializer=tf.random_uniform_initializer(0.0, pi*2))

        wh_hf = tf.cos(theta) * h - tf.sin(theta)*w
        r_wh_hf = sin_bank(wh_hf, n_angles, length=length)
        fc_wh_hf = three_fc(r_wh_hf, num_units_out=n)

        n_angles = 128
        trainable = True
        z_angle = fc(z, num_units_out=n_angles, activation=None, stddev=0.1, trainable=trainable)*10
        z_angle = tf.expand_dims(z_angle, 1)
        z_scale = fc(z, num_units_out=n_angles, activation=None, stddev=0.1, trainable=trainable)*10
        z_scale = tf.expand_dims(z_scale, 1)
        z_shift = fc(z, num_units_out=n_angles, activation=None, stddev=0.1, trainable=trainable)*10
        z_shift = tf.expand_dims(z_shift, 1)
        rot_z = tf.cos(z_angle) * h - tf.sin(z_angle)*w
        fc_zangle = tf.sin(rot_z*z_scale + z_shift)
        fc_zangle_proj = three_fc(fc_zangle, num_units_out=n)

        z_angle = fc(z, num_units_out=n_angles, activation=None, stddev=0.1, trainable=trainable)*10
        z_angle = tf.expand_dims(z_angle, 1)
        z_scale = fc(z, num_units_out=n_angles, activation=None, stddev=0.1, trainable=trainable)*4
        z_scale = tf.expand_dims(z_scale, 1)
        z_shift = fc(z, num_units_out=n_angles, activation=None, stddev=0.1, trainable=trainable)*4
        z_shift = tf.expand_dims(z_shift, 1)
        rot_z = tf.cos(z_angle) * h - tf.sin(z_angle)*w
        fc_zangle = tf.sin(rot_z*z_scale + z_shift)
        fc_zangle_proj_large = three_fc(fc_zangle, num_units_out=n)


        z_comb = fc(z, num_units_out=n)
        z_comb = tf.expand_dims(z_comb, 1)

        #res = (fc_h + fc_w + fc_d) * context_proc + z_comb
        #res = (fc_h + fc_w + fc_d + fc_wh) + z_comb
        #res = (fc_wh + fc_wh_hf) + z_comb
        #res = (fc_wh + fc_wh_hf + fc_d + fc_zangle_proj) + z_comb
        #res = (fc_zangle_proj + fc_zangle_proj_large) + z_comb
        res = (fc_wh + fc_wh_hf + fc_d + fc_zangle_proj + fc_zangle_proj_large) + z_comb
        #res = (fc_h + fc_w + fc_d) + z_comb
        #res = (fc_h + fc_w + fc_d) + z_comb
        #res = fc_h + fc_w
        z_mul = fc(z, num_units_out=n)
        z_mul = tf.expand_dims(z_mul, 1)

        #res *= z_mul

    with arg_scope([fc], batch_norm_params=batch_norm_params, stddev=0.02):
        n = 64
        h = three_fc(res, num_units_out=n)
        h2 = three_fc(h, num_units_out=n)
        #h3 = three_fc(h2, num_units_out=n)
        return three_fc(h2, num_units_out=3, batch_norm_params=None) * 0.5 + 0.5
Example 28
 def testCreateFCWithScope(self):
     height, width = 3, 3
     with self.test_session():
         inputs = tf.random_uniform((5, height * width * 3), seed=1)
         output = ops.fc(inputs, 32, scope='fc1')
         self.assertEquals(output.op.name, 'fc1/Relu')
Example 29
def inception_v3(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=1000,
                 is_training=True,
                 restore_logits=True,
                 scope=''):
    """Latest Inception from http://arxiv.org/abs/1512.00567.

    "Rethinking the Inception Architecture for Computer Vision"

    Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
    Zbigniew Wojna

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether the model is being trained.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for name_scope.

  Returns:
    a list containing 'logits', 'aux_logits' Tensors.
  """
    # end_points will collect relevant activations for external use, for example
    # summaries or losses.
    end_points = {}
    with tf.name_scope(scope, 'inception_v3', [inputs]):
        with scopes.arg_scope(
            [ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                is_training=is_training):
            with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                                  stride=1,
                                  padding='VALID'):
                # 299 x 299 x 3
                end_points['conv0'] = ops.conv2d(inputs,
                                                 32, [3, 3],
                                                 stride=2,
                                                 scope='conv0')
                # 149 x 149 x 32
                end_points['conv1'] = ops.conv2d(end_points['conv0'],
                                                 32, [3, 3],
                                                 scope='conv1')
                # 147 x 147 x 32
                end_points['conv2'] = ops.conv2d(end_points['conv1'],
                                                 64, [3, 3],
                                                 padding='SAME',
                                                 scope='conv2')
                # 147 x 147 x 64
                end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
                                                   stride=2,
                                                   scope='pool1')
                # 73 x 73 x 64
                end_points['conv3'] = ops.conv2d(end_points['pool1'],
                                                 80, [1, 1],
                                                 scope='conv3')
                # 73 x 73 x 80.
                end_points['conv4'] = ops.conv2d(end_points['conv3'],
                                                 192, [3, 3],
                                                 scope='conv4')
                # 71 x 71 x 192.
                end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
                                                   stride=2,
                                                   scope='pool2')
                # 35 x 35 x 192.
                net = end_points['pool2']
            # Inception blocks
            with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                                  stride=1,
                                  padding='SAME'):
                # mixed: 35 x 35 x 256.
                with tf.variable_scope('mixed_35x35x256a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch5x5, branch3x3dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_35x35x256a'] = net
                # mixed_1: 35 x 35 x 288.
                with tf.variable_scope('mixed_35x35x288a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch5x5, branch3x3dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_35x35x288a'] = net
                # mixed_2: 35 x 35 x 288.
                with tf.variable_scope('mixed_35x35x288b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch5x5, branch3x3dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_35x35x288b'] = net
                # mixed_3: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768a'):
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net,
                                               384, [3, 3],
                                               stride=2,
                                               padding='VALID')
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl,
                                                  96, [3, 3],
                                                  stride=2,
                                                  padding='VALID')
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.max_pool(net, [3, 3],
                                                   stride=2,
                                                   padding='VALID')
                    net = tf.concat(
                        axis=3, values=[branch3x3, branch3x3dbl, branch_pool])
                    end_points['mixed_17x17x768a'] = net
                # mixed4: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 128, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 128, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch7x7, branch7x7dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_17x17x768b'] = net
                # mixed_5: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768c'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 160, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 160, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch7x7, branch7x7dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_17x17x768c'] = net
                # mixed_6: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768d'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 160, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 160, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch7x7, branch7x7dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_17x17x768d'] = net
                # mixed_7: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768e'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 192, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 192, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch7x7, branch7x7dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_17x17x768e'] = net
                # Auxiliary Head logits
                aux_logits = tf.identity(end_points['mixed_17x17x768e'])
                with tf.variable_scope('aux_logits'):
                    aux_logits = ops.avg_pool(aux_logits, [5, 5],
                                              stride=3,
                                              padding='VALID')
                    aux_logits = ops.conv2d(aux_logits,
                                            128, [1, 1],
                                            scope='proj')
                    # Shape of feature map before the final layer.
                    shape = aux_logits.get_shape()
                    aux_logits = ops.conv2d(aux_logits,
                                            768,
                                            shape[1:3],
                                            stddev=0.01,
                                            padding='VALID')
                    aux_logits = ops.flatten(aux_logits)
                    aux_logits = ops.fc(aux_logits,
                                        num_classes,
                                        activation=None,
                                        stddev=0.001,
                                        restore=restore_logits)
                    end_points['aux_logits'] = aux_logits
                # mixed_8: 8 x 8 x 1280.
                # Note that the scope below is not changed, so as not to
                # invalidate previously saved checkpoints.
                # (TODO) Fix the scope when appropriate.
                with tf.variable_scope('mixed_17x17x1280a'):
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 192, [1, 1])
                        branch3x3 = ops.conv2d(branch3x3,
                                               320, [3, 3],
                                               stride=2,
                                               padding='VALID')
                    with tf.variable_scope('branch7x7x3'):
                        branch7x7x3 = ops.conv2d(net, 192, [1, 1])
                        branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
                        branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
                        branch7x7x3 = ops.conv2d(branch7x7x3,
                                                 192, [3, 3],
                                                 stride=2,
                                                 padding='VALID')
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.max_pool(net, [3, 3],
                                                   stride=2,
                                                   padding='VALID')
                    net = tf.concat(
                        axis=3, values=[branch3x3, branch7x7x3, branch_pool])
                    end_points['mixed_17x17x1280a'] = net
                # mixed_9: 8 x 8 x 2048.
                with tf.variable_scope('mixed_8x8x2048a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 320, [1, 1])
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 384, [1, 1])
                        branch3x3 = tf.concat(
                            axis=3,
                            values=[
                                ops.conv2d(branch3x3, 384, [1, 3]),
                                ops.conv2d(branch3x3, 384, [3, 1])
                            ])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 448, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
                        branch3x3dbl = tf.concat(
                            axis=3,
                            values=[
                                ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                ops.conv2d(branch3x3dbl, 384, [3, 1])
                            ])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch3x3, branch3x3dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_8x8x2048a'] = net
                # mixed_10: 8 x 8 x 2048.
                with tf.variable_scope('mixed_8x8x2048b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 320, [1, 1])
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 384, [1, 1])
                        branch3x3 = tf.concat(
                            axis=3,
                            values=[
                                ops.conv2d(branch3x3, 384, [1, 3]),
                                ops.conv2d(branch3x3, 384, [3, 1])
                            ])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 448, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
                        branch3x3dbl = tf.concat(
                            axis=3,
                            values=[
                                ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                ops.conv2d(branch3x3dbl, 384, [3, 1])
                            ])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch3x3, branch3x3dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_8x8x2048b'] = net
                # Final pooling and prediction
                with tf.variable_scope('logits'):
                    shape = net.get_shape()
                    net = ops.avg_pool(net,
                                       shape[1:3],
                                       padding='VALID',
                                       scope='pool')
                    # 1 x 1 x 2048
                    net = ops.dropout(net, dropout_keep_prob, scope='dropout')
                    net = ops.flatten(net, scope='flatten')
                    # 2048
                    logits = ops.fc(net,
                                    num_classes,
                                    activation=None,
                                    scope='logits',
                                    restore=restore_logits)
                    # 1000
                    end_points['logits'] = logits
                    end_points['predictions'] = tf.nn.softmax(
                        logits, name='predictions')
            return logits, end_points
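A hypothetical invocation, assuming the slim-style ops/scopes modules used throughout these examples are importable:

images = tf.placeholder(tf.float32, [None, 299, 299, 3])
logits, end_points = inception_v3(images, num_classes=1000, is_training=False)
probs = end_points['predictions']   # softmax over the num_classes outputs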
Example 30
 def testCreateFCWithoutActivation(self):
     height, width = 3, 3
     with self.test_session():
         inputs = tf.random_uniform((5, height * width * 3), seed=1)
         output = ops.fc(inputs, 32, activation=None)
         self.assertEquals(output.op.name, 'FC/xw_plus_b')
Example 31
def cppn_func(inp, context, z):
    with arg_scope([fc], batch_norm_params=batch_norm_params, stddev=0.02):
        z = z*2 - 1
        n = 128

        length = 20
        h = inp[:, :, 0:1]
        w = inp[:, :, 1:2]

        r_h = sin_bank(h, 64, length=length)
        fc_h = three_fc(r_h, num_units_out=n)

        r_w = sin_bank(w, 64, length=length)
        fc_w = three_fc(r_w, num_units_out=n)

        d = tf.sqrt((h-0.5)**2 + (w-0.5)**2)
        r_d = sin_bank(d, 64, length=length)
        fc_d = three_fc(r_d, num_units_out=n)

        #fc_inp = three_fc(inp-0.5, num_units_out=n)

        pi = 3.1415
        n_angles = 128
        length = 20
        theta = tf.get_variable("rotations", dtype=tf.float32, shape=[n_angles,],
                        initializer=tf.random_uniform_initializer(0.0, pi*2))
        wh = tf.cos(theta) * h - tf.sin(theta)*w
        r_wh = sin_bank(wh, n_angles, length=length)
        fc_wh = three_fc(r_wh, num_units_out=n)

        length = 100
        n_angles = 64
        theta = tf.get_variable("rotations2", dtype=tf.float32, shape=[n_angles,],
                        initializer=tf.random_uniform_initializer(0.0, pi*2))
        wh_hf = tf.cos(theta) * h - tf.sin(theta)*w
        r_wh_hf = sin_bank(wh_hf, n_angles, length=length)
        fc_wh_hf = three_fc(r_wh_hf, num_units_out=n)


        context_proc = fc(flatten(context), num_units_out=n)
        context_proc = tf.expand_dims(context_proc, 1)

        z_comb = fc(z, num_units_out=n)
        z_comb = tf.expand_dims(z_comb, 1)

        #res = (fc_h + fc_w + fc_d) * context_proc + z_comb
        #res = (fc_h + fc_w + fc_d + fc_wh) + z_comb
        #res = (fc_wh + fc_wh_hf) + z_comb
        res = (fc_wh + fc_wh_hf + fc_d) + z_comb
        #res = (fc_h + fc_w + fc_d) + z_comb
        #res = (fc_h + fc_w + fc_d) + z_comb
        #res = fc_h + fc_w
        z_mul = fc(z, num_units_out=n)
        z_mul = tf.expand_dims(z_mul, 1)

        #res *= z_mul

        h = three_fc(res, num_units_out=n)
        h2 = three_fc(h, num_units_out=n)
        h3 = three_fc(h2, num_units_out=n)
        return three_fc(h3, num_units_out=3, batch_norm_params=None)
Example 32
with tf.variable_scope("generator") as gen_scope:
    z = tf.random_uniform([batch_size, z_dim], 0, 1)
    generated = generator(z)
gen_scope.reuse_variables()
gen_vars = [x for x in tf.trainable_variables() if x.name.startswith(gen_scope.name)]

with tf.variable_scope("discriminator") as scope:
    real_probs = discriminator(images)

with tf.variable_scope("discriminator", reuse=True) as scope:
    fake_probs = discriminator(generated)

with tf.variable_scope("encoder") as encoder_scope:
    enc_z = encoder(images, z_dim)
    enc_z_mean = fc(enc_z, num_units_out=z_dim, batch_norm_params=None, activation=None)
    enc_z_sigma = fc(enc_z, num_units_out=z_dim, batch_norm_params=None, activation=None)
    # Reparameterization trick: z = mean + exp(log_sigma) * eps, eps ~ N(0, 1).
    enc_sampled_z = tf.exp(enc_z_sigma) * tf.random_normal(tf.shape(enc_z)) + enc_z_mean

with tf.variable_scope(gen_scope, reuse=True):
    ae_generated = generator(enc_sampled_z)

dis_vars = [x for x in tf.trainable_variables() if x.name.startswith(scope.name)]


discrim_loss = -(tf.log(real_probs) + tf.log(1-fake_probs))
discrim_loss_mean = tf.reduce_mean(discrim_loss)
generator_loss = -(tf.log(fake_probs))
#generator_loss = (1 - generated)**2
generator_loss_mean = tf.reduce_mean(generator_loss)
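A hypothetical continuation of the wiring above, giving each loss its own optimizer restricted to the matching variable set, as is usual for GAN training:

d_train = tf.train.AdamOptimizer(2e-4).minimize(discrim_loss_mean, var_list=dis_vars)
g_train = tf.train.AdamOptimizer(2e-4).minimize(generator_loss_mean, var_list=gen_vars)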