Example #1
# Assumed module-level imports (not shown in the scraped snippet):
import numpy as np
import tensorflow as tf
import harmonic_network_lite as hl  # assumption: `hl` aliases the harmonic-network layers module

def test_forward_invariance_90():
   """Test invariance to 90° rotations of the input"""
   tf.reset_default_graph()
   x = tf.placeholder(tf.float32, [4,3,3,1,1,1])
   y = hl.conv2d(x, 1, 3, name='conv_forward_invariance_90')
   inv = hl.stack_magnitudes(y)

   X1 = np.random.randn(1,3,3,1,1,1)
   X2 = np.transpose(X1, (0,2,1,3,4,5))[:,:,::-1,:,:,:]
   X3 = np.transpose(X2, (0,2,1,3,4,5))[:,:,::-1,:,:,:]
   X4 = np.transpose(X3, (0,2,1,3,4,5))[:,:,::-1,:,:,:]
   X = np.concatenate((X1,X2,X3,X4), axis=0)

   with tf.Session() as sess:
      init_op = tf.global_variables_initializer()
      sess.run(init_op)
      Y, Inv = sess.run([y, inv], feed_dict={x: X})

   Y = np.squeeze(Y)
   Inv = np.squeeze(Inv)
   # Differences between the order-0 (rotation-invariant) responses should vanish up to numerical precision
   for i in range(4):
      for j in range(i):
         assert np.amax(np.abs(Y[j,0,:] - Y[i,0,:])) < 1e-5
   # Look at the difference in magnitudes
   for i in range(4):
      for j in range(i):
         assert np.amax(np.abs(Inv[j,:] - Inv[i,:])) < 1e-5
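
# For reference (a sketch, plain NumPy): each transpose-then-flip step above
# is exactly one 90-degree rotation in the (height, width) plane, i.e.
# np.rot90 with k=-1 (clockwise in image coordinates):
A = np.random.randn(1, 3, 3, 1, 1, 1)
B = np.transpose(A, (0, 2, 1, 3, 4, 5))[:, :, ::-1, :, :, :]
assert np.array_equal(B, np.rot90(A, k=-1, axes=(1, 2)))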
Example #2
def test_backward_pass_shape():
   """Make sure that a gradient is created"""
   tf.reset_default_graph()
   x = tf.placeholder(tf.float32, [2,3,3,1,1,1])
   y = hl.conv2d(x, 1, 3, name='conv_backward_pass')
   g = tf.gradients(y, x)

   X = np.random.randn(2,3,3,1,1,1)

   with tf.Session() as sess:
      init_op = tf.global_variables_initializer()
      sess.run(init_op)
      G = sess.run(g, feed_dict={x: X})
   assert G[0].shape == (2,3,3,1,1,1)
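
# The test above only checks the gradient's shape. A stricter variant (a
# sketch, assuming TF1's tf.test.compute_gradient_error and the output-shape
# pattern asserted in the forward-shape test below) also compares analytic
# and numeric gradients:
def test_backward_pass_gradcheck():
   """Compare analytic and numeric gradients (sketch)"""
   tf.reset_default_graph()
   x = tf.placeholder(tf.float32, [2,3,3,1,1,1])
   y = hl.conv2d(x, 1, 3, name='conv_gradcheck')
   with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      # Output shape follows the (bs, i-2, i-2, 2, 2, n_filters) pattern
      # asserted in the forward-shape test.
      err = tf.test.compute_gradient_error(x, [2,3,3,1,1,1], y, [2,1,1,2,2,1])
      assert err < 1e-2  # loose tolerance for float32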
Example #3
def test_forward_pass_shape():
   """Convolve with random noise"""
   for i in range(3,13):
      tf.reset_default_graph()
      x = tf.placeholder(tf.float32, [6,i,i,1,1,4])
      y = hl.conv2d(x, 5, 3, name='conv_forward_pass_shape')

      X = np.random.randn(6,i,i,1,1,4)

      with tf.Session() as sess:
         init_op = tf.global_variables_initializer()
         sess.run(init_op)
         Y = sess.run(y, feed_dict={x: X})
      assert Y.shape == (6,i-2,i-2,2,2,5)
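
# Where the i-2 comes from (plain-Python sketch): with VALID padding, kernel
# size k and stride 1, each spatial dimension shrinks to n - k + 1. The
# trailing (2, 2, 5) presumably stacks rotation orders 0..1, the real and
# imaginary parts, and the 5 output channels.
def valid_out_size(n, k, stride=1):
   return (n - k) // stride + 1

assert valid_out_size(9, 3) == 7  # matches the 9 -> 7 case in the loop above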
Example #4
def deep_mnist(args, x, train_phase):
   """The MNIST-rot model similar to the one in Cohen & Welling, 2016"""
   # Abbreviations for the model hyperparameters
   order = 1
   # Number of filters per block
   nf = args.n_filters
   nf2 = int(nf*args.filter_gain)
   nf3 = int(nf*(args.filter_gain**2.))
   bs = args.batch_size
   fs = args.filter_size
   ncl = args.n_classes
   sm = args.std_mult
   nr = args.n_rings

   # Create bias for final layer
   bias = tf.get_variable('b7', shape=[args.n_classes],
                     initializer=tf.constant_initializer(1e-2))
   x = tf.reshape(x, shape=[bs,args.dim,args.dim,1,1,1])

   # Convolutional Layers with pooling
   with tf.name_scope('block1') as scope:
      cv1 = hn_lite.conv2d(x, nf, fs, padding='SAME', n_rings=nr, name='1')
      cv1 = hn_lite.non_linearity(cv1, tf.nn.relu, name='1')

      cv2 = hn_lite.conv2d(cv1, nf, fs, padding='SAME', n_rings=nr, name='2')
      cv2 = hn_lite.batch_norm(cv2, train_phase, name='bn1')

   with tf.name_scope('block2') as scope:
      cv2 = hn_lite.mean_pool(cv2, ksize=(1,2,2,1), strides=(1,2,2,1))
      cv3 = hn_lite.conv2d(cv2, nf2, fs, padding='SAME', n_rings=nr, name='3')
      cv3 = hn_lite.non_linearity(cv3, tf.nn.relu, name='3')

      cv4 = hn_lite.conv2d(cv3, nf2, fs, padding='SAME', n_rings=nr, name='4')
      cv4 = hn_lite.batch_norm(cv4, train_phase, name='bn2')

   with tf.name_scope('block3') as scope:
      cv4 = hn_lite.mean_pool(cv4, ksize=(1,2,2,1), strides=(1,2,2,1))
      cv5 = hn_lite.conv2d(cv4, nf3, fs, padding='SAME', n_rings=nr, name='5')
      cv5 = hn_lite.non_linearity(cv5, tf.nn.relu, name='5')

      cv6 = hn_lite.conv2d(cv5, nf3, fs, padding='SAME', n_rings=nr, name='6')
      cv6 = hn_lite.batch_norm(cv6, train_phase, name='bn3')

   # Final Layer
   with tf.name_scope('block4') as scope:
      cv7 = hn_lite.conv2d(cv6, ncl, fs, padding='SAME', n_rings=nr, phase=False,
               name='7')
      real = hn_lite.sum_magnitudes(cv7)
      cv7 = tf.reduce_mean(real, axis=[1,2,3,4])
      return tf.nn.bias_add(cv7, bias)
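
# Hypothetical wiring of the model above (a sketch: the attribute names are
# exactly those deep_mnist reads from `args`; the values are illustrative):
from types import SimpleNamespace

args = SimpleNamespace(n_filters=8, filter_gain=2., batch_size=50,
                       filter_size=5, n_classes=10, std_mult=0.4,
                       n_rings=4, dim=28)
x = tf.placeholder(tf.float32, [args.batch_size, args.dim * args.dim])
train_phase = tf.placeholder(tf.bool, name='train_phase')
logits = deep_mnist(args, x, train_phase)  # -> [batch_size, n_classes]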
Example #5
def deep_cifar(opt, x, train_phase, device='/cpu:0'):
    """High frequency convolutions are unstable, so get rid of them"""
    # Abbreviations
    nf = opt['n_filters']
    fg = opt['filter_gain']
    bs = opt['batch_size']
    fs = opt['filter_size']
    N = opt['resnet_block_multiplicity']

    with tf.device(device):
        initializer = tf.contrib.layers.variance_scaling_initializer()
        Wgap = tf.get_variable('Wfc',
                               shape=[fg * fg * nf, opt['n_classes']],
                               initializer=initializer)
        bgap = tf.get_variable('bfc',
                               shape=[opt['n_classes']],
                               initializer=tf.constant_initializer(1e-2))

        x = tf.reshape(
            x, shape=[bs, opt['dim'], opt['dim'], 1, 1, opt['n_channels']])

    # Convolutional Layers
    res1 = hn_lite.conv2d(x, nf, fs, padding='SAME', name='in', device=device)
    for i in range(N):
        name = 'r1_' + str(i)
        res1 = hn_lite.residual_block(res1,
                                      nf,
                                      fs,
                                      2,
                                      train_phase,
                                      name=name,
                                      device=device)
    res2 = hn_lite.mean_pool(res1,
                             ksize=(1, 2, 2, 1),
                             strides=(1, 2, 2, 1),
                             name='mp1')

    for i in range(N):
        name = 'r2_' + str(i)
        res2 = hn_lite.residual_block(res2,
                                      fg * nf,
                                      fs,
                                      2,
                                      train_phase,
                                      name=name,
                                      device=device)
    res3 = hn_lite.mean_pool(res2,
                             ksize=(1, 2, 2, 1),
                             strides=(1, 2, 2, 1),
                             name='mp2')

    for i in range(N):
        name = 'r3_' + str(i)
        res3 = hn_lite.residual_block(res3,
                                      fg * fg * nf,
                                      fs,
                                      2,
                                      train_phase,
                                      name=name,
                                      device=device)
    res4 = hn_lite.mean_pool(res3,
                             ksize=(1, 2, 2, 1),
                             strides=(1, 2, 2, 1),
                             name='mp3')

    with tf.name_scope('gap') as scope:
        gap = tf.reduce_mean(hn_lite.sum_magnitudes(res4),
                             axis=[1, 2, 3, 4])
        return tf.nn.bias_add(tf.matmul(gap, Wgap), bgap)
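
# Hypothetical training head for the classifier above (a sketch; `opt`, `x`
# and `train_phase` are as passed to deep_cifar, `labels` is illustrative):
labels = tf.placeholder(tf.int64, [opt['batch_size']])
logits = deep_cifar(opt, x, train_phase)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                   logits=logits))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)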
Example #6
def deep_mnist(opt, x, train_phase, device='/cpu:0'):
    """High frequency convolutions are unstable, so get rid of them"""
    # Abbreviations for the model hyperparameters
    order = 1
    # Number of filters per block
    nf = opt['n_filters']
    nf2 = int(nf * opt['filter_gain'])
    nf3 = int(nf * (opt['filter_gain']**2.))
    bs = opt['batch_size']
    fs = opt['filter_size']
    nch = opt['n_channels']
    ncl = opt['n_classes']
    sm = opt['std_mult']

    # Create bias for final layer
    with tf.device(device):
        bias = tf.get_variable('b7',
                               shape=[opt['n_classes']],
                               initializer=tf.constant_initializer(1e-2))
        x = tf.reshape(x, shape=[bs, opt['dim'], opt['dim'], 1, 1, nch])

    with arg_scope([hn_lite.conv2d, hn_lite.non_linearity, hn_lite.batch_norm],
                   device=device):
        # Convolutional Layers with pooling
        with tf.name_scope('block1') as scope:
            cv1 = hn_lite.conv2d(x, nf, fs, padding='SAME', name='1')
            cv1 = hn_lite.non_linearity(cv1, tf.nn.relu, name='1')

            cv2 = hn_lite.conv2d(cv1, nf, fs, padding='SAME', name='2')
            cv2 = hn_lite.batch_norm(cv2, train_phase, name='bn1')

        with tf.name_scope('block2') as scope:
            cv2 = hn_lite.mean_pool(cv2,
                                    ksize=(1, 2, 2, 1),
                                    strides=(1, 2, 2, 1))
            cv3 = hn_lite.conv2d(cv2, nf2, fs, padding='SAME', name='3')
            cv3 = hn_lite.non_linearity(cv3, tf.nn.relu, name='3')

            cv4 = hn_lite.conv2d(cv3, nf2, fs, padding='SAME', name='4')
            cv4 = hn_lite.batch_norm(cv4, train_phase, name='bn2')

        with tf.name_scope('block3') as scope:
            cv4 = hn_lite.mean_pool(cv4,
                                    ksize=(1, 2, 2, 1),
                                    strides=(1, 2, 2, 1))
            cv5 = hn_lite.conv2d(cv4, nf3, fs, padding='SAME', name='5')
            cv5 = hn_lite.non_linearity(cv5, tf.nn.relu, name='5')

            cv6 = hn_lite.conv2d(cv5, nf3, fs, padding='SAME', name='6')
            cv6 = hn_lite.batch_norm(cv6, train_phase, name='bn3')

        # Final Layer
        with tf.name_scope('block4') as scope:
            cv7 = hn_lite.conv2d(cv6,
                                 ncl,
                                 fs,
                                 padding='SAME',
                                 phase=False,
                                 name='7')
            real = hn_lite.sum_magnitudes(cv7)
            cv7 = tf.reduce_mean(real, axis=[1, 2, 3, 4])
            return tf.nn.bias_add(cv7, bias)
Example #7
def hnet_bsd(args, x, train_phase):
    """High frequency convolutions are unstable, so get rid of them"""
    # Abbreviations for the model hyperparameters
    order = 1
    nf = int(args.n_filters)
    nf2 = int((args.filter_gain)*nf)
    nf3 = int((args.filter_gain**2)*nf)
    nf4 = int((args.filter_gain**3)*nf)
    bs = args.batch_size
    fs = args.filter_size
    nch = args.n_channels
    nr = args.n_rings
    tp = train_phase
    std = args.std_mult

    x = tf.reshape(x, shape=[bs,args.height,args.width,1,1,3])
    fm = {}

    # Convolutional Layers
    with tf.name_scope('stage1') as scope:
        cv1 = hl.conv2d(x, nf, fs, stddev=std, padding='SAME', n_rings=nr, name='1_1')
        cv1 = hl.non_linearity(cv1, name='1_1')

        cv2 = hl.conv2d(cv1, nf, fs, stddev=std, padding='SAME', n_rings=nr, name='1_2')
        cv2 = hl.batch_norm(cv2, tp, name='bn1')
        mags = to_4d(hl.stack_magnitudes(cv2))
        fm[1] = linear(mags, 1, 1, name='sw1')

    with tf.name_scope('stage2') as scope:
        cv3 = hl.mean_pooling(cv2, ksize=(1,2,2,1), strides=(1,2,2,1))
        cv3 = hl.conv2d(cv3, nf2, fs, stddev=std, padding='SAME', n_rings=nr, name='2_1')
        cv3 = hl.non_linearity(cv3, name='2_1')

        cv4 = hl.conv2d(cv3, nf2, fs, stddev=std, padding='SAME', n_rings=nr, name='2_2')
        cv4 = hl.batch_norm(cv4, train_phase, name='bn2')
        mags = to_4d(hl.stack_magnitudes(cv4))
        fm[2] = linear(mags, 1, 1, name='sw2')

    with tf.name_scope('stage3') as scope:
        cv5 = hl.mean_pooling(cv4, ksize=(1,2,2,1), strides=(1,2,2,1))
        cv5 = hl.conv2d(cv5, nf3, fs, stddev=std, padding='SAME', n_rings=nr, name='3_1')
        cv5 = hl.non_linearity(cv5, name='3_1')

        cv6 = hl.conv2d(cv5, nf3, fs, stddev=std, padding='SAME', n_rings=nr, name='3_2')
        cv6 = hl.batch_norm(cv6, train_phase, name='bn3')
        mags = to_4d(hl.stack_magnitudes(cv6))
        fm[3] = linear(mags, 1, 1, name='sw3')

    with tf.name_scope('stage4') as scope:
        cv7 = hl.mean_pooling(cv6, ksize=(1,2,2,1), strides=(1,2,2,1))
        cv7 = hl.conv2d(cv7, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='4_1')
        cv7 = hl.non_linearity(cv7, name='4_1')

        cv8 = hl.conv2d(cv7, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='4_2')
        cv8 = hl.batch_norm(cv8, train_phase, name='bn4')
        mags = to_4d(hl.stack_magnitudes(cv8))
        fm[4] = linear(mags, 1, 1, name='sw4')

    with tf.name_scope('stage5') as scope:
        cv9 = hl.mean_pooling(cv8, ksize=(1,2,2,1), strides=(1,2,2,1))
        cv9 = hl.conv2d(cv9, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='5_1')
        cv9 = hl.non_linearity(cv9, name='5_1')

        cv10 = hl.conv2d(cv9, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='5_2')
        cv10 = hl.batch_norm(cv10, train_phase, name='bn5')
        mags = to_4d(hl.stack_magnitudes(cv10))
        fm[5] = linear(mags, 1, 1, name='sw5')

    fms = {}
    side_preds = []
    xsh = tf.shape(x)
    with tf.name_scope('fusion') as scope:
        for key in fm.keys():
            fms[key] = tf.image.resize_images(fm[key], tf.stack([xsh[1], xsh[2]]))
            side_preds.append(fms[key])
        side_preds = tf.concat(axis=3, values=side_preds)

        fms['fuse'] = linear(side_preds, 1, 1, bias_init=0.01, name='side_preds')
        return fms
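
# Hypothetical loss over the returned side outputs (a sketch, HED-style:
# one sigmoid cross-entropy term per map in the dict, fused map included;
# `targets` is an illustrative edge-label placeholder):
targets = tf.placeholder(tf.float32, [args.batch_size, args.height, args.width, 1])
fms = hnet_bsd(args, x, train_phase)
loss = tf.add_n([tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=fms[key]))
    for key in fms])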
Example #8
# Assumed imports and constants, defined earlier in the original script:
#   import numpy as np; import tensorflow as tf
#   import harmonic_network_lite as hn_lite   (assumption: the layers module)
#   from scipy.ndimage.interpolation import rotate; from scipy.misc import imsave   (assumptions)
#   INPUT_SHAPE, BATCH_SIZE, WIDTH, NCHANNELS, LAYER_COUNT, STRIDE, EMBEDDED
KERNEL_WIDTH = 4
#assert KERNEL_WIDTH % 2 == 1
MAX_ORDER = 1

# Unit test -- Convolution
tf.reset_default_graph()

input_images = tf.placeholder(tf.float32, shape=INPUT_SHAPE)
reshaped_input = tf.reshape(input_images, [BATCH_SIZE, WIDTH, WIDTH, 1, 1, NCHANNELS])
hconv = reshaped_input

for layer_index in range(LAYER_COUNT):
    #if layer_index != 0:
    #    hconv = hn_lite.mean_pool(hconv, ksize=(1,2,2,1), strides=(1,2,2,1))
    hconv = hn_lite.conv2d(hconv, 1, KERNEL_WIDTH, padding='SAME', strides=(1,STRIDE,STRIDE,1),
                           max_order=MAX_ORDER, name='hc'+str(layer_index))
real = hn_lite.sum_magnitudes(hconv)
output_images = tf.nn.tanh(tf.reduce_mean(real, axis=[3,4]))


with tf.Session() as sess:
    global_init_op = tf.global_variables_initializer()
    local_init_op = tf.local_variables_initializer()
    sess.run([global_init_op, local_init_op])
    for i in range(360):
        input_image = rotate(EMBEDDED, i, axes=(1,0), reshape=False, order=3)[np.newaxis,:,:,np.newaxis]
        real_output = sess.run(output_images, feed_dict={input_images: input_image})[0,:,:,0]
        real_output = rotate(real_output, -i, axes=(1,0), reshape=False, order=3) / 1.2
        print(np.amin(real_output), np.amax(real_output))
        imsave('./nate_experiments/activations/nate_{:d}_/{:04d}.jpg'.format(LAYER_COUNT,i), real_output)