Example #1
def decoder(x):
    print()
    print('x: ', x)

    d_fc1 = lrelu(fc_layer(x, 256, False, 'd_fc1'))
    print('d_fc1: ', d_fc1)

    d_fc2 = lrelu(fc_layer(d_fc1, 512, False, 'd_fc2'))
    print('d_fc2: ', d_fc2)

    # reshape for use in the transpose convolutions (deconvolutions);
    # the shape must match the conv layers in the encoder
    # (batch_size is assumed to be defined at module level)
    d_fc2 = tf.reshape(d_fc2, (batch_size, 4, 4, 32))
    print('d_fc2: ', d_fc2)

    # transpose convolution with a leaky ReLU activation
    e_transpose_conv1 = lrelu(conv2d_transpose(d_fc2, 2, 2, 32, 'e_transpose_conv1'))
    print('e_transpose_conv1: ', e_transpose_conv1)

    # transpose convolution with a leaky ReLU activation
    e_transpose_conv2 = lrelu(conv2d_transpose(e_transpose_conv1, 2, 2, 64, 'e_transpose_conv2'))
    print('e_transpose_conv2: ', e_transpose_conv2)

    # transpose convolution with a leaky ReLU activation
    e_transpose_conv3 = lrelu(conv2d_transpose(e_transpose_conv2, 2, 2, 1, 'e_transpose_conv3'))
    print('e_transpose_conv3: ', e_transpose_conv3)

    # the stride-2 transpose convs take the resolution 4 -> 8 -> 16 -> 32,
    # so crop back to the original MNIST size (28, 28)
    e_transpose_conv3 = e_transpose_conv3[:, :28, :28, :]
    return e_transpose_conv3
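
This example relies on helper wrappers (lrelu, fc_layer, conv2d, conv2d_transpose) that are not shown. Below is a minimal sketch of the three used here, assuming TensorFlow 1.x, with signatures inferred from the call sites; the variable names and initializers are assumptions, and note that Example #7 appears to use wrappers with a different, keyword-based signature.

import tensorflow as tf

def lrelu(x, leak=0.2):
    # leaky ReLU: max(x, leak * x)
    return tf.maximum(x, leak * x)

def fc_layer(x, out_dim, flatten, scope):
    # fully connected layer; optionally flattens a conv feature map first
    with tf.variable_scope(scope):
        if flatten:
            flat = 1
            for d in x.get_shape().as_list()[1:]:
                flat *= d
            x = tf.reshape(x, [-1, flat])
        in_dim = x.get_shape().as_list()[-1]
        w = tf.get_variable('w', [in_dim, out_dim],
                            initializer=tf.glorot_uniform_initializer())
        b = tf.get_variable('b', [out_dim],
                            initializer=tf.zeros_initializer())
        return tf.matmul(x, w) + b

def conv2d_transpose(x, k, stride, out_channels, scope):
    # transpose ('de-')convolution; doubles the spatial size when stride=2
    # (assumes a static batch size, as in the decoder above)
    with tf.variable_scope(scope):
        n, h, w, in_channels = x.get_shape().as_list()
        # filter layout for conv2d_transpose: [k, k, out_channels, in_channels]
        f = tf.get_variable('f', [k, k, out_channels, in_channels],
                            initializer=tf.glorot_uniform_initializer())
        out_shape = [n, h * stride, w * stride, out_channels]
        return tf.nn.conv2d_transpose(x, f, out_shape,
                                      [1, stride, stride, 1], 'SAME')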
Example #2
def energyDecoder(encoded, reuse=False):
    sc = tf.get_variable_scope()
    with tf.variable_scope(sc, reuse=reuse):

        conv5 = layers.conv2d_transpose(encoded,
                                        256,
                                        4,
                                        stride=2,
                                        normalizer_fn=layers.batch_norm,
                                        activation_fn=None,
                                        scope='d_conv5')
        conv5 = lrelu(conv5)

        conv6 = layers.conv2d_transpose(conv5,
                                        128,
                                        4,
                                        stride=2,
                                        normalizer_fn=layers.batch_norm,
                                        activation_fn=None,
                                        scope='d_conv6')
        conv6 = lrelu(conv6)

        conv7 = layers.conv2d_transpose(conv6,
                                        64,
                                        4,
                                        stride=2,
                                        normalizer_fn=layers.batch_norm,
                                        activation_fn=None,
                                        scope='d_conv7')
        conv7 = lrelu(conv7)

        conv8 = layers.conv2d_transpose(conv7,
                                        2,
                                        4,
                                        stride=2,
                                        activation_fn=tf.nn.tanh,
                                        scope='d_conv8')

        print('encoded:', encoded)
        print('conv5:', conv5)
        print('conv6:', conv6)
        print('conv7:', conv7)
        print('conv8:', conv8)

        print('END D\n')
        tf.add_to_collection('vars', conv5)
        tf.add_to_collection('vars', conv6)
        tf.add_to_collection('vars', conv7)
        tf.add_to_collection('vars', conv8)
        return conv8
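
The layers module used here and in Example #3 is presumably tf.contrib.layers (TensorFlow 1.x), whose batch_norm defaults to is_training=True and would need to be switched off for inference. The assumed imports:

import tensorflow as tf
from tensorflow.contrib import layers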
Example #3
def energyEncoder(ab_images, reuse=False):
    print('DISCRIMINATOR')
    sc = tf.get_variable_scope()
    with tf.variable_scope(sc, reuse=reuse):

        conv1 = layers.conv2d(ab_images,
                              64,
                              4,
                              stride=2,
                              activation_fn=None,
                              scope='d_conv1')
        conv1 = lrelu(conv1)
        print('conv1:', conv1)

        conv2 = layers.conv2d(conv1,
                              128,
                              4,
                              stride=2,
                              normalizer_fn=layers.batch_norm,
                              activation_fn=None,
                              scope='d_conv2')
        conv2 = lrelu(conv2)
        print('conv2:', conv2)

        conv3 = layers.conv2d(conv2,
                              256,
                              4,
                              stride=2,
                              normalizer_fn=layers.batch_norm,
                              activation_fn=None,
                              scope='d_conv3')
        conv3 = lrelu(conv3)
        print('conv3:', conv3)

        conv4 = layers.conv2d(conv3,
                              512,
                              4,
                              stride=2,
                              normalizer_fn=layers.batch_norm,
                              activation_fn=None,
                              scope='d_conv4')
        conv4 = lrelu(conv4)
        print('conv4:', conv4)

        tf.add_to_collection('vars', conv1)
        tf.add_to_collection('vars', conv2)
        tf.add_to_collection('vars', conv3)
        tf.add_to_collection('vars', conv4)

        return conv4
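
For reference, a hypothetical wiring of this encoder/decoder pair: with a 256x256 two-channel ab input (an assumption; any input whose sides are divisible by 16 would work), the four stride-2 convolutions reach 16x16x512, and the four stride-2 transpose convolutions in energyDecoder map back to 256x256x2.

ab_images = tf.placeholder(tf.float32, [16, 256, 256, 2])
encoded = energyEncoder(ab_images)   # -> (16, 16, 16, 512)
recon = energyDecoder(encoded)       # -> (16, 256, 256, 2), in tanh range
loss = tf.reduce_mean(tf.square(recon - ab_images))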
Example #4
def netD(L_images, ab_images=None, num_gpu=1, reuse=False):
    print()
    print('netD')
    sc = tf.get_variable_scope()
    with tf.variable_scope(sc, reuse=reuse):
        # build the device list; fall back to CPU when no GPU is requested
        if num_gpu == 0:
            gpus = ['/cpu:0']
        else:
            gpus = ['/gpu:%d' % i for i in range(num_gpu)]

        for d in gpus:
            with tf.device(d):

                s = L_images.get_shape().as_list()[-1]
                if s == 1:
                    input_images = tf.concat([L_images, ab_images], axis=3)
                else:
                    input_images = L_images

                print('input_images:', input_images)

                with tf.variable_scope('d_conv1'):
                    conv1 = lrelu(
                        conv2d(input_images, 64, kernel_size=4, stride=2))
                with tf.variable_scope('d_conv2'):
                    conv2 = lrelu(
                        batch_norm(conv2d(conv1, 128, kernel_size=4,
                                          stride=2)))
                with tf.variable_scope('d_conv3'):
                    conv3 = lrelu(
                        batch_norm(conv2d(conv2, 256, kernel_size=4,
                                          stride=2)))
                with tf.variable_scope('d_conv4'):
                    conv4 = lrelu(
                        batch_norm(conv2d(conv3, 512, kernel_size=4,
                                          stride=1)))
                with tf.variable_scope('d_conv5'):
                    conv5 = conv2d(conv4, 1, stride=1)

                print(conv1)
                print(conv2)
                print(conv3)
                print(conv4)
                print(conv5)
                # note: the function returns after the first device in the list
                return conv5
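
netD is a PatchGAN-style discriminator: it returns a spatial grid of logits rather than a single scalar, and when given a one-channel L image it conditions on the ab channels by concatenating them. A hypothetical call, with assumed 256x256 shapes:

L_images = tf.placeholder(tf.float32, [4, 256, 256, 1])
ab_real = tf.placeholder(tf.float32, [4, 256, 256, 2])
ab_fake = tf.placeholder(tf.float32, [4, 256, 256, 2])
d_real = netD(L_images, ab_real)              # creates the variables
d_fake = netD(L_images, ab_fake, reuse=True)  # shares the same weights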
Example #5
def netD_ab(ab_images, num_gpu, reuse=False):
    print()
    print('netD_ab')
    sc = tf.get_variable_scope()
    with tf.variable_scope(sc, reuse=reuse):
        # build the device list; fall back to CPU when no GPU is requested
        if num_gpu == 0:
            gpus = ['/cpu:0']
        else:
            gpus = ['/gpu:%d' % i for i in range(num_gpu)]

        for d in gpus:
            with tf.device(d):

                input_images = ab_images

                with tf.variable_scope('d_ab_conv1'):
                    conv1 = lrelu(
                        conv2d(input_images, 64, kernel_size=4, stride=2))
                with tf.variable_scope('d_ab_conv2'):
                    conv2 = lrelu(
                        batch_norm(conv2d(conv1, 128, kernel_size=4,
                                          stride=2)))
                with tf.variable_scope('d_ab_conv3'):
                    conv3 = lrelu(
                        batch_norm(conv2d(conv2, 256, kernel_size=4,
                                          stride=2)))
                with tf.variable_scope('d_ab_conv4'):
                    conv4 = lrelu(
                        batch_norm(conv2d(conv3, 512, kernel_size=4,
                                          stride=1)))
                with tf.variable_scope('d_ab_conv5'):
                    conv5 = conv2d(conv4, 1, stride=1)

                print(conv1)
                print(conv2)
                print(conv3)
                print(conv4)
                print(conv5)
                # note: the function returns after the first device in the list
                return conv5
Example #6
def encoder(x):
    # convolutional layer with a leaky ReLU activation
    e_conv1 = lrelu(conv2d(x, 2, 2, 32, 'e_conv1'))
    print()
    print('conv1: ', e_conv1)

    # convolutional layer with a leaky ReLU activation
    e_conv2 = lrelu(conv2d(e_conv1, 2, 2, 64, 'e_conv2'))
    print('conv2: ', e_conv2)

    # convolutional layer with a leaky ReLU activation
    e_conv3 = lrelu(conv2d(e_conv2, 2, 2, 32, 'e_conv3'))
    print('conv3: ', e_conv3)

    # fully connected layer with a leaky ReLU activation;
    # the 'True' means the conv output is flattened first
    e_fc1 = lrelu(fc_layer(e_conv3, 512, True, 'e_fc1'))
    print('fc1: ', e_fc1)

    # fully connected layer with a leaky ReLU activation;
    # the output from the previous fully connected layer is
    # already flat, so no need to flatten, hence 'False'
    e_fc2 = lrelu(fc_layer(e_fc1, 256, False, 'e_fc2'))
    print('fc2: ', e_fc2)

    e_fc3 = lrelu(fc_layer(e_fc2, 128, False, 'e_fc3'))
    print('fc3: ', e_fc3)

    return e_fc3
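
Examples #1 and #6 together form a small MNIST autoencoder: three stride-2 convolutions take 28x28 down to 4x4x32, the fully connected layers compress that to a 128-dim code, and the decoder mirrors the path and crops back to 28x28. The conv2d wrapper they call is not shown; a sketch under the assumed signature conv2d(x, kernel, stride, out_channels, scope), matching the helpers sketched after Example #1:

def conv2d(x, k, stride, out_channels, scope):
    # standard convolution with 'SAME' padding; the signature is inferred
    # from the call sites and may differ from the original helper
    with tf.variable_scope(scope):
        in_channels = x.get_shape().as_list()[-1]
        f = tf.get_variable('f', [k, k, in_channels, out_channels],
                            initializer=tf.glorot_uniform_initializer())
        b = tf.get_variable('b', [out_channels],
                            initializer=tf.zeros_initializer())
        return tf.nn.conv2d(x, f, [1, stride, stride, 1], 'SAME') + b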
Example #7
def netG(L_images, UPCONVS, num_gpu=1):

    # build the device list; fall back to CPU when no GPU is requested
    if num_gpu == 0:
        gpus = ['/cpu:0']
    else:
        gpus = ['/gpu:%d' % i for i in range(num_gpu)]

    for d in gpus:
        with tf.device(d):

            with tf.variable_scope('g_enc1'):
                enc_conv1 = lrelu(conv2d(L_images, 64, stride=2))
            with tf.variable_scope('g_enc2'):
                enc_conv2 = lrelu(
                    batch_norm(conv2d(enc_conv1, 128, stride=2,
                                      kernel_size=4)))
            with tf.variable_scope('g_enc3'):
                enc_conv3 = lrelu(
                    batch_norm(conv2d(enc_conv2, 256, stride=2,
                                      kernel_size=4)))
            with tf.variable_scope('g_enc4'):
                enc_conv4 = lrelu(
                    batch_norm(conv2d(enc_conv3, 512, stride=2,
                                      kernel_size=4)))
            with tf.variable_scope('g_enc5'):
                enc_conv5 = lrelu(
                    batch_norm(conv2d(enc_conv4, 512, stride=2,
                                      kernel_size=4)))
            with tf.variable_scope('g_enc6'):
                enc_conv6 = lrelu(
                    batch_norm(conv2d(enc_conv5, 512, stride=2,
                                      kernel_size=4)))
            with tf.variable_scope('g_enc7'):
                enc_conv7 = lrelu(
                    batch_norm(conv2d(enc_conv6, 512, stride=2,
                                      kernel_size=4)))
            with tf.variable_scope('g_enc8'):
                enc_conv8 = lrelu(
                    batch_norm(conv2d(enc_conv7, 512, stride=2,
                                      kernel_size=4)))

            print('enc_conv1:', enc_conv1)
            print('enc_conv2:', enc_conv2)
            print('enc_conv3:', enc_conv3)
            print('enc_conv4:', enc_conv4)
            print('enc_conv5:', enc_conv5)
            print('enc_conv6:', enc_conv6)
            print('enc_conv7:', enc_conv7)
            print('enc_conv8:', enc_conv8)

            with tf.variable_scope('g_dec1'):
                print('1:', enc_conv8)
                if UPCONVS:
                    print('Using up-convolutions')
                    dec_convt1 = tf.image.resize_nearest_neighbor(
                        enc_conv8, [2, 2])
                    dec_convt1 = conv2d(dec_convt1,
                                        512,
                                        stride=1,
                                        kernel_size=3)
                else:
                    dec_convt1 = conv2d_transpose(enc_conv8,
                                                  512,
                                                  stride=2,
                                                  kernel_size=4)
                dec_convt1 = batch_norm(dec_convt1)
                dec_convt1 = relu(dec_convt1)
                dec_convt1 = tf.nn.dropout(dec_convt1, keep_prob=0.5)
                print(dec_convt1)
            with tf.variable_scope('g_dec2'):
                dec_convt2 = tf.concat([dec_convt1, enc_conv7], axis=3)
                print(dec_convt2)
                if UPCONVS:
                    dec_convt2 = tf.image.resize_nearest_neighbor(
                        dec_convt2, [4, 4])
                    dec_convt2 = conv2d(dec_convt2,
                                        512,
                                        stride=1,
                                        kernel_size=3)
                else:
                    dec_convt2 = conv2d_transpose(dec_convt2,
                                                  512,
                                                  stride=2,
                                                  kernel_size=4)
                dec_convt2 = batch_norm(dec_convt2)
                dec_convt2 = relu(dec_convt2)
            with tf.variable_scope('g_dec3'):
                dec_convt3 = tf.concat([enc_conv6, dec_convt2], axis=3)
                print(dec_convt3)
                if UPCONVS:
                    dec_convt3 = tf.image.resize_nearest_neighbor(
                        dec_convt3, [8, 8])
                    dec_convt3 = conv2d(dec_convt3,
                                        512,
                                        stride=1,
                                        kernel_size=3)
                else:
                    dec_convt3 = conv2d_transpose(dec_convt3,
                                                  512,
                                                  stride=2,
                                                  kernel_size=4)
                dec_convt3 = batch_norm(dec_convt3)
                dec_convt3 = relu(dec_convt3)
            with tf.variable_scope('g_dec4'):
                dec_convt4 = tf.concat([enc_conv5, dec_convt3], axis=3)
                print(dec_convt4)
                if UPCONVS:
                    dec_convt4 = tf.image.resize_nearest_neighbor(
                        dec_convt4, [16, 16])
                    dec_convt4 = conv2d(dec_convt4,
                                        512,
                                        stride=1,
                                        kernel_size=3)
                else:
                    dec_convt4 = conv2d_transpose(dec_convt4,
                                                  512,
                                                  stride=2,
                                                  kernel_size=4)
                dec_convt4 = batch_norm(dec_convt4)
                dec_convt4 = relu(dec_convt4)
            with tf.variable_scope('g_dec5'):
                dec_convt5 = tf.concat([enc_conv4, dec_convt4], axis=3)
                print(dec_convt5)
                if UPCONVS:
                    dec_convt5 = tf.image.resize_nearest_neighbor(
                        dec_convt5, [32, 32])
                    dec_convt5 = conv2d(dec_convt5,
                                        256,
                                        stride=1,
                                        kernel_size=3)
                else:
                    dec_convt5 = conv2d_transpose(dec_convt5,
                                                  256,
                                                  stride=2,
                                                  kernel_size=4)
                dec_convt5 = batch_norm(dec_convt5)
                dec_convt5 = relu(dec_convt5)
            with tf.variable_scope('g_dec6'):
                dec_convt6 = tf.concat([enc_conv3, dec_convt5], axis=3)
                print(dec_convt6)
                if UPCONVS:
                    dec_convt6 = tf.image.resize_nearest_neighbor(
                        dec_convt6, [64, 64])
                    dec_convt6 = conv2d(dec_convt6,
                                        128,
                                        stride=1,
                                        kernel_size=3)
                else:
                    dec_convt6 = conv2d_transpose(dec_convt6,
                                                  128,
                                                  stride=2,
                                                  kernel_size=4)
                dec_convt6 = batch_norm(dec_convt6)
                dec_convt6 = relu(dec_convt6)
            with tf.variable_scope('g_dec7'):
                dec_convt7 = tf.concat([enc_conv2, dec_convt6], axis=3)
                print(dec_convt7)
                if UPCONVS:
                    dec_convt7 = tf.image.resize_nearest_neighbor(
                        dec_convt7, [128, 128])
                    dec_convt7 = conv2d(dec_convt7,
                                        128,
                                        stride=1,
                                        kernel_size=3)
                else:
                    dec_convt7 = conv2d_transpose(dec_convt7,
                                                  128,
                                                  stride=2,
                                                  kernel_size=4)
                dec_convt7 = batch_norm(dec_convt7)
                dec_convt7 = relu(dec_convt7)

            # output layer - ab channels
            with tf.variable_scope('g_dec8'):
                if UPCONVS:
                    dec_convt8 = tf.image.resize_nearest_neighbor(
                        dec_convt7, [256, 256])
                    dec_convt8 = conv2d(dec_convt8, 2, stride=1, kernel_size=3)
                else:
                    dec_convt8 = conv2d_transpose(dec_convt7,
                                                  2,
                                                  stride=2,
                                                  kernel_size=4)
                dec_convt8 = tanh(dec_convt8)

            print(dec_convt8)

    return dec_convt8
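
netG is a U-Net generator: eight stride-2 encoder convolutions take a 256x256 L channel down to 1x1x512, and each decoder stage concatenates the mirrored encoder feature map before upsampling. The hard-coded resize targets (2, 4, ..., 256) tie the UPCONVS path to 256x256 inputs; resize-then-conv is a common alternative to strided transpose convolutions because it avoids checkerboard artifacts. A hypothetical call, with assumed shapes:

L_images = tf.placeholder(tf.float32, [1, 256, 256, 1])
ab_pred = netG(L_images, UPCONVS=False)   # -> (1, 256, 256, 2), in tanh range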