Example #1
    def recon(self, features, _recon, _noise, SR, name):
        with tf.variable_scope(name):
            _recon_h1 = int(_recon.get_shape()[1])
            _recon_h2 = int(_recon.get_shape()[2])
            # Learnable step size and weighting for the correction terms.
            delta = tf.get_variable(name='delta', initializer=tf.constant(0.1))
            eta = tf.get_variable(name='eta', initializer=tf.constant(0.9))

            # Back-projection: downsample the current estimate, compare it
            # with the low-resolution observation, and upsample the residual.
            down = bicubic_interp_2d(
                _recon,
                [int(_recon_h1 / SR), int(_recon_h2 / SR)])
            err1 = bicubic_interp_2d(down - _noise, [_recon_h1, _recon_h2])

            # Deviation of the estimate from the network's prediction.
            err2 = _recon - features
            out = _recon - delta * (err1 + eta * err2)
        return out
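A minimal NumPy sketch of the same back-projection update, with 2x2 mean pooling standing in for the bicubic downsampling and nearest-neighbour replication standing in for the bicubic upsampling (recon_step and its argument names are hypothetical, for illustration only):

import numpy as np

def recon_step(estimate, features, lr_obs, delta=0.1, eta=0.9, sr=2):
    h, w = estimate.shape
    down = estimate.reshape(h // sr, sr, w // sr, sr).mean(axis=(1, 3))
    err1 = np.kron(down - lr_obs, np.ones((sr, sr)))  # back-projection residual
    err2 = estimate - features                        # pull toward the network output
    return estimate - delta * (err1 + eta * err2)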
Example #2
    def _transform(V, U, out_size):
        with tf.variable_scope('_transform'):
            num_batch = tf.shape(U)[0]
            height = tf.shape(U)[1]
            width = tf.shape(U)[2]
            num_channels = tf.shape(U)[3]

            # grid of (x_t, y_t, 1), eq (1) in ref [1]
            height_f = tf.cast(height, 'float32')
            width_f = tf.cast(width, 'float32')
            out_height = out_size[0]
            out_width = out_size[1]
            grid = _meshgrid(out_height, out_width)     # [2, h*w]
            grid = tf.reshape(grid, [-1])               # [2*h*w]
            grid = tf.tile(grid, tf.stack([num_batch]))           # [n*2*h*w]
            grid = tf.reshape(grid, tf.stack([num_batch, 2, -1])) # [n, 2, h*w]

            # transform (x, y)^T -> (x+vx, y+vy)^T
            V = bicubic_interp_2d(V, out_size)
            V = tf.transpose(V, [0, 3, 1, 2])           # [n, 2, h, w]
            V = tf.reshape(V, [num_batch, 2, -1])       # [n, 2, h*w]
            T_g = tf.add(V, grid)                       # [n, 2, h*w]

            x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1])
            y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1])
            x_s_flat = tf.reshape(x_s, [-1])
            y_s_flat = tf.reshape(y_s, [-1])

            input_transformed = _interpolate(
                U, x_s_flat, y_s_flat, out_size)

            output = tf.reshape(
                input_transformed, 
                tf.stack([num_batch, out_height, out_width, num_channels]))
            return output
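What _transform computes, in plain terms: build a sampling grid over the output, add the 2-channel offset field V, and sample U at the displaced coordinates. A rough NumPy sketch with nearest-neighbour sampling and pixel-unit offsets (warp_nearest is hypothetical; the code above works in the normalized coordinates produced by _meshgrid and samples via _interpolate):

import numpy as np

def warp_nearest(U, V):
    # U: [h, w] image; V: [2, h, w] offsets in pixels.
    h, w = U.shape
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    x_s = np.clip(np.round(xs + V[0]), 0, w - 1).astype(int)
    y_s = np.clip(np.round(ys + V[1]), 0, h - 1).astype(int)
    return U[y_s, x_s]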
Example #3
    def _transform(V, U, out_size):
        with tf.variable_scope('_transform'):
            num_batch = tf.shape(U)[0]
            height = tf.shape(U)[1]
            width = tf.shape(U)[2]
            num_channels = tf.shape(U)[3]

            height_f = tf.cast(height, 'float32')
            width_f = tf.cast(width, 'float32')
            out_height = out_size[0]
            out_width = out_size[1]
            grid = _meshgrid(out_height, out_width)
            grid = tf.reshape(grid, [-1])
            grid = tf.tile(grid, tf.stack([num_batch]))
            grid = tf.reshape(grid, tf.stack([num_batch, 2, -1]))

            V = bicubic_interp_2d(V, out_size)
            V = tf.transpose(V, [0, 3, 1, 2])
            V = tf.reshape(V, [num_batch, 2, -1])
            T_g = tf.add(V, grid)

            x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1])
            y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1])
            x_s_flat = tf.reshape(x_s, [-1])
            y_s_flat = tf.reshape(y_s, [-1])

            input_transformed = _interpolate(U, x_s_flat, y_s_flat, out_size)

            output = tf.reshape(
                input_transformed,
                tf.stack([num_batch, out_height, out_width, num_channels]))
            return output
Example #4
    def build_graph(self, _input, _label):
        label_h1 = int(_label.get_shape()[1])
        label_h2 = int(_label.get_shape()[2])

        x = bicubic_interp_2d(_input, [label_h1, label_h2])
        y = _input
        with tf.variable_scope("bloclk"):  # tf.AUTO_REUSE

            encode0, down0 = self.Encoding_block(x, name='Encoding_0')
            encode1, down1 = self.Encoding_block(down0, name='Encoding_1')
            encode2, down2 = self.Encoding_block(down1, name='Encoding_2')
            encode3, down3 = self.Encoding_block(down2, name='Encoding_3')

            media_end = self.Encoding_block_end(down3, name='Encoding_end')

            decode3 = self.Decoding_block(media_end,
                                          encode3,
                                          name='Decoding_3')
            decode2 = self.Decoding_block(decode3, encode2, name='Decoding_2')
            decode1 = self.Decoding_block(decode2, encode1, name='Decoding_1')
            decode0 = self.Decoding_block(decode1, encode0, name='Decoding_0')

            decoding_end = self.feature_decoding_end(decode0, name='end')
            conv_out = x + decoding_end

        x = self.recon(conv_out, x, y, SR=2, name='recon_1')

        for i in range(5):
            with tf.variable_scope("block", reuse=True):  # tf.AUTO_REUSE
                encode0, down0 = self.Encoding_block(x, name='Encoding_0')
                encode1, down1 = self.Encoding_block(down0, name='Encoding_1')
                encode2, down2 = self.Encoding_block(down1, name='Encoding_2')
                encode3, down3 = self.Encoding_block(down2, name='Encoding_3')

                media_end = self.Encoding_block_end(down3, name='Encoding_end')

                decode3 = self.Decoding_block(media_end,
                                              encode3,
                                              name='Decoding_3')
                decode2 = self.Decoding_block(decode3,
                                              encode2,
                                              name='Decoding_2')
                decode1 = self.Decoding_block(decode2,
                                              encode1,
                                              name='Decoding_1')
                decode0 = self.Decoding_block(decode1,
                                              encode0,
                                              name='Decoding_0')

                decoding_end = self.feature_decoding_end(decode0, name='end')
                conv_out = x + decoding_end

            x = self.recon(conv_out, x, y, SR=2, name='recon_%d' % (i + 2))

        return x
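The loop unrolls five further iterations that share one set of block weights via reuse=True, while each recon_%d scope stays distinct, so every stage learns its own delta and eta. A minimal TF 1.x sketch of that sharing pattern (layer and the scope name are made up for illustration):

import tensorflow as tf

def layer(x):
    w = tf.get_variable('w', shape=[], initializer=tf.ones_initializer())
    return w * x

x = tf.placeholder(tf.float32, shape=[None])
with tf.variable_scope('block'):            # first call creates block/w
    y = layer(x)
for _ in range(2):
    with tf.variable_scope('block', reuse=True):
        y = layer(y)                        # later calls reuse block/w
print(len(tf.trainable_variables()))        # 1: a single shared weight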
Example #5
def _decodeBlock(x, shortcut, rows_odd, cols_odd, cweights, bns, activation=LeakyReLU(alpha=ALPHA)):
    # Add zero padding on the bottom and right if an odd output dimension is
    # required, giving an output one greater than needed.
    x = ZeroPadding2D(padding=((0, rows_odd), (0, cols_odd)))(x)
    # Bicubic 2x upsampling (used here instead of UpSampling2D).
    up_size = np.array(x.shape)
    up_size[1] *= 2
    up_size[2] *= 2
    x = bicubic_interp_2d(x, (up_size[1], up_size[2]))
    # If padding was added, crop the output back to the target shape.
    x = Cropping2D(cropping=((0, rows_odd), (0, cols_odd)))(x)

    x = Concatenate()([shortcut, x])

    x = res_Block(x, cweights, bns, activation=LeakyReLU(alpha=ALPHA))

    return x
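The pad/upsample/crop dance handles odd target dimensions: padding one extra row or column before the 2x upsampling yields an output one larger than needed, which the crop then trims. A NumPy sketch with nearest-neighbour upsampling in place of the bicubic call (up_to is hypothetical):

import numpy as np

def up_to(x, target_h, target_w):
    rows_odd, cols_odd = target_h % 2, target_w % 2
    x = np.pad(x, ((0, rows_odd), (0, cols_odd)))  # pad bottom/right if odd
    x = x.repeat(2, axis=0).repeat(2, axis=1)      # 2x upsample
    return x[:target_h, :target_w]                 # crop the excess

assert up_to(np.zeros((3, 3)), 5, 6).shape == (5, 6)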
Example #6
    def _transform(V, U, out_size):
        num_batch = U.size(0)
        height = U.size(2)
        width = U.size(3)
        num_channels = U.size(1)

        # grid of (x_t, y_t, 1), eq (1) in ref [1]
        height_f = float(height)
        width_f = float(width)

        out_height = out_size[0]
        out_width = out_size[1]
        grid = _meshgrid(out_height, out_width)        # [2, h*w]

        grid = grid.reshape(-1)                        # [2*h*w]
        grid = grid.unsqueeze(0).repeat(num_batch, 1)  # [n, 2*h*w]
        grid = grid.reshape(num_batch, 2, -1)          # [n, 2, h*w]

        # Set source position (x+vx, y+vy)^T
        V = bicubic_interp_2d(V.permute(0, 2, 3, 1), out_size)
        V = V.permute(0, 3, 1, 2)           # [n, 2, h, w]
        V = V.reshape(num_batch, 2, -1)       # [n, 2, h*w]
        T_g = V + grid                       # [n, 2, h*w]

        x_s = T_g[:, 0, :]
        y_s = T_g[:, 1, :]
        x_s_flat = x_s.reshape(-1)
        y_s_flat = y_s.reshape(-1)

        input_transformed = _interpolate(
            U.permute(0, 2, 3, 1), x_s_flat, y_s_flat, out_size)

        output = input_transformed.reshape(num_batch, out_height, out_width, num_channels)
        return output
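The PyTorch port keeps tensors in NCHW layout, but the TF-style helpers expect NHWC, hence the permute calls around bicubic_interp_2d and _interpolate. A quick sketch of that round trip:

import torch

u = torch.randn(4, 2, 8, 8)            # NCHW, PyTorch's native layout
u_nhwc = u.permute(0, 2, 3, 1)         # NHWC, as the helpers expect
assert u_nhwc.shape == (4, 8, 8, 2)
assert torch.equal(u_nhwc.permute(0, 3, 1, 2), u)   # lossless round trip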
Example #7
import tensorflow as tf
from bicubic_interp import bicubic_interp_2d
import numpy as np

import time

n = 6
x = tf.constant(
  np.array(range(n*n), dtype=np.float32).reshape([1, n, n, 1]))

size = [3,3]

t0 = time.time()
y0 = bicubic_interp_2d(x, size)
y1 = bicubic_interp_2d(x, size, endpoint=True)
#print "Gen graph : ", time.time() - t0
z = tf.image.resize_bicubic(x, size)

sess = tf.Session()
x_ = sess.run(x)
t0 = time.time()
y0_ = sess.run(y0)
y1_ = sess.run(y1)
#print "Interp : ", time.time() - t0
z_ = sess.run(z)

print "input : [{}, {}]".format(n, n)
print x_[0,:,:,0]
print ""

print "tf.image.resize_bicubic : {}".format(size)
Example #8
import tensorflow as tf
from bicubic_interp import bicubic_interp_2d
import numpy as np

x = tf.constant(np.array(range(9), dtype=np.float32).reshape([1, 3, 3, 1]))

size = [6, 6]
y = bicubic_interp_2d(x, size)
z = tf.image.resize_bicubic(x, size)

sess = tf.Session()
x_ = sess.run(x)
y_ = sess.run(y)
z_ = sess.run(z)

print(x_[0, :, :, 0])
print(y_[0, :, :, 0])
print(z_[0, :, :, 0])
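A note on the endpoint flag used in Example #7: it plausibly switches between half-open and align-corners-style sample spacing, which is why y0 and y1 differ when downsampling. The coordinate layouts below are an assumption about the flag's meaning, not taken from the library:

import numpy as np

n_in, n_out = 6, 3
coords_endpoint = np.linspace(0, n_in - 1, n_out)      # [0.  2.5 5. ] hits both ends
coords_half_open = np.arange(n_out) * (n_in / n_out)   # [0. 2. 4.] skips the last sample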
Example #9
def generator_model(args, inputs, istrain, reuse):
    """Build generator architecture."""
    # inputs: tensor with shape [bn, 256, 256, 1]
    with tf.variable_scope('gen_', reuse=reuse):
        x = ReflectionPadding2D((3, 3))(inputs)
        x = Conv2D(filters=ngf, kernel_size=(7, 7), padding='valid')(x)
        x = batch_norm(x, "bn1", is_train=istrain)
        x = Activation('relu')(x)

        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            if not args.max_pooling:
                x = Conv2D(filters=ngf * mult * 2,
                           kernel_size=(3, 3),
                           strides=2,
                           padding='valid')(x)
            else:
                x = Conv2D(filters=ngf * mult * 2,
                           kernel_size=(3, 3),
                           strides=1,
                           padding='valid')(x)
                x = MaxPooling2D((2, 2), padding='valid')(x)
            x = batch_norm(x, "down_bn_" + str(i), is_train=istrain)
            tf.summary.histogram('before_activation', x)
            x = Activation('relu')(x)
            tf.summary.histogram('after_activation', x)
        mult = 2**n_downsampling
        for i in range(n_blocks_gen):
            x = res_block(x, ngf * mult, use_dropout=True)


        x = Conv2D(filters=2, kernel_size=(5, 5), padding='same')(x)
        x = batch_norm(x, "final", is_train=istrain)
        wrap = Activation('sigmoid')(x)
        # Map the sigmoid output from (0, 1) to (-4, 4).
        wrap = tf.multiply(tf.add(wrap, -0.5), 8)
        # Dense head: predicts a per-row displacement of shape [n, output_size, 2].
        dense = tf.layers.flatten(wrap)
        output_size = args.final_layer  # the args value decides the final layer size
        dense_out = tf.layers.dense(inputs=dense, units=output_size * 2)
        x_mean = tf.reshape(dense_out, [-1, output_size, 2])

        # Tile the per-row displacement across columns and resize to the
        # image size to obtain a dense flow field for warping.
        x_mean = tf.expand_dims(x_mean, 2)
        wrap = tf.tile(x_mean, multiples=[1, 1, output_size, 1])
        wrap = bicubic_interp_2d(wrap, imsize)
        outputs = Lambda(WarpST_one,
                         arguments={
                             'inputs': inputs,
                             'name': str(random.random())
                         })(wrap)
        return outputs, wrap[:, :, 0, :]
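Shape walk-through of the flow head above, as a NumPy sketch (sizes are hypothetical; the real ones come from args.final_layer and imsize): the dense layer predicts one 2-vector per row, which is tiled across columns and then bicubically resized to a dense per-pixel field for WarpST_one.

import numpy as np

n, output_size = 1, 128
x_mean = np.zeros((n, output_size, 2))            # dense head output
x_mean = x_mean[:, :, None, :]                    # [n, 128, 1, 2]
wrap = np.tile(x_mean, (1, 1, output_size, 1))    # [n, 128, 128, 2]
print(wrap.shape)  # bicubic_interp_2d would then resize to [n, *imsize, 2]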