Example #1
def get_G(input_shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)

    nin = Input(input_shape)
    n = Conv2d(64, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init)(nin)
    temp = n

    # B residual blocks
    for i in range(16):
        nn = Conv2d(64, (3, 3), (1, 1), padding='SAME', W_init=w_init, b_init=None)(n)
        nn = BatchNorm(act=tf.nn.relu, gamma_init=g_init)(nn)
        nn = Conv2d(64, (3, 3), (1, 1), padding='SAME', W_init=w_init, b_init=None)(nn)
        nn = BatchNorm(gamma_init=g_init)(nn)
        nn = Elementwise(tf.add)([n, nn])
        n = nn

    n = Conv2d(64, (3, 3), (1, 1), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm(gamma_init=g_init)(n)
    n = Elementwise(tf.add)([n, temp])
    # B residual blocks end

    n = Conv2d(256, (3, 3), (1, 1), padding='SAME', W_init=w_init)(n)
    n = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(n)

    n = Conv2d(256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init)(n)
    n = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(n)

    nn = Conv2d(3, (1, 1), (1, 1), act=tf.nn.tanh, padding='SAME', W_init=w_init)(n)
    G = Model(inputs=nin, outputs=nn, name="generator")
    return G
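
A minimal usage sketch for this functional generator, assuming TensorLayer 2.x on TensorFlow 2; the 96x96 patch size is only an illustration:

import numpy as np
import tensorflow as tf

G = get_G([1, 96, 96, 3])   # build the generator for 96x96 RGB low-res patches
G.eval()                    # switch the BatchNorm layers to inference mode
lr = tf.convert_to_tensor(np.random.rand(1, 96, 96, 3).astype(np.float32))
sr = G(lr)                  # two 2x pixel-shuffle stages: 96x96 -> 384x384
print(sr.shape)             # expected (1, 384, 384, 3)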
Example #2
def LapSRNSingleLevel(net_image, net_feature, reuse=False):
    with tf.variable_scope("Model_level", reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        net_tmp = net_feature
        # recursive block
        for d in range(config.model.resblock_depth):
            net_tmp = PReluLayer(net_tmp, name='prelu_D%s' % (d))
            net_tmp = Conv2dLayer(
                net_tmp,
                shape=[3, 3, 64, 64],
                strides=[1, 1, 1, 1],
                name='conv_D%s' % (d),
                W_init=tf.contrib.layers.xavier_initializer())

        # for r in range(1,config.model.recursive_depth):
        #     for d in range(config.model.resblock_depth):
        #         net_tmp = PReluLayer(net_tmp, name='prelu_R%s_D%s'%(r,d))
        #         net_tmp = Conv2dLayer(net_tmp,shape=[3,3,64,64],strides=[1,1,1,1],
        #                 name='conv_R%s_D%s'%(r,d), W_init=tf.contrib.layers.xavier_initializer())

        net_feature = ElementwiseLayer(layer=[net_feature, net_tmp],
                                       combine_fn=tf.add,
                                       name='add_feature')

        net_feature = PReluLayer(net_feature, name='prelu_feature')
        net_feature = Conv2dLayer(
            net_feature,
            shape=[3, 3, 64, 256],
            strides=[1, 1, 1, 1],
            name='upconv_feature',
            W_init=tf.contrib.layers.xavier_initializer())
        net_feature = SubpixelConv2d(net_feature,
                                     scale=2,
                                     n_out_channel=64,
                                     name='subpixel_feature')

        # add image back
        gradient_level = Conv2dLayer(
            net_feature,
            shape=[3, 3, 64, 3],
            strides=[1, 1, 1, 1],
            act=lrelu,
            name='grad',
            W_init=tf.contrib.layers.xavier_initializer())
        net_image = Conv2dLayer(net_image,
                                shape=[3, 3, 3, 12],
                                strides=[1, 1, 1, 1],
                                name='upconv_image',
                                W_init=tf.contrib.layers.xavier_initializer())
        net_image = SubpixelConv2d(net_image,
                                   scale=2,
                                   n_out_channel=3,
                                   name='subpixel_image')
        net_image = ElementwiseLayer(layer=[gradient_level, net_image],
                                     combine_fn=tf.add,
                                     name='add_image')

    return net_image, net_feature, gradient_level
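
For context, a hedged sketch of how this single level is typically chained into a 4x LapSRN, assuming config, lrelu and the TensorLayer 1.x imports used above are defined elsewhere; the input stem below is illustrative, not the original script:

t_image = tf.placeholder('float32', [None, 32, 32, 3], name='t_LR_image')
net_image = InputLayer(t_image, name='input_image')
net_feature = Conv2dLayer(net_image,
                          shape=[3, 3, 3, 64],
                          strides=[1, 1, 1, 1],
                          name='init_conv',
                          W_init=tf.contrib.layers.xavier_initializer())

# Each level doubles the resolution; the second call reuses the level's weights.
net_image1, net_feature1, grad1 = LapSRNSingleLevel(net_image, net_feature, reuse=False)
net_image2, net_feature2, grad2 = LapSRNSingleLevel(net_image1, net_feature1, reuse=True)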
Example #3
def upscale(layer,
            out_channels,
            out_size=None,
            scale=None,
            mode='upconv',
            name='upscale'):
    # if (out_size is None) and (scale is None):
    #     raise ValueError("At least one of out_size and scale must be non-None")

    if mode == 'upconv':
        if out_size is None:
            batch, height, width, _ = layer.outputs.get_shape()
            out_size = [int(height * scale), int(width * scale)]
        return upconv(layer, out_channels, out_size, filter_size=3, name=name)

    elif mode == 'deconv':
        return deconv2d(layer,
                        out_channels=out_channels,
                        out_size=out_size,
                        name='%sdeconv' % name)

    elif mode == 'subpixel':
        if (scale is None):
            raise ValueError('scale cannot be None when mode==subpixel')

        n = SubpixelConv2d(layer, scale=scale, name='%s/subpixel' % name)
        return conv2d(n,
                      n_filter=out_channels,
                      filter_size=3,
                      name='%s/conv' % name)

    else:
        raise ValueError('unknown mode: %s' % mode)
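
A brief usage sketch, assuming net is an existing TensorLayer layer with 64-channel outputs and that the upconv, deconv2d and conv2d helpers referenced above are defined elsewhere in the repository:

n1 = upscale(net, out_channels=64, scale=2, mode='upconv', name='up1')              # resize + conv
n2 = upscale(net, out_channels=64, out_size=(128, 128), mode='deconv', name='up2')  # transposed conv
n3 = upscale(net, out_channels=64, scale=2, mode='subpixel', name='up3')            # pixel shuffle + conv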
Example #4
def Generator(input_shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)

    layer_in = Input(input_shape)
    l = Conv2d(64, (3, 3), (1, 1),
               padding='SAME',
               act=tf.nn.relu,
               W_init=w_init)(layer_in)
    temp = l

    #Residual Blocks
    for i in range(16):
        nn = Conv2d(64, (3, 3), (1, 1),
                    act=tf.nn.relu,
                    padding='SAME',
                    W_init=w_init,
                    b_init=None)(l)
        nn = BatchNorm2d(act=tf.nn.relu, gamma_init=g_init)(nn)
        #nn = PRelu(a_init = w_init)(nn)
        nn = Conv2d(64, (3, 3), (1, 1),
                    act=tf.nn.relu,
                    padding='SAME',
                    W_init=w_init,
                    b_init=None)(nn)
        nn = BatchNorm2d(act=tf.nn.relu, gamma_init=g_init)(nn)
        nn = Elementwise(tf.add)([l, nn])
        l = nn

    l = Conv2d(64, (3, 3), (1, 1), padding='SAME', W_init=w_init,
               b_init=None)(l)
    l = BatchNorm2d(gamma_init=g_init)(l)
    l = Elementwise(tf.add)([l, temp])

    l = Conv2d(256, (3, 3), (1, 1), padding='SAME', W_init=w_init)(l)
    l = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(l)

    l = Conv2d(256, (3, 3), (1, 1), padding='SAME', W_init=w_init)(l)
    l = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(l)

    layer_out = Conv2d(3, (1, 1), (1, 1),
                       act=tf.nn.tanh,
                       padding='SAME',
                       W_init=w_init)(l)
    Generator = Model(inputs=layer_in, outputs=layer_out, name='generator')

    return Generator
Example #5
def get_G(input_shape):

    w_init = tf.random_normal_initializer(stddev=0.02)
    nin = Input(input_shape)
    n = Conv2d(64, (3, 3), (1, 1),
               act=tf.nn.relu,
               padding='SAME',
               W_init=w_init)(nin)
    # Upsample the input and add it directly to the end of the network
    up_bicubic = tl.layers.UpSampling2d((4, 4), method='bicubic')(nin)
    temp = n
    # B residual blocks
    for i in range(16):
        nn = Conv2d(64, (3, 3), (1, 1),
                    act=tf.nn.relu,
                    padding='SAME',
                    W_init=w_init,
                    b_init=None)(n)
        nn = Conv2d(64, (3, 3), (1, 1),
                    padding='SAME',
                    W_init=w_init,
                    b_init=None)(nn)
        nn = Elementwise(tf.add)([n, nn])
        n = nn
    n = Conv2d(64, (3, 3), (1, 1), padding='SAME', W_init=w_init,
               b_init=None)(n)
    n = Elementwise(tf.add)([n, temp])
    # B residual blocks end

    n = Conv2d(256, (3, 3), (1, 1), padding='SAME', W_init=w_init)(n)
    n = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(n)
    n = Conv2d(256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init)(n)
    n = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(n)
    nn = Conv2d(3, (1, 1), (1, 1),
                act=tf.nn.tanh,
                padding='SAME',
                W_init=w_init)(n)

    nn = Elementwise(tf.add)([nn, up_bicubic])
    G = Model(inputs=nin, outputs=nn)

    return G
Example #6
 def __init__(self):
     super(SRresnet,self).__init__()
     w_init = tf.random_normal_initializer(stddev=0.02)
     g_init = tf.random_normal_initializer(1., 0.02)
     self.conv1 = Conv2d(n_filter=64, filter_size=(3, 3), strides=(1, 1), in_channels=3,
                         act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=None)
     self.conv2 = Conv2d(n_filter=64, filter_size=(3, 3), strides=(1, 1), in_channels=64,
                         padding='SAME', W_init=w_init, b_init=None)
     self.conv3 = Conv2d(n_filter=256, filter_size=(3, 3), strides=(1, 1), in_channels=64,
                         padding='SAME', W_init=w_init, b_init=None)
     self.conv4 = Conv2d(n_filter=3, filter_size=(3, 3), strides=(1, 1), in_channels=64,
                         act=tf.nn.tanh, padding='SAME', W_init=w_init, b_init=None)
     self.bn2 = BatchNorm2d(num_features=64, gamma_init=g_init)
     self.bn1 = BatchNorm2d(num_features=64, gamma_init=g_init, act=tf.nn.relu)
     self.subconv1 = SubpixelConv2d(scale=2, n_out_channels=256, in_channels=256, act=tf.nn.relu)
     self.add1 = Elementwise(tf.add)
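
Only the constructor is shown in this excerpt; a minimal sketch of a forward pass consistent with the layers declared above (not the author's original forward, and with a single residual step for brevity) could look like this:

 def forward(self, x):
     # Sketch only: the actual forward() is not part of this excerpt.
     x = self.conv1(x)               # 3 -> 64, ReLU
     temp = x
     out = self.bn1(self.conv2(x))   # 64 -> 64, BatchNorm + ReLU
     x = self.add1([temp, out])      # residual connection
     x = self.conv3(x)               # 64 -> 256
     x = self.subconv1(x)            # 2x pixel shuffle
     return self.conv4(x)            # -> 3 channels, tanh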
Example #7
def model_G2():  ##Phase2 Generator

    gamma_init = tf1.random_normal_initializer(1., 0.02)
    w_init = tf1.random_normal_initializer(stddev=0.02)
    fn = tf1.nn.relu

    ##	Input layers
    lr_image = Input(
        (None, 128, 128, 3))  ##	(batch_size, height, width, channel)
    hr_image = Input((None, 512, 512, 3))

    ## 	Feature extracting layers from LR image
    lr_feature_layer_1 = Conv2d(64, (3, 3), (1, 1),
                                act=fn,
                                padding='SAME',
                                W_init=w_init)(lr_image)  # Shape (None, 128, 128, 64)
    lr_feature_layer_1 = BatchNorm2d(gamma_init=gamma_init)(lr_feature_layer_1)

    lr_feature_layer_2 = SubpixelConv2d(scale=4, act=fn)(
        lr_feature_layer_1)  # Shape (None, 512, 512, 4)

    ##	Feature extracting layers from HR image

    hr_feature_layer_1 = Conv2d(64, (3, 3), (1, 1),
                                act=fn,
                                padding='SAME',
                                W_init=w_init)(hr_image)  # Shape (None, 512, 512, 64)
    hr_feature_layer_1 = BatchNorm2d(gamma_init=gamma_init)(hr_feature_layer_1)

    ##	Features Merging layers

    merge_layer = Concat(concat_dim=-1)(
        [lr_feature_layer_2, hr_feature_layer_1])  # Shape (None, 512, 512, 68)

    non_linearity_layer_1 = Conv2d(64, (5, 5), (1, 1),
                                   act=fn,
                                   padding='SAME',
                                   W_init=w_init)(merge_layer)  # Shape (None, 512, 512, 64)
    non_linearity_layer_1 = BatchNorm2d(
        gamma_init=gamma_init)(non_linearity_layer_1)

    ## 	Reconstruction layers
    Recon_layer_1 = Conv2d(3, (5, 5), (1, 1),
                           act=fn,
                           padding='SAME',
                           W_init=w_init)(non_linearity_layer_1)  # Shape (None, 512, 512, 3)
    Recon_layer_2 = Elementwise(combine_fn=tf1.add)(
        [Recon_layer_1, hr_image])  # Shape (None, 512, 512, 3)

    return Model(inputs=[lr_image, hr_image], outputs=Recon_layer_2)
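
A short usage sketch for this two-input model, assuming TensorLayer 2.x; the dummy tensors simply match the declared 128x128 and 512x512 input sizes:

import tensorflow as tf

M = model_G2()
M.eval()                                       # inference mode for BatchNorm
lr = tf.zeros([1, 128, 128, 3], tf.float32)    # low-resolution input
hr = tf.zeros([1, 512, 512, 3], tf.float32)    # high-resolution guidance image
out = M([lr, hr])
print(out.shape)                               # expected (1, 512, 512, 3)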
Example #8
def get_espcn(input_shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)

    nin = Input(input_shape)
    n = Conv2d(64, (5, 5), (1, 1),
               act=tf.nn.relu,
               padding='SAME',
               W_init=w_init)(nin)
    n = Conv2d(32, (3, 3), (1, 1),
               act=tf.nn.relu,
               padding='SAME',
               W_init=w_init)(n)
    n = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(n)
    n = Conv2d(48, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init)(n)
    n = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(n)
    nn = Conv2d(3, (1, 1), (1, 1),
                act=tf.nn.tanh,
                padding='SAME',
                W_init=w_init)(n)
    G = Model(inputs=nin, outputs=nn, name="generator_espcn")
    return G
Example #9
if len(n.all_layers) != 2:
    raise Exception("layers dont match")

if len(n.all_params) != 2:
    raise Exception("params dont match")

if n.count_params() != 416:
    raise Exception("params dont match")

## 2D
x = tf.placeholder('float32', [10, 100, 100, 3], name='x')
n = InputLayer(x, name='in')
n = Conv2d(n, 32, (3, 2), (1, 1), padding='SAME', name='conv2d')
n = SubpixelConv2d(n, scale=2, name='subpixel2d')
print(n.outputs.shape)

n.print_layers()
n.print_params(False)

shape = n.outputs.get_shape().as_list()
if shape != [10, 200, 200, 8]:
    raise Exception("shape dont match")

if len(n.all_layers) != 2:
    raise Exception("layers dont match")

if len(n.all_params) != 2:
    raise Exception("params dont match")
Example #10
def SRGAN_g(t_image, is_train=False, reuse=False):
    '''
    Build the generator
    '''
    w_init = tf.random_normal_initializer(stddev=0.02)
    b_init = tf.constant_initializer(value=0.0)
    g_init = tf.random_normal_initializer(1., 0.02)
    with tf.variable_scope("SRGAN_g", reuse=reuse) as vs:

        n = InputLayer(t_image, name='in')
        n = Conv2d(n,
                   64, (3, 3), (1, 1),
                   act=tf.nn.relu,
                   padding='SAME',
                   W_init=w_init,
                   name='n64s1/c')
        temp = n

        # 16 residual blocks
        for i in range(16):
            nn = Conv2d(n,
                        64, (3, 3), (1, 1),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        b_init=b_init,
                        name='n64s1/c1/%s' % i)
            nn = BatchNormLayer(nn,
                                act=tf.nn.relu,
                                is_train=is_train,
                                gamma_init=g_init,
                                name='n64s1/b1/%s' % i)
            nn = Conv2d(nn,
                        64, (3, 3), (1, 1),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        b_init=b_init,
                        name='n64s1/c2/%s' % i)
            nn = BatchNormLayer(nn,
                                is_train=is_train,
                                gamma_init=g_init,
                                name='n64s1/b2/%s' % i)
            nn = ElementwiseLayer([n, nn],
                                  tf.add,
                                  name='b_residual_add/%s' % i)
            n = nn

        n = Conv2d(n,
                   64, (3, 3), (1, 1),
                   act=None,
                   padding='SAME',
                   W_init=w_init,
                   b_init=b_init,
                   name='n64s1/c/m')
        n = BatchNormLayer(n,
                           is_train=is_train,
                           gamma_init=g_init,
                           name='n64s1/b/m')
        n = ElementwiseLayer([n, temp], tf.add, name='add3')
        # 16 residual blocks end

        n = Conv2d(n,
                   256, (3, 3), (1, 1),
                   act=None,
                   padding='SAME',
                   W_init=w_init,
                   b_init=b_init,
                   name='n256s1/1')
        n = SubpixelConv2d(n,
                           scale=2,
                           n_out_channel=None,
                           act=tf.nn.relu,
                           name='pixelshufflerx2/1')

        # n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, name='n256s1/2')
        # n = SubpixelConv2d(n, scale=2, n_out_channel=None, act=tf.nn.relu, name='pixelshufflerx2/2')

        n = Conv2d(n,
                   3, (1, 1), (1, 1),
                   act=tf.nn.tanh,
                   padding='SAME',
                   W_init=w_init,
                   b_init=b_init,
                   name='out')
        return n
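
A typical TensorFlow 1.x call pattern for this graph-mode builder; the 96x96 placeholder size is only an illustration:

t_image = tf.placeholder('float32', [None, 96, 96, 3], name='t_image_input')
net_g = SRGAN_g(t_image, is_train=True, reuse=False)
net_g_test = SRGAN_g(t_image, is_train=False, reuse=True)
# Only one 2x pixel-shuffle stage is active above, so 96x96 becomes 192x192.
print(net_g.outputs.shape)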
Example #11
 def get_conv_block(n, w_init):
     n = Conv2d(256, (3, 3), (1, 1), padding='SAME', W_init=w_init)(n)
     n = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(n)
     return n
Example #12
 def _phase_shift(x):
     n = InputLayer(x, name='input_subpixel')
     n = SubpixelConv2d(n, scale=scale, n_out_channel=None, act=tf.nn.relu)
     return n.outputs
Example #13
def UNet_A(lf_extra,
           n_slices,
           output_size,
           is_train=True,
           reuse=False,
           name='unet'):
    '''U-net based VCD-Net for light field reconstruction.
    Params:
        lf_extra: tf.tensor
            In shape of [batch, height, width, n_num^2], the extracted views from the light field image.
        n_slices: int
            Number of slices in the 3-D reconstruction.
        output_size: list of int
            Lateral size of the 3-D reconstruction, i.e., [height, width].
        is_train: boolean
            See tl.layers.BatchNormLayer.
        reuse: boolean
            Whether to reuse the variables. See tf.variable_scope() for details.
        name: string
            The name of the variable scope.
    Return:
        The 3-D reconstruction in shape of [batch, height, width, depth=n_slices].
    '''
    n_interp = 4
    # _, w, h, _ = lf_extra.shape
    #channels_interp = in_channels.value
    channels_interp = 128
    act = tf.nn.relu

    with tf.variable_scope(name, reuse=reuse):
        n = InputLayer(lf_extra, 'lf_extra')
        n = conv2d(n, n_filter=channels_interp, filter_size=7, name='conv1')

        ## Up-scale input
        with tf.variable_scope('interp'):
            for i in range(n_interp):
                channels_interp = channels_interp // 2  # integer division keeps n_filter an int
                n = SubpixelConv2d(n, scale=2, name='interp/subpixel%d' % i)
                n = conv2d(n,
                           n_filter=channels_interp,
                           filter_size=3,
                           name='conv%d' % i)

            n = conv2d(n,
                       n_filter=channels_interp,
                       filter_size=3,
                       name='conv_final')  # 176*176
            n = batch_norm(n, is_train=is_train, name='bn_final')
            n = ReluLayer(n, name='reul_final')

        pyramid_channels = [
            128, 256, 512, 512, 512
        ]  # output channels number of each conv layer in the encoder
        encoder_layers = []
        with tf.variable_scope('encoder'):
            n = conv2d(n, n_filter=64, filter_size=3, stride=1, name='conv0')
            n = batch_norm(n, is_train=is_train, name='bn_0')
            n = ReluLayer(n, name='reul0')

            for idx, nc in enumerate(pyramid_channels):
                encoder_layers.append(
                    n
                )  # append n0, n1, n2, n3, n4 (but without n5) to the layers list
                print('encoder %d : %s' % (idx, str(n.outputs.get_shape())))
                n = conv2d(n,
                           n_filter=nc,
                           filter_size=3,
                           stride=1,
                           name='conv%d' % (idx + 1))
                n = batch_norm(n, is_train=is_train, name='bn%d' % (idx + 1))
                n = ReluLayer(n, name='reul%d' % (idx + 1))
                n1 = PadDepth(encoder_layers[-1], desired_channels=nc)
                n = merge([n, n1], name='add%d' % (idx + 1))
                n = tl.layers.MaxPool2d(n,
                                        filter_size=(3, 3),
                                        strides=(2, 2),
                                        name='maxplool%d' % (idx + 1))

        nl = len(encoder_layers)
        with tf.variable_scope('decoder'):
            _, h, w, _ = encoder_layers[-1].outputs.shape.as_list()
            n = UpSampling2dLayer(n,
                                  size=(h, w),
                                  is_scale=False,
                                  name='upsamplimg')

            for idx in range(nl - 1, -1, -1):  # idx = 4,3,2,1,0
                if idx > 0:
                    _, h, w, _ = encoder_layers[idx -
                                                1].outputs.shape.as_list()
                    out_size = (h, w)
                    out_channels = pyramid_channels[idx - 1]
                else:
                    #out_size = None
                    out_channels = n_slices

                print('decoder %d : %s' % (idx, str(n.outputs.get_shape())))
                n = ConcatLayer([encoder_layers[idx], n],
                                concat_dim=-1,
                                name='concat%d' % (nl - idx))
                n = conv2d(n,
                           out_channels,
                           filter_size=3,
                           stride=1,
                           name='conv%d' % (nl - idx + 1))
                n = ReluLayer(n, name='relu%d' % (nl - idx + 1))
                n = batch_norm(n,
                               is_train=is_train,
                               name='bn%d' % (nl - idx + 1))
                #n = UpConv(n, 512, filter_size=4, factor=2, name='upconv2')
                n = UpSampling2dLayer(n,
                                      size=out_size,
                                      is_scale=False,
                                      name='upsamplimg%d' % (nl - idx + 1))

                #n = DropoutLayer(n, keep=0.5, is_fix=True, is_train=is_train, name='dropout1')

            if n.outputs.shape[1] != output_size[0]:
                n = UpSampling2dLayer(n,
                                      size=output_size,
                                      is_scale=False,
                                      name='resize_final')
            #n = conv2d(n, n_slices, filter_size=3, stride=1,name='conv_final' )
            n.outputs = tf.tanh(n.outputs)
            #n.outputs = tf.nn.relu(n.outputs)
            #n = conv2d(n, n_filter=n_slices, filter_size=3, act=tf.tanh, name='out')
            return n
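
An illustrative TF1-style call, assuming an 11x11 light-field layout so that lf_extra holds n_num^2 = 121 views at 1/11 of the 176x176 target resolution; all sizes here are assumptions rather than values from the original project:

lf_views = tf.placeholder('float32', [None, 11, 11, 121], name='lf_extra')
net = UNet_A(lf_views, n_slices=61, output_size=[176, 176], is_train=True, reuse=False)
recon = net.outputs   # [batch, 176, 176, 61] after the final resize and tanh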