Example #1
def denseConvlayer(layer_inputs, bottleneck_scale, growth_rate, is_training):
    # Build the bottleneck operation
    net = layer_inputs
    net_temp = tf.identity(net)
    net = batchnorm(net, is_training)
    net = prelu_tf(net, name='Prelu_1')
    net = conv2(net,
                kernel=1,
                output_channel=bottleneck_scale * growth_rate,
                stride=1,
                use_bias=False,
                scope='conv1x1')
    net = batchnorm(net, is_training)
    net = prelu_tf(net, name='Prelu_2')
    net = conv2(net,
                kernel=3,
                output_channel=growth_rate,
                stride=1,
                use_bias=False,
                scope='conv3x3')

    # Concatenate the processed features with the layer inputs (dense connectivity)
    net = tf.concat([net_temp, net], axis=3)

    return net
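Examples #2 and #12 below call a denseBlock helper that is not included in this listing. A minimal sketch of what it could look like, assuming it simply stacks denseConvlayer (Example #1) layer_per_block times; the actual helper may differ:

def denseBlock(block_inputs, layer_per_block, bottleneck_scale, growth_rate, is_training):
    # Hypothetical sketch only: each denseConvlayer concatenates its output to its
    # input (axis=3), so the channel count grows by growth_rate at every layer.
    net = block_inputs
    for i in range(layer_per_block):
        with tf.variable_scope('dense_conv_%d' % (i + 1)):
            net = denseConvlayer(net, bottleneck_scale, growth_rate, is_training)
    return net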
Example #2
def generatorDense(gen_inputs, gen_output_channels, reuse=False, is_training=None):
    # The main network
    with tf.variable_scope('generator_unit', reuse=reuse):
        # The input stage
        with tf.variable_scope('input_stage'):
            net = conv2(gen_inputs, 9, 64, 1, scope='conv')
            net = prelu_tf(net)

        # The dense block part
        # Define the denseblock configuration
        layer_per_block = 16
        bottleneck_scale = 4
        growth_rate = 12
        transition_output_channel = 128
        with tf.variable_scope('denseBlock_1'):
            net = denseBlock(net, layer_per_block, bottleneck_scale, growth_rate, is_training)

        with tf.variable_scope('transition_layer_1'):
            net = transitionLayer(net, transition_output_channel, is_training)

        with tf.variable_scope('subpixelconv_stage1'):
            net = conv2(net, 3, 256, 1, scope='conv')
            net = pixelShuffler(net, scale=2)
            net = prelu_tf(net)

        with tf.variable_scope('subpixelconv_stage2'):
            net = conv2(net, 3, 256, 1, scope='conv')
            net = pixelShuffler(net, scale=2)
            net = prelu_tf(net)

        with tf.variable_scope('output_stage'):
            net = conv2(net, 9, gen_output_channels, 1, scope='conv')

        return net
Example #3
def fnet(fnet_input, reuse=False):
    def down_block(inputs, output_channel=64, stride=1, scope='down_block'):
        with tf.compat.v1.variable_scope(scope):
            net = conv2(inputs,
                        3,
                        output_channel,
                        stride,
                        use_bias=True,
                        scope='conv_1')
            net = lrelu(net, 0.2)
            net = conv2(net,
                        3,
                        output_channel,
                        stride,
                        use_bias=True,
                        scope='conv_2')
            net = lrelu(net, 0.2)
            net = maxpool(net)

        return net

    def up_block(inputs, output_channel=64, stride=1, scope='up_block'):
        with tf.compat.v1.variable_scope(scope):
            net = conv2(inputs,
                        3,
                        output_channel,
                        stride,
                        use_bias=True,
                        scope='conv_1')
            net = lrelu(net, 0.2)
            net = conv2(net,
                        3,
                        output_channel,
                        stride,
                        use_bias=True,
                        scope='conv_2')
            net = lrelu(net, 0.2)
            new_shape = tf.shape(net)[1:-1] * 2
            net = tf.image.resize(net, new_shape)

        return net

    with tf.compat.v1.variable_scope('autoencode_unit', reuse=reuse):
        net = down_block(fnet_input, 32, scope='encoder_1')
        net = down_block(net, 64, scope='encoder_2')
        net = down_block(net, 128, scope='encoder_3')

        net = up_block(net, 256, scope='decoder_1')
        net = up_block(net, 128, scope='decoder_2')
        net1 = up_block(net, 64, scope='decoder_3')

        with tf.compat.v1.variable_scope('output_stage'):
            net = conv2(net1, 3, 32, 1, scope='conv1')
            net = lrelu(net, 0.2)
            net2 = conv2(net, 3, 2, 1, scope='conv2')
            net = tf.tanh(net2) * 24.0
            # the 24.0 is the max velocity; details can be found in the TecoGAN paper
    return net
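A minimal usage sketch for fnet in TF 1.x graph mode; the input layout (two RGB frames stacked along the channel axis) and the 64x64 spatial size are assumptions for illustration:

# Hypothetical usage sketch; input layout and size are assumptions.
frame_pair = tf.compat.v1.placeholder(tf.float32, [None, 64, 64, 6], name='frame_pair')
flow = fnet(frame_pair, reuse=False)  # shape [None, 64, 64, 2], values in (-24, 24)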
Example #4
def generator_F(gen_inputs, gen_output_channels, reuse=False, FLAGS=None):
    # Check the flag
    if FLAGS is None:
        raise ValueError('No FLAGS is provided for generator')

    # The Bx residual blocks
    def residual_block(inputs, output_channel=64, stride=1, scope='res_block'):
        with tf.compat.v1.variable_scope(scope):
            net = conv2(inputs,
                        3,
                        output_channel,
                        stride,
                        use_bias=True,
                        scope='conv_1')
            net = tf.nn.relu(net)
            net = conv2(net,
                        3,
                        output_channel,
                        stride,
                        use_bias=True,
                        scope='conv_2')
            net = net + inputs

        return net

    with tf.compat.v1.variable_scope('generator_unit', reuse=reuse):
        # The input layer
        with tf.compat.v1.variable_scope('input_stage'):
            net = conv2(gen_inputs, 3, 64, 1, scope='conv')
            stage1_output = tf.nn.relu(net)

        net = stage1_output

        # The residual block parts
        for i in range(1, FLAGS.num_resblock + 1,
                       1):  # should be 16 for TecoGAN, and 10 for TecoGANmini
            name_scope = 'resblock_%d' % (i)
            net = residual_block(net, 64, 1, name_scope)

        with tf.compat.v1.variable_scope('conv_tran2highres'):
            net = conv2_tran(net, 3, 64, 2, scope='conv_tran1')
            net = tf.nn.relu(net)

            net = conv2_tran(net, 3, 64, 2, scope='conv_tran2')
            net = tf.nn.relu(net)

        with tf.compat.v1.variable_scope('output_stage'):
            net = conv2(net, 3, gen_output_channels, 1, scope='conv')
            low_res_in = gen_inputs[:, :, :, 0:3]  # ignore the warped previous high-res frame
            # for tensorflow API <= 1.13, bicubic_four is equivalent to the following:
            # hi_shape = tf.shape( net )
            # bicubic_hi = tf.image.resize_bicubic( low_res_in, (hi_shape[1], hi_shape[2])) # no GPU implementation
            bicubic_hi = bicubic_four(low_res_in)  # can put on GPU
            net = net + bicubic_hi
            net = preprocess(net)
    return net
Example #5
def transitionLayer(layer_inputs, output_channel, is_training):
    net = layer_inputs
    # net = batchnorm(net, is_training)
    net = prelu_tf(net)
    net = conv2(net, 1, output_channel, stride=1, use_bias=False, scope='conv1x1')

    return net
Example #6
def residual_block(inputs, output_channel, stride, scope):
    with tf.variable_scope(scope):
        net = conv2(inputs,
                    3,
                    output_channel,
                    stride,
                    use_bias=False,
                    scope='conv_1')
        net = prelu_tf(net)
        net = conv2(net,
                    3,
                    output_channel,
                    stride,
                    use_bias=False,
                    scope='conv_2')
        net = net + inputs
    return net
Example #7
    def residual_block(inputs, output_channel=64, stride=1, scope='res_block'):
        with tf.compat.v1.variable_scope(scope):
            net = conv2(inputs,
                        3,
                        output_channel,
                        stride,
                        use_bias=True,
                        scope='conv_1')
            net = tf.nn.relu(net)
            net = conv2(net,
                        3,
                        output_channel,
                        stride,
                        use_bias=True,
                        scope='conv_2')
            net = net + inputs

        return net
Example #8
def discriminator(dis_inputs, is_training=True):
    # Define the discriminator block
    def discriminator_block(inputs, output_channel, kernel_size, stride,
                            scope):
        with tf.variable_scope(scope):
            net = conv2(inputs,
                        kernel_size,
                        output_channel,
                        stride,
                        use_bias=False,
                        scope='conv1')
            # net = batchnorm(net, is_training)
            net = lrelu(net, 0.2)

        return net

    with tf.variable_scope('discriminator_unit'):
        # The input layer
        with tf.variable_scope('input_stage'):
            net = conv2(dis_inputs, 3, 64, 1, scope='conv')
            net = lrelu(net, 0.2)

        # The discriminator block part
        # block 1
        net = discriminator_block(net, 64, 3, 2, 'disblock_1')

        # block 2
        net = discriminator_block(net, 128, 3, 1, 'disblock_2')

        # block 3
        net = discriminator_block(net, 128, 3, 2, 'disblock_3')

        # block 4
        net = discriminator_block(net, 256, 3, 1, 'disblock_4')

        # block 5
        net = discriminator_block(net, 256, 3, 2, 'disblock_5')

        # block 6
        net = discriminator_block(net, 512, 3, 1, 'disblock_6')

        # block_7
        net = discriminator_block(net, 512, 3, 2, 'disblock_7')

        # The dense layer 1
        with tf.variable_scope('dense_layer_1'):
            net = slim.flatten(net)
            net = denselayer(net, 1024)
            net = lrelu(net, 0.2)

        # The dense layer 2
        with tf.variable_scope('dense_layer_2'):
            logits = denselayer(net, 1)
            prob = tf.nn.sigmoid(logits)

    return logits, prob
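A minimal usage sketch in TF 1.x graph mode; the batch and 128x128 input size are assumptions (any spatial size divisible by 16 works, since four blocks use stride 2 before the flatten):

# Hypothetical usage sketch; batch and image size are assumptions.
hr_images = tf.placeholder(tf.float32, [16, 128, 128, 3], name='hr_images')
logits, prob = discriminator(hr_images, is_training=True)  # both shaped [16, 1]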
Example #9
    def down_block(inputs, output_channel=64, stride=1, scope='down_block'):
        with tf.compat.v1.variable_scope(scope):
            net = conv2(inputs,
                        3,
                        output_channel,
                        stride,
                        use_bias=True,
                        scope='conv_1')
            net = lrelu(net, 0.2)
            net = conv2(net,
                        3,
                        output_channel,
                        stride,
                        use_bias=True,
                        scope='conv_2')
            net = lrelu(net, 0.2)
            net = maxpool(net)

        return net
Example #10
    def up_block(inputs, output_channel=64, stride=1, scope='up_block'):
        with tf.compat.v1.variable_scope(scope):
            net = conv2(inputs,
                        3,
                        output_channel,
                        stride,
                        use_bias=True,
                        scope='conv_1')
            net = lrelu(net, 0.2)
            net = conv2(net,
                        3,
                        output_channel,
                        stride,
                        use_bias=True,
                        scope='conv_2')
            net = lrelu(net, 0.2)
            new_shape = tf.shape(net)[1:-1] * 2
            net = tf.image.resize(net, new_shape)

        return net
Example #11
    def discriminator_block(inputs, output_channel, kernel_size, stride,
                            scope):
        with tf.variable_scope(scope):
            net = conv2(inputs,
                        kernel_size,
                        output_channel,
                        stride,
                        use_bias=False,
                        scope='conv1')
            # net = batchnorm(net, is_training)
            net = lrelu(net, 0.2)

        return net
Example #12
def discriminatorDense(dis_inputs, is_training=True):
    # Define the discriminator block
    def discriminator_block(inputs, output_channel, kernel_size, stride, scope):
        with tf.variable_scope(scope):
            net = conv2(inputs, kernel_size, output_channel, stride, use_bias=False, scope='conv1')
            # net = batchnorm(net, is_training)
            net = lrelu(net, 0.2)
        return net

    with tf.variable_scope('discriminator_unit'):
        # The input layer
        with tf.variable_scope('input_stage'):
            net = conv2(dis_inputs, 3, 64, 1, scope='conv')
            net = lrelu(net, 0.2)

        # The discriminator block part
        # block 1
        net = discriminator_block(net, 64, 3, 2, 'disblock_1')

        # block 2
        net = discriminator_block(net, 64, 3, 2, 'disblock_2')

        # block 3
        net = discriminator_block(net, 64, 3, 1, 'disblock_3')

        # The dense block part
        # Define the denseblock configuration
        layer_per_block = 8
        bottleneck_scale = 4
        growth_rate = 12
        transition_output_channel = 128
        with tf.variable_scope('denseBlock_1'):
            net = denseBlock(net, layer_per_block, bottleneck_scale, growth_rate, is_training)

        with tf.variable_scope('transition_layer_1'):
            net = transitionLayer(net, transition_output_channel, is_training)

        # The dense layer 1
        with tf.variable_scope('dense_layer_1'):
            net = slim.flatten(net)
            net = denselayer(net, 1024)
            net = lrelu(net, 0.2)

        # The dense layer 2
        with tf.variable_scope('dense_layer_2'):
            net = denselayer(net, 1)
            net = tf.nn.sigmoid(net)
    return net
Example #13
def generator_split(gen_inputs,
                    gen_output_channels,
                    num_resblock=16,
                    reuse=False,
                    is_training=None):
    # The Bx residual blocks
    def residual_block(inputs, output_channel, stride, scope):
        with tf.variable_scope(scope):
            net = conv2(inputs,
                        3,
                        output_channel,
                        stride,
                        use_bias=False,
                        scope='conv_1')
            net = prelu_tf(net)
            net = conv2(net,
                        3,
                        output_channel,
                        stride,
                        use_bias=False,
                        scope='conv_2')
            net = net + inputs
        return net

    with tf.variable_scope('generator_unit', reuse=reuse):
        # The input layer
        with tf.variable_scope('input_stage'):
            net = conv2(gen_inputs, 9, 64, 1, scope='conv')
            net = prelu_tf(net)
        stage1_output = net
        # The residual block parts
        for i in range(1, num_resblock + 1, 1):
            name_scope = 'resblock_%d' % (i)
            net = residual_block(net, 64, 1, name_scope)
        with tf.variable_scope('resblock_output'):
            net = conv2(net, 3, 64, 1, use_bias=False, scope='conv')
        net = net + stage1_output
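    # Split the feature map along the height axis with a 2-row overlap (rows 15-16
    # appear in both halves); each half is upscaled separately below, and the overlap
    # is cropped away again when the two halves are concatenated.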
    inputs_top = tf.slice(net, [0, 0, 0, 0], [-1, 17, -1, -1])
    inputs_down = tf.slice(net, [0, 15, 0, 0], [-1, -1, -1, -1])
    with tf.variable_scope('generator_unit_1', reuse=reuse):
        with tf.variable_scope('subpixelconv_stage1'):
            net = relate_conv(inputs_top, 64, 64, scope='conv')
            net = pixelShuffler(net, scale=2)
            net = prelu_tf(net)
        with tf.variable_scope('subpixelconv_stage2'):
            net = relate_conv(net, 64, 64, scope='conv')
            net = pixelShuffler(net, scale=2)
            net_top = prelu_tf(net)
    with tf.variable_scope('generator_unit_2', reuse=reuse):
        with tf.variable_scope('subpixelconv_stage1'):
            net = relate_conv(inputs_down, 64, 64, scope='conv')
            net = pixelShuffler(net, scale=2)
            net = prelu_tf(net)
        with tf.variable_scope('subpixelconv_stage2'):
            net = relate_conv(net, 64, 64, scope='conv')
            net = pixelShuffler(net, scale=2)
            net_down = prelu_tf(net)
    net = tf.concat([
        tf.slice(net_top, [0, 0, 0, 0], [-1, 64, -1, -1]),
        tf.slice(net_down, [0, 4, 0, 0], [-1, -1, -1, -1])
    ],
                    axis=1)
    with tf.variable_scope('output_stage'):
        net = conv2(net, 9, gen_output_channels, 1, scope='conv')
        net = tf.nn.tanh(net)
    return net
Example #14
def generator(gen_inputs,
              gen_output_channels,
              num_resblock=16,
              reuse=False,
              is_training=None):
    # The Bx residual blocks
    def residual_block(inputs, output_channel, stride, scope):
        with tf.variable_scope(scope):
            net = conv2(inputs,
                        3,
                        output_channel,
                        stride,
                        use_bias=False,
                        scope='conv_1')
            # net = batchnorm(net, is_training)
            net = prelu_tf(net)
            net = conv2(net,
                        3,
                        output_channel,
                        stride,
                        use_bias=False,
                        scope='conv_2')
            # net = batchnorm(net, is_training)
            net = net + inputs
        return net

    with tf.variable_scope('generator_unit', reuse=reuse):
        # The input layer
        with tf.variable_scope('input_stage'):
            net = conv2(gen_inputs, 9, 64, 1, scope='conv')
            net = prelu_tf(net)
        stage1_output = net
        # The residual block parts
        for i in range(1, num_resblock + 1, 1):
            name_scope = 'resblock_%d' % (i)
            net = residual_block(net, 64, 1, name_scope)
        with tf.variable_scope('resblock_output'):
            net = conv2(net, 3, 64, 1, use_bias=False, scope='conv')
            # net = batchnorm(net, is_training)
        net = net + stage1_output
        with tf.variable_scope('subpixelconv_stage1'):
            # net = conv2(net, 3, 256, 1, scope='conv')
            net = subpixel_pre(net,
                               input_channel=64,
                               output_channel=256,
                               scope='conv')
            # net = relate_conv(net, 64, 64, scope='conv')
            # net = interpolation_conv(net, 64, 64, scope='conv')
            net = pixelShuffler(net, scale=2)
            net = prelu_tf(net)
        with tf.variable_scope('subpixelconv_stage2'):
            # net = conv2(net, 3, 256, 1, scope='conv')
            net = subpixel_pre(net,
                               input_channel=64,
                               output_channel=256,
                               scope='conv')
            # net = relate_conv(net, 64, 64, scope='conv')
            # net = interpolation_conv(net, 64, 64, scope='conv')
            net = pixelShuffler(net, scale=2)
            net = prelu_tf(net)
        with tf.variable_scope('output_stage'):
            net = conv2(net, 9, gen_output_channels, 1, scope='conv')
            # net = tf.nn.tanh(net)
    return net
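A minimal usage sketch in TF 1.x graph mode; the 32x32 patch size is an assumption. The two subpixelconv stages each upscale by a factor of 2, so the output is 4x the input resolution:

# Hypothetical usage sketch; patch size is an assumption.
lr_patches = tf.placeholder(tf.float32, [None, 32, 32, 3], name='lr_patches')
sr_output = generator(lr_patches, gen_output_channels=3, num_resblock=16)  # [None, 128, 128, 3]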