Example #1
def denseConvlayer(layer_inputs, bottleneck_scale, growth_rate, is_training):
    # Build the bottleneck operation
    net = layer_inputs
    net_temp = tf.identity(net)
    net = batchnorm(net, is_training)
    net = prelu_tf(net, name='Prelu_1')
    net = conv2(net,
                kernel=1,
                output_channel=bottleneck_scale * growth_rate,
                stride=1,
                use_bias=False,
                scope='conv1x1')
    net = batchnorm(net, is_training)
    net = prelu_tf(net, name='Prelu_2')
    net = conv2(net,
                kernel=3,
                output_channel=growth_rate,
                stride=1,
                use_bias=False,
                scope='conv3x3')

    # Concatenate the newly produced features with the layer input (dense connection)
    net = tf.concat([net_temp, net], axis=3)

    return net
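Example #2 below calls a denseBlock helper that is not shown in these snippets. A minimal sketch, assuming the block simply chains denseConvlayer from Example #1 (so the channel count grows by growth_rate per layer) and that tensorflow is imported as tf:

def denseBlock(block_inputs, layer_per_block, bottleneck_scale, growth_rate, is_training):
    # Hypothetical helper: stack layer_per_block dense conv layers; denseConvlayer
    # already concatenates its input with the growth_rate new feature maps.
    net = block_inputs
    for i in range(layer_per_block):
        with tf.variable_scope('dense_conv_%d' % (i + 1)):
            net = denseConvlayer(net, bottleneck_scale, growth_rate, is_training)
    return net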
Example #2
def generatorDense(gen_inputs, gen_output_channels, reuse=False, is_training=None):
    # The main network
    with tf.variable_scope('generator_unit', reuse=reuse):
        # The input stage
        with tf.variable_scope('input_stage'):
            net = conv2(gen_inputs, 9, 64, 1, scope='conv')
            net = prelu_tf(net)

        # The dense block part
        # Define the denseblock configuration
        layer_per_block = 16
        bottleneck_scale = 4
        growth_rate = 12
        transition_output_channel = 128
        with tf.variable_scope('denseBlock_1'):
            net = denseBlock(net, layer_per_block, bottleneck_scale, growth_rate, is_training)

        with tf.variable_scope('transition_layer_1'):
            net = transitionLayer(net, transition_output_channel, is_training)

        with tf.variable_scope('subpixelconv_stage1'):
            net = conv2(net, 3, 256, 1, scope='conv')
            net = pixelShuffler(net, scale=2)
            net = prelu_tf(net)

        with tf.variable_scope('subpixelconv_stage2'):
            net = conv2(net, 3, 256, 1, scope='conv')
            net = pixelShuffler(net, scale=2)
            net = prelu_tf(net)

        with tf.variable_scope('output_stage'):
            net = conv2(net, 9, gen_output_channels, 1, scope='conv')

        return net
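A hedged usage sketch for generatorDense, assuming NHWC inputs and that the helper ops used above are in scope; with two pixelShuffler(scale=2) stages the output is 4x the input resolution:

import tensorflow as tf

lr_inputs = tf.placeholder(tf.float32, [None, 24, 24, 3], name='lr_inputs')  # illustrative size
sr_outputs = generatorDense(lr_inputs, gen_output_channels=3, reuse=False, is_training=True)
# A 24x24 input yields a 96x96 output after the two 2x sub-pixel stages.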
Example #3
def transitionLayer(layer_inputs, output_channel, is_training):
    net = layer_inputs
    # net = batchnorm(net, is_training)
    net = prelu_tf(net)
    net = conv2(net, 1, output_channel, stride=1, use_bias=False, scope='conv1x1')

    return net
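A quick back-of-envelope check (hedged: it assumes denseBlock stacks denseConvlayer as sketched above) of the channel counts implied by the configuration in Example #2:

input_features = 64          # channels after the input stage
layer_per_block = 16
growth_rate = 12

dense_block_channels = input_features + layer_per_block * growth_rate   # 64 + 16 * 12 = 256
transition_output_channel = 128    # the 1x1 transition conv roughly halves the channel count
print(dense_block_channels, transition_output_channel)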
Example #4
def residual_block(inputs, output_channel, stride, scope):
    with tf.variable_scope(scope):
        net = conv2(inputs,
                    3,
                    output_channel,
                    stride,
                    use_bias=False,
                    scope='conv_1')
        net = prelu_tf(net)
        net = conv2(net,
                    3,
                    output_channel,
                    stride,
                    use_bias=False,
                    scope='conv_2')
        net = net + inputs
    return net
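The snippets rely on conv2, prelu_tf, and batchnorm helpers that are not reproduced here. Minimal sketches of plausible implementations follow (hedged: the actual helpers may differ, e.g. in weight initializers or the batch-norm variant used):

import tensorflow as tf

def conv2(inputs, kernel, output_channel, stride, use_bias=True, scope='conv'):
    # Plain 2D convolution with 'same' padding, matching the call sites above.
    with tf.variable_scope(scope):
        return tf.layers.conv2d(inputs, output_channel, kernel, strides=stride,
                                padding='same', use_bias=use_bias)

def prelu_tf(inputs, name='Prelu'):
    # Parametric ReLU with one learned negative slope per channel.
    with tf.variable_scope(name):
        alphas = tf.get_variable('alpha', inputs.get_shape()[-1],
                                 initializer=tf.zeros_initializer(), dtype=tf.float32)
    pos = tf.nn.relu(inputs)
    neg = alphas * (inputs - tf.abs(inputs)) * 0.5
    return pos + neg

def batchnorm(inputs, is_training):
    # Batch normalization wrapper; the real helper may use tf.contrib.layers.batch_norm.
    return tf.layers.batch_normalization(inputs, training=is_training)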
Example #5
def residual_block(inputs, output_channels, stride, scope):
    with tf.variable_scope(scope):
        net = ops.conv3d(inputs,
                         3,
                         output_channels,
                         stride,
                         use_bias=False,
                         scope='conv_1')
        if (FLAGS.GAN_type == 'GAN'):
            net = ops.batchnorm(net, FLAGS.is_training)
        net = ops.prelu_tf(net)
        net = ops.conv3d(net,
                         3,
                         output_channels,
                         stride,
                         use_bias=False,
                         scope='conv_2')
        if (FLAGS.GAN_type == 'GAN'):
            net = ops.batchnorm(net, FLAGS.is_training)
        net = net + inputs
    return net
Example #6
def generator(gen_inputs, gen_output_channels, reuse=False, FLAGS=None):
    # Check the flag
    if FLAGS is None:
        raise ValueError('No FLAGS is provided for generator')

    # The Bx residual blocks
    def residual_block(inputs, output_channels, stride, scope):
        with tf.variable_scope(scope):
            net = ops.conv3d(inputs,
                             3,
                             output_channels,
                             stride,
                             use_bias=False,
                             scope='conv_1')
            if (FLAGS.GAN_type == 'GAN'):
                net = ops.batchnorm(net, FLAGS.is_training)
            net = ops.prelu_tf(net)
            net = ops.conv3d(net,
                             3,
                             output_channels,
                             stride,
                             use_bias=False,
                             scope='conv_2')
            if (FLAGS.GAN_type == 'GAN'):
                net = ops.batchnorm(net, FLAGS.is_training)
            net = net + inputs
        return net

    with tf.variable_scope('generator_unit', reuse=reuse):
        # The input layer
        with tf.variable_scope('input_stage'):
            net = ops.conv3d(gen_inputs, 9, 64, 1, scope='conv')
            net = ops.prelu_tf(net)

        stage1_output = net

        # The residual block parts
        for i in range(1, FLAGS.num_resblock + 1, 1):
            name_scope = 'resblock_%d' % (i)
            net = residual_block(net, 64, 1, name_scope)

        with tf.variable_scope('resblock_output'):
            net = ops.conv3d(net, 3, 64, 1, use_bias=False, scope='conv')
            if (FLAGS.GAN_type == 'GAN'):
                net = ops.batchnorm(net, FLAGS.is_training)

        net = net + stage1_output

        with tf.variable_scope('subpixelconv_stage1'):
            net = ops.conv3d(net, 3, 256, 1, scope='conv')
            net = ops.pixelShuffler(net, scale=2)
            net = ops.prelu_tf(net)

        with tf.variable_scope('subpixelconv_stage2'):
            net = ops.conv3d(net, 3, 256, 1, scope='conv')
            net = ops.pixelShuffler(net, scale=2)
            net = ops.prelu_tf(net)

        with tf.variable_scope('output_stage'):
            net = ops.conv3d(net, 9, gen_output_channels, 1, scope='conv')

    return net
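The generator in Example #6 reads three fields from FLAGS: num_resblock, GAN_type, and is_training. A hedged sketch of how they might be declared with tf.app.flags (default values here are illustrative only):

import tensorflow as tf

Flags = tf.app.flags
Flags.DEFINE_integer('num_resblock', 16, 'Number of residual blocks in the generator')
Flags.DEFINE_string('GAN_type', 'GAN', 'GAN variant; batchnorm is applied only when it is "GAN"')
Flags.DEFINE_boolean('is_training', True, 'Whether batchnorm runs in training mode')
FLAGS = Flags.FLAGS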
Example #7
def generator_split(gen_inputs,
                    gen_output_channels,
                    num_resblock=16,
                    reuse=False,
                    is_training=None):
    # The Bx residual blocks
    def residual_block(inputs, output_channel, stride, scope):
        with tf.variable_scope(scope):
            net = conv2(inputs,
                        3,
                        output_channel,
                        stride,
                        use_bias=False,
                        scope='conv_1')
            net = prelu_tf(net)
            net = conv2(net,
                        3,
                        output_channel,
                        stride,
                        use_bias=False,
                        scope='conv_2')
            net = net + inputs
        return net

    with tf.variable_scope('generator_unit', reuse=reuse):
        # The input layer
        with tf.variable_scope('input_stage'):
            net = conv2(gen_inputs, 9, 64, 1, scope='conv')
            net = prelu_tf(net)
        stage1_output = net
        # The residual block parts
        for i in range(1, num_resblock + 1, 1):
            name_scope = 'resblock_%d' % (i)
            net = residual_block(net, 64, 1, name_scope)
        with tf.variable_scope('resblock_output'):
            net = conv2(net, 3, 64, 1, use_bias=False, scope='conv')
        net = net + stage1_output
    # Split the features along the height axis into two overlapping parts
    # (rows 0-16 and rows 15-end share a two-row overlap for boundary context).
    inputs_top = tf.slice(net, [0, 0, 0, 0], [-1, 17, -1, -1])
    inputs_down = tf.slice(net, [0, 15, 0, 0], [-1, -1, -1, -1])
    with tf.variable_scope('generator_unit_1', reuse=reuse):
        with tf.variable_scope('subpixelconv_stage1'):
            net = relate_conv(inputs_top, 64, 64, scope='conv')
            net = pixelShuffler(net, scale=2)
            net = prelu_tf(net)
        with tf.variable_scope('subpixelconv_stage2'):
            net = relate_conv(net, 64, 64, scope='conv')
            net = pixelShuffler(net, scale=2)
            net_top = prelu_tf(net)
    with tf.variable_scope('generator_unit_2', reuse=reuse):
        with tf.variable_scope('subpixelconv_stage1'):
            net = relate_conv(inputs_down, 64, 64, scope='conv')
            net = pixelShuffler(net, scale=2)
            net = prelu_tf(net)
        with tf.variable_scope('subpixelconv_stage2'):
            net = relate_conv(net, 64, 64, scope='conv')
            net = pixelShuffler(net, scale=2)
            net_down = prelu_tf(net)
    # Recombine along the height axis: keep the first 64 upsampled rows of the top part
    # (16 original rows x 4) and drop the first 4 rows of the bottom part, so the
    # overlap is removed and the full height is covered exactly once.
    net = tf.concat([
        tf.slice(net_top, [0, 0, 0, 0], [-1, 64, -1, -1]),
        tf.slice(net_down, [0, 4, 0, 0], [-1, -1, -1, -1])
    ],
                    axis=1)
    with tf.variable_scope('output_stage'):
        net = conv2(net, 9, gen_output_channels, 1, scope='conv')
        net = tf.nn.tanh(net)
    return net
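A small sanity check of the row bookkeeping in generator_split (hedged: np.repeat stands in for the 4x sub-pixel upsampling, since only the height indexing matters here). The two halves overlap by two rows; keeping the first 64 upsampled rows of the top half and dropping the first 4 rows of the bottom half covers the full height exactly once:

import numpy as np

rows = np.arange(32)                      # example trunk-output height of 32 rows
top = rows[:17]                           # rows 0-16
down = rows[15:]                          # rows 15-31 (2-row overlap with the top)

top_up = np.repeat(top, 4)                # stand-in for two pixelShuffler(scale=2) stages
down_up = np.repeat(down, 4)

merged = np.concatenate([top_up[:64], down_up[4:]])
assert np.array_equal(merged, np.repeat(rows, 4))   # seamless, no duplicated rows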
Example #8
def generator(gen_inputs,
              gen_output_channels,
              num_resblock=16,
              reuse=False,
              is_training=None):
    # The Bx residual blocks
    def residual_block(inputs, output_channel, stride, scope):
        with tf.variable_scope(scope):
            net = conv2(inputs,
                        3,
                        output_channel,
                        stride,
                        use_bias=False,
                        scope='conv_1')
            # net = batchnorm(net, is_training)
            net = prelu_tf(net)
            net = conv2(net,
                        3,
                        output_channel,
                        stride,
                        use_bias=False,
                        scope='conv_2')
            # net = batchnorm(net, is_training)
            net = net + inputs
        return net

    with tf.variable_scope('generator_unit', reuse=reuse):
        # The input layer
        with tf.variable_scope('input_stage'):
            net = conv2(gen_inputs, 9, 64, 1, scope='conv')
            net = prelu_tf(net)
        stage1_output = net
        # The residual block parts
        for i in range(1, num_resblock + 1, 1):
            name_scope = 'resblock_%d' % (i)
            net = residual_block(net, 64, 1, name_scope)
        with tf.variable_scope('resblock_output'):
            net = conv2(net, 3, 64, 1, use_bias=False, scope='conv')
            # net = batchnorm(net, is_training)
        net = net + stage1_output
        with tf.variable_scope('subpixelconv_stage1'):
            # net = conv2(net, 3, 256, 1, scope='conv')
            net = subpixel_pre(net,
                               input_channel=64,
                               output_channel=256,
                               scope='conv')
            # net = relate_conv(net, 64, 64, scope='conv')
            # net = interpolation_conv(net, 64, 64, scope='conv')
            net = pixelShuffler(net, scale=2)
            net = prelu_tf(net)
        with tf.variable_scope('subpixelconv_stage2'):
            # net = conv2(net, 3, 256, 1, scope='conv')
            net = subpixel_pre(net,
                               input_channel=64,
                               output_channel=256,
                               scope='conv')
            # net = relate_conv(net, 64, 64, scope='conv')
            # net = interpolation_conv(net, 64, 64, scope='conv')
            net = pixelShuffler(net, scale=2)
            net = prelu_tf(net)
        with tf.variable_scope('output_stage'):
            net = conv2(net, 9, gen_output_channels, 1, scope='conv')
            # net = tf.nn.tanh(net)
    return net
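The sub-pixel upsampling in these generators depends on a pixelShuffler helper (fed here by subpixel_pre, which presumably expands 64 channels to 256 before the shuffle). A minimal sketch, assuming NHWC tensors; TensorFlow's built-in depth_to_space performs the same periodic rearrangement, up to channel ordering:

import tensorflow as tf

def pixelShuffler(inputs, scale=2):
    # Rearrange C channels into C / scale**2 channels at scale x the spatial resolution,
    # e.g. 256 channels at HxW become 64 channels at 2Hx2W for scale=2.
    return tf.depth_to_space(inputs, scale)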