Example #1
def build_resnet_block_ds(inputres, dim_in, dim_out, name="resnet", padding="REFLECT", norm_type=None, is_training=True, keep_rate=0.75):

    with tf.variable_scope(name):
        out_res = tf.pad(inputres, [[0, 0], [1, 1], [1, 1], [0, 0]], padding)
        out_res = layers.general_conv2d(out_res, dim_out, 3, 3, 1, 1, 0.01, "VALID", "c1", norm_type=norm_type, is_training=is_training, keep_rate=keep_rate)
        out_res = tf.pad(out_res, [[0, 0], [1, 1], [1, 1], [0, 0]], padding)
        out_res = layers.general_conv2d(out_res, dim_out, 3, 3, 1, 1, 0.01, "VALID", "c2", do_relu=False, norm_type=norm_type, is_training=is_training, keep_rate=keep_rate)

        inputres = tf.pad(inputres, [[0, 0], [0, 0], [0, 0], [(dim_out - dim_in) // 2, (dim_out - dim_in) // 2]], padding)

        return tf.nn.relu(out_res + inputres)
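
The residual input above is padded along the channel axis so it can be added to the wider block output. A minimal check of the arithmetic, with hypothetical values dim_in = 64 and dim_out = 128:

dim_in, dim_out = 64, 128
pad = (dim_out - dim_in) // 2        # 32 channels padded onto each side
assert dim_in + 2 * pad == dim_out   # the padded skip matches out_res's depth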
Example #2
def build_generator_resnet_9blocks(inputgen, name="generator", skip=False):
    with tf.variable_scope(name):
        f = 7
        ks = 3
        padding = "CONSTANT"

        pad_input = tf.pad(inputgen, [[0, 0], [ks, ks], [ks, ks], [0, 0]],
                           padding)
        o_c1 = layers.general_conv2d(pad_input,
                                     ngf,
                                     f,
                                     f,
                                     1,
                                     1,
                                     0.02,
                                     name="c1")
        o_c2 = layers.general_conv2d(o_c1, ngf * 2, ks, ks, 2, 2, 0.02, "SAME",
                                     "c2")
        o_c3 = layers.general_conv2d(o_c2, ngf * 4, ks, ks, 2, 2, 0.02, "SAME",
                                     "c3")

        o_r1 = build_resnet_block(o_c3, ngf * 4, "r1", padding)
        o_r2 = build_resnet_block(o_r1, ngf * 4, "r2", padding)
        o_r3 = build_resnet_block(o_r2, ngf * 4, "r3", padding)
        o_r4 = build_resnet_block(o_r3, ngf * 4, "r4", padding)
        o_r5 = build_resnet_block(o_r4, ngf * 4, "r5", padding)
        o_r6 = build_resnet_block(o_r5, ngf * 4, "r6", padding)
        o_r7 = build_resnet_block(o_r6, ngf * 4, "r7", padding)
        o_r8 = build_resnet_block(o_r7, ngf * 4, "r8", padding)
        o_r9 = build_resnet_block(o_r8, ngf * 4, "r9", padding)

        o_c4 = layers.general_deconv2d(o_r9, [BATCH_SIZE, 128, 128, ngf * 2],
                                       ngf * 2, ks, ks, 2, 2, 0.02, "SAME",
                                       "c4")
        o_c5 = layers.general_deconv2d(o_c4, [BATCH_SIZE, 256, 256, ngf], ngf,
                                       ks, ks, 2, 2, 0.02, "SAME", "c5")
        o_c6 = layers.general_conv2d(o_c5,
                                     IMG_CHANNELS,
                                     f,
                                     f,
                                     1,
                                     1,
                                     0.02,
                                     "SAME",
                                     "c6",
                                     do_norm=False,
                                     do_relu=False)

        if skip is True:
            out_gen = tf.nn.tanh(inputgen + o_c6, "t1")
        else:
            out_gen = tf.nn.tanh(o_c6, "t1")

        return out_gen
Example #3
def build_segmenternew(inputse, name='segmenter', keep_rate=0.75):
    with tf.variable_scope(name):
        f = 7
        ks = 3
        padding = "CONSTANT"
        
        o_c1 = layers.general_conv2d(inputse, ngf * 4, ks, ks, 1, 1, 0.02, "SAME", "c1", norm_type='Ins', keep_rate=keep_rate)
        o_r1 = build_resnet_block(o_c1, ngf * 4, "r1", padding, norm_type='Ins')
        o_r2 = build_resnet_block(o_r1, ngf * 4, "r2", padding, norm_type='Ins')
        o_r3 = build_resnet_block(o_r2, ngf * 4, "r3", padding, norm_type='Ins')
        o_r4 = build_resnet_block(o_r3, ngf * 4, "r4", padding, norm_type='Ins')
        o_c3 = layers.general_deconv2d(o_r4, [BATCH_SIZE, 64, 64, ngf * 2], ngf * 2, ks, ks, 2, 2, 0.02, "SAME", "c3", norm_type='Ins')
        o_c4 = layers.general_deconv2d(o_c3, [BATCH_SIZE, 128, 128, ngf * 2], ngf * 2, ks, ks, 2, 2, 0.02, "SAME", "c4", norm_type='Ins')
        o_c5 = layers.general_deconv2d(o_c4, [BATCH_SIZE, 256, 256, ngf], ngf, ks, ks, 2, 2, 0.02, "SAME", "c5", norm_type='Ins')
        o_c6 = layers.general_conv2d(o_c5, 4, f, f, 1, 1, 0.02, "SAME", "c6", do_norm=False, do_relu=False)

        return o_c6
Example #4
def build_segmenter(inputse, name='segmenter', keep_rate=0.75):
    with tf.variable_scope(name):

        k1 = 1

        o_c8 = layers.general_conv2d(inputse, 5, k1, k1, 1, 1, 0.01, 'SAME', 'c8', do_norm=False, do_relu=False, keep_rate=keep_rate)
        out_seg = tf.image.resize_images(o_c8, (256, 256))

        return out_seg
Example #5
def build_resnet_block(inputres, dim, name="resnet"):

    with tf.variable_scope(name):

        out_res = tf.pad(inputres, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
        out_res = general_conv2d(out_res, dim, 3, 3, 1, 1, 0.02, "VALID", "c1")
        out_res = tf.pad(out_res, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
        out_res = general_conv2d(out_res,
                                 dim,
                                 3,
                                 3,
                                 1,
                                 1,
                                 0.02,
                                 "VALID",
                                 "c2",
                                 do_relu=False)

        return tf.nn.relu(out_res + inputres)
Example #6
def discriminator_tf(inputdisc, name="discriminator"):
    with tf.variable_scope(name):
        f = 4

        o_c1 = layers.general_conv2d(inputdisc, ndf, f, f, 2, 2,
                                     0.02, "SAME", "c1", do_norm=False,
                                     relufactor=0.2)
        o_c2 = layers.general_conv2d(o_c1, ndf * 2, f, f, 2, 2,
                                     0.02, "SAME", "c2", relufactor=0.2)
        o_c3 = layers.general_conv2d(o_c2, ndf * 4, f, f, 2, 2,
                                     0.02, "SAME", "c3", relufactor=0.2)
        o_c4 = layers.general_conv2d(o_c3, ndf * 8, f, f, 1, 1,
                                     0.02, "SAME", "c4", relufactor=0.2)
        o_c5 = layers.general_conv2d(
            o_c4, 1, f, f, 1, 1, 0.02,
            "SAME", "c5", do_norm=False, do_relu=False
        )

        return o_c5
Example #7
def patch_discriminator(inputdisc, name="discriminator"):
    with tf.variable_scope(name):
        f = 4

        patch_input = tf.random_crop(inputdisc, [1, 7, 7, 3])
        o_c1 = layers.general_conv2d(patch_input, ndf, f, f, 2, 2,
                                     0.02, "SAME", "c1", do_norm="False",
                                     relufactor=0.2)
        o_c2 = layers.general_conv2d(o_c1, ndf * 2, f, f, 2, 2,
                                     0.02, "SAME", "c2", relufactor=0.2)
        o_c3 = layers.general_conv2d(o_c2, ndf * 4, f, f, 2, 2,
                                     0.02, "SAME", "c3", relufactor=0.2)
        o_c4 = layers.general_conv2d(o_c3, ndf * 8, f, f, 2, 2,
                                     0.02, "SAME", "c4", relufactor=0.2)
        o_c5 = layers.general_conv2d(
            o_c4, 1, f, f, 1, 1, 0.02, "SAME", "c5", do_norm=False,
            do_relu=False)

        return o_c5
Example #8
def build_resnet_block(inputres, dim, name="resnet", padding="REFLECT"):
    """build a single block of resnet.

    :param inputres: inputres
    :param dim: dim
    :param name: name
    :param padding: for tensorflow version use REFLECT; for pytorch version use
     CONSTANT
    :return: a single block of resnet.
    """
    with tf.variable_scope(name):
        out_res = tf.pad(inputres, [[0, 0], [1, 1], [1, 1], [0, 0]], padding)
        out_res = layers.general_conv2d(
            out_res, dim, 3, 3, 1, 1, 0.02, "VALID", "c1")
        out_res = tf.pad(out_res, [[0, 0], [1, 1], [1, 1], [0, 0]], padding)
        out_res = layers.general_conv2d(
            out_res, dim, 3, 3, 1, 1, 0.02, "VALID", "c2", do_relu=False)

        return tf.nn.relu(out_res + inputres)
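
A minimal usage sketch (TensorFlow 1.x, NHWC; the feature shape is hypothetical). Each 3x3 VALID convolution removes the one-pixel border that the preceding pad added, so the block preserves shape under either padding mode:

feat = tf.placeholder(tf.float32, [1, 64, 64, 128])
out_tf = build_resnet_block(feat, 128, name="r_tf", padding="REFLECT")   # tensorflow-style
out_pt = build_resnet_block(feat, 128, name="r_pt", padding="CONSTANT")  # pytorch-style
# Both outputs have the same shape as feat: [1, 64, 64, 128].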
Example #9
def build_generator_resnet_9blocks(inputgen, name="generator"):
    with tf.variable_scope(name):
        f = 7
        ks = 3

        pad_input = tf.pad(inputgen, [[0, 0], [ks, ks], [ks, ks], [0, 0]],
                           "REFLECT")
        o_c1 = general_conv2d(pad_input, ngf, f, f, 1, 1, 0.02, name="c1")
        o_c2 = general_conv2d(o_c1, ngf * 2, ks, ks, 2, 2, 0.02, "SAME", "c2")
        o_c3 = general_conv2d(o_c2, ngf * 4, ks, ks, 2, 2, 0.02, "SAME", "c3")

        o_r1 = build_resnet_block(o_c3, ngf * 4, "r1")
        o_r2 = build_resnet_block(o_r1, ngf * 4, "r2")
        o_r3 = build_resnet_block(o_r2, ngf * 4, "r3")
        o_r4 = build_resnet_block(o_r3, ngf * 4, "r4")
        o_r5 = build_resnet_block(o_r4, ngf * 4, "r5")
        o_r6 = build_resnet_block(o_r5, ngf * 4, "r6")
        o_r7 = build_resnet_block(o_r6, ngf * 4, "r7")
        o_r8 = build_resnet_block(o_r7, ngf * 4, "r8")
        o_r9 = build_resnet_block(o_r8, ngf * 4, "r9")

        o_c4 = general_deconv2d(o_r9, [batch_size, 128, 128, ngf * 2], ngf * 2,
                                ks, ks, 2, 2, 0.02, "SAME", "c4")
        o_c5 = general_deconv2d(o_c4, [batch_size, 256, 256, ngf], ngf, ks, ks,
                                2, 2, 0.02, "SAME", "c5")
        o_c6 = general_conv2d(o_c5,
                              img_layer,
                              f,
                              f,
                              1,
                              1,
                              0.02,
                              "SAME",
                              "c6",
                              do_relu=False)

        # Adding the tanh layer

        out_gen = tf.nn.tanh(o_c6, "t1")

        return out_gen
Example #10
def discriminator_aux(inputdisc, name="discriminator"):
    with tf.variable_scope(name):
        f = 4
        padw = 2

        pad_input = tf.pad(inputdisc, [[0, 0], [padw, padw], [padw, padw], [0, 0]], "CONSTANT")
        o_c1 = layers.general_conv2d(pad_input, ndf, f, f, 2, 2, 0.02, "VALID", "c1", do_norm=False, relufactor=0.2, norm_type='Ins')

        pad_o_c1 = tf.pad(o_c1, [[0, 0], [padw, padw], [padw, padw], [0, 0]], "CONSTANT")
        o_c2 = layers.general_conv2d(pad_o_c1, ndf * 2, f, f, 2, 2, 0.02, "VALID", "c2", relufactor=0.2, norm_type='Ins')

        pad_o_c2 = tf.pad(o_c2, [[0, 0], [padw, padw], [padw, padw], [0, 0]], "CONSTANT")
        o_c3 = layers.general_conv2d(pad_o_c2, ndf * 4, f, f, 2, 2, 0.02, "VALID", "c3", relufactor=0.2, norm_type='Ins')

        pad_o_c3 = tf.pad(o_c3, [[0, 0], [padw, padw], [padw, padw], [0, 0]], "CONSTANT")
        o_c4 = layers.general_conv2d(pad_o_c3, ndf * 8, f, f, 1, 1, 0.02, "VALID", "c4", relufactor=0.2, norm_type='Ins')

        pad_o_c4 = tf.pad(o_c4, [[0, 0], [padw, padw], [padw, padw], [0, 0]], "CONSTANT")
        o_c5 = layers.general_conv2d(pad_o_c4, 2, f, f, 1, 1, 0.02, "VALID", "c5", do_norm=False, do_relu=False)
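        # o_c5 has two channels; the return below splits them into two single-channel maps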

        return tf.expand_dims(o_c5[..., 0], axis=3), tf.expand_dims(o_c5[..., 1], axis=3)
Example #11
def build_decoderc(inputde, name='decoder', skip=False):
    with tf.variable_scope(name):
        ks = 3
        padding = "CONSTANT"

        o_c1 = layers.general_conv2d(inputde, ngf * 4, ks, ks, 1, 1, 0.02, "SAME", "c1", norm_type='Ins')
        o_r1 = build_resnet_block(o_c1, ngf * 4, "r1", padding, norm_type='Ins')
        o_r2 = build_resnet_block(o_r1, ngf * 4, "r2", padding, norm_type='Ins')
        o_r3 = build_resnet_block(o_r2, ngf * 4, "r3", padding, norm_type='Ins')
        o_r4 = build_resnet_block(o_r3, ngf * 4, "r4", padding, norm_type='Ins')

        return o_r4
Example #12
def discriminator(inputdisc,  mask, transition_rate, donorm,  name="discriminator"):

    with tf.variable_scope(name):
        mask = tf.cast(tf.greater_equal(mask, transition_rate), tf.float32)
        inputdisc = tf.multiply(inputdisc, mask)
        f = 4
        padw = 2
        pad_input = tf.pad(inputdisc, [[0, 0], [padw, padw], [padw, padw], [0, 0]], "CONSTANT")

        o_c1 = layers.general_conv2d(pad_input, donorm, ndf, f, f, 2, 2,
                                     0.02, "VALID", "c1",
                                     relufactor=0.2)

        pad_o_c1 = tf.pad(o_c1, [[0, 0], [padw, padw], [padw, padw], [0, 0]], "CONSTANT")

        o_c2 = layers.general_conv2d(pad_o_c1, donorm, ndf * 2, f, f, 2, 2,
                                     0.02, "VALID", "c2",  relufactor=0.2)

        pad_o_c2 = tf.pad(o_c2, [[0, 0], [padw, padw], [padw, padw], [0, 0]], "CONSTANT")

        o_c3 = layers.general_conv2d(pad_o_c2, donorm, ndf * 4, f, f, 2, 2,
                                     0.02, "VALID", "c3", relufactor=0.2)

        pad_o_c3 = tf.pad(o_c3, [[0, 0], [padw, padw], [padw, padw], [0, 0]], "CONSTANT")

        o_c4 = layers.general_conv2d(pad_o_c3, donorm, ndf * 8, f, f, 1, 1,
                                     0.02, "VALID", "c4", relufactor=0.2)
        # o_c4 = tf.multiply(o_c4, mask_4)
        pad_o_c4 = tf.pad(o_c4, [[0, 0], [padw, padw], [padw, padw], [0, 0]], "CONSTANT")

        o_c5 = layers.general_conv2d(
            pad_o_c4, tf.constant(False, dtype=bool), 1, f, f, 1, 1, 0.02, "VALID", "c5", do_relu=False)

        return o_c5
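
A small illustration of the mask gating at the top of this discriminator, with hypothetical values: mask entries below transition_rate binarize to 0 and zero out the corresponding input pixels.

mask = tf.constant([[0.2, 0.8]])
gate = tf.cast(tf.greater_equal(mask, 0.5), tf.float32)   # [[0., 1.]]
gated = tf.multiply(tf.constant([[3.0, 5.0]]), gate)      # [[0., 5.]]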
Example #13
def build_encoderdiffb(inputen, name='encoder', is_training=True, skip=False, keep_rate=0.75):
    with tf.variable_scope(name):
        fb = 8
        k1 = 3
        padding = "CONSTANT"

        o_c1 = layers.general_conv2d(inputen, fb, 7, 7, 1, 1, 0.01, 'SAME', name="c1", norm_type="Batch", is_training=is_training, keep_rate=keep_rate)
        o_r1 = build_resnet_block(o_c1, fb, "r1", padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)
        out1 = tf.nn.max_pool(o_r1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

        o_r2 = build_resnet_block_ds(out1, fb, fb*2, "r2", padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)
        out2 = tf.nn.max_pool(o_r2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

        o_r3 = build_resnet_block_ds(out2, fb*2, fb*4, 'r3', padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)
        o_r4 = build_resnet_block(o_r3, fb*4, 'r4', padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)
        out3 = tf.nn.max_pool(o_r4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

        o_c2 = layers.general_conv2d(out3, 32, k1, k1, 1, 1, 0.01, 'SAME', 'c2', norm_type='Batch', is_training=is_training, keep_rate=keep_rate)
        o_c3 = layers.general_conv2d(o_c2, 32, k1, k1, 1, 1, 0.01, 'SAME', 'c3', norm_type='Batch', is_training=is_training, keep_rate=keep_rate)


        return o_c3
Example #14
def build_decoder(inputde, inputimg, name='decoder', skip=False):
    with tf.variable_scope(name):
        f = 7
        ks = 3
        padding = "CONSTANT"

        o_c1 = layers.general_conv2d(inputde, ngf * 4, ks, ks, 1, 1, 0.02, "SAME", "c1", norm_type='Ins')
        o_r1 = build_resnet_block(o_c1, ngf * 4, "r1", padding, norm_type='Ins')
        o_r2 = build_resnet_block(o_r1, ngf * 4, "r2", padding, norm_type='Ins')
        o_r3 = build_resnet_block(o_r2, ngf * 4, "r3", padding, norm_type='Ins')
        o_r4 = build_resnet_block(o_r3, ngf * 4, "r4", padding, norm_type='Ins')
        o_c3 = layers.general_deconv2d(o_r4, [BATCH_SIZE, 64, 64, ngf * 2], ngf * 2, ks, ks, 2, 2, 0.02, "SAME", "c3", norm_type='Ins')
        o_c4 = layers.general_deconv2d(o_c3, [BATCH_SIZE, 128, 128, ngf * 2], ngf * 2, ks, ks, 2, 2, 0.02, "SAME", "c4", norm_type='Ins')
        o_c5 = layers.general_deconv2d(o_c4, [BATCH_SIZE, 256, 256, ngf], ngf, ks, ks, 2, 2, 0.02, "SAME", "c5", norm_type='Ins')
        o_c6 = layers.general_conv2d(o_c5, 1, f, f, 1, 1, 0.02, "SAME", "c6", do_norm=False, do_relu=False)

        if skip is True:
            out_gen = tf.nn.tanh(inputimg + o_c6, "t1")
        else:
            out_gen = tf.nn.tanh(o_c6, "t1")

        return out_gen
Example #15
def build_encoder(inputen, name='encoder', skip=False, is_training=True, keep_rate=0.75):
    with tf.variable_scope(name):
        fb = 16
        k1 = 3
        padding = "CONSTANT"

        o_c1 = layers.general_conv2d(inputen, fb, 7, 7, 1, 1, 0.01, 'SAME', name="c1", norm_type="Batch", is_training=is_training, keep_rate=keep_rate)
        o_r1 = build_resnet_block(o_c1, fb, "r1", padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)
        out1 = tf.nn.max_pool(o_r1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

        o_r2 = build_resnet_block_ds(out1, fb, fb*2, "r2", padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)
        out2 = tf.nn.max_pool(o_r2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

        o_r3 = build_resnet_block_ds(out2, fb*2, fb*4, 'r3', padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)
        o_r4 = build_resnet_block(o_r3, fb*4, 'r4', padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)
        out3 = tf.nn.max_pool(o_r4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

        o_r5 = build_resnet_block_ds(out3, fb*4, fb*8, 'r5', padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)
        o_r6 = build_resnet_block(o_r5, fb*8, 'r6', padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)

        o_r7 = build_resnet_block_ds(o_r6, fb*8, fb*16, 'r7', padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)
        o_r8 = build_resnet_block(o_r7, fb*16, 'r8', padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)

        o_r9 = build_resnet_block(o_r8, fb*16, 'r9', padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)
        o_r10 = build_resnet_block(o_r9, fb * 16, 'r10', padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)

        o_r11 = build_resnet_block_ds(o_r10, fb * 16, fb * 32, 'r11', padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)
        o_r12 = build_resnet_block(o_r11, fb * 32, 'r12', padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)

        o_d1 = build_drn_block(o_r12, fb*32, 'd1', padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)
        o_d2 = build_drn_block(o_d1, fb*32, 'd2', padding, norm_type='Batch', is_training=is_training, keep_rate=keep_rate)

        o_c2 = layers.general_conv2d(o_d2, fb*32, k1, k1, 1, 1, 0.01, 'SAME', 'c2', norm_type='Batch', is_training=is_training, keep_rate=keep_rate)
        o_c3 = layers.general_conv2d(o_c2, fb*32, k1, k1, 1, 1, 0.01, 'SAME', 'c3', norm_type='Batch', is_training=is_training, keep_rate=keep_rate)


        return o_c3
Example #16
def attention_2(x, ch, name='attention', is_training=True, keep_rate=0.75):
    with tf.variable_scope(name):
        batch_size, height, width, num_channels = x.get_shape().as_list()
        #print("X:",num_channels)
        #print("batch_size:", batch_size)
        f = layers.general_conv2d(x, ch//8, 1, 1, 1, 1, 0.01, 'SAME', 'f1', norm_type='Batch', is_training=is_training,
                                  keep_rate=keep_rate)#64
        f = tf.nn.max_pool(f, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')#32
        #print("f:", f.get_shape().as_list())

        g = layers.general_conv2d(x, ch//8, 1, 1, 1, 1, 0.01, 'SAME', 'f2', norm_type='Batch', is_training=is_training,
                                  keep_rate=keep_rate)#64
        #print("g:", g.get_shape().as_list())

        h = layers.general_conv2d(x, ch//2, 1, 1, 1, 1, 0.01, 'SAME', 'f3', norm_type='Batch', is_training=is_training,
                                  keep_rate=keep_rate)
        h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        #print("h:", h.get_shape().as_list())

        # N = h * w
        s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True)  # [bs, N, N]

        beta = tf.nn.softmax(s)  # attention map

        o = tf.matmul(beta, hw_flatten(h))  # [bs, N, C]
        gamma = tf.get_variable("gamma", [1], initializer=tf.constant_initializer(0.0))

        o = tf.reshape(o, shape=[tf.shape(x)[0], height, width, num_channels // 2])  # [bs, h, w, C]
        #print("0:", o.get_shape().as_list())

        # o = tf.nn.conv(o, ch, kernel=1, stride=1, sn=True, scope='attn_conv')
        o = layers.general_conv2d(o, ch, 1, 1, 1, 1, 0.01,'VALID', name="f4",norm_type='Batch', do_relu=False, is_training=is_training,
                                  keep_rate=keep_rate)#64
        x = gamma * o + x

        return x
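
attention_2 relies on an hw_flatten helper that is not shown in this listing. A minimal sketch of what it presumably does, consistent with the [bs, N, C] shapes in the matmuls above (collapse height and width into one axis, keeping the batch dimension dynamic):

def hw_flatten(x):
    # [bs, h, w, c] -> [bs, h * w, c]
    return tf.reshape(x, [tf.shape(x)[0], -1, x.get_shape().as_list()[-1]])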
Example #17
def build_decodernewb(o_r4, inputimg, name='decoder', skip=False):
    with tf.variable_scope(name):
        f = 7
        ks = 3

        o_c3 = layers.general_deconv2d(o_r4, [BATCH_SIZE, 64, 64, ngf * 2], ngf * 2, ks, ks, 2, 2, 0.02, "SAME", "c3", norm_type='Ins')
        o_c4 = layers.general_deconv2d(o_c3, [BATCH_SIZE, 128, 128, ngf * 2], ngf * 2, ks, ks, 2, 2, 0.02, "SAME", "c4", norm_type='Ins')
        o_c5 = layers.general_deconv2d(o_c4, [BATCH_SIZE, 256, 256, ngf], ngf, ks, ks, 2, 2, 0.02, "SAME", "c5", norm_type='Ins')
        o_c6 = layers.general_conv2d(o_c5, 1, f, f, 1, 1, 0.02, "SAME", "c6", do_norm=False, do_relu=False)

        if skip is True:
            out_gen = tf.nn.tanh(inputimg + o_c6, "t1")
        else:
            out_gen = tf.nn.tanh(o_c6, "t1")

        return out_gen
Example #18
def build_generator_resnet_9blocks_tf(inputgen, name="generator", skip=False):
    with tf.variable_scope(name):
        f = 7
        ks = 3
        padding = "REFLECT"
        # tf.pad() is just a way of padding the input; see http://m.blog.csdn.net/zhang_bei_qing/article/details/75090203
        pad_input = tf.pad(inputgen, [[0, 0], [ks, ks], [ks, ks], [0, 0]],
                           padding)
        # The following three convolution layers extract the main features of the
        # input image (the encoder).
        # The input goes from [256, 256, 3] -> [256, 256, 32] -> [128, 128, 64] -> [64, 64, 128].
        o_c1 = layers.general_conv2d(pad_input,
                                     ngf,
                                     f,
                                     f,
                                     1,
                                     1,
                                     0.02,
                                     name="c1")
        o_c2 = layers.general_conv2d(o_c1, ngf * 2, ks, ks, 2, 2, 0.02, "SAME",
                                     "c2")
        o_c3 = layers.general_conv2d(o_c2, ngf * 4, ks, ks, 2, 2, 0.02, "SAME",
                                     "c3")

        # The following nine layers form the transformer. Their role is to combine
        # different nearby features of the image and, based on those features, decide
        # how to translate the image's feature vector OAenc from domain DA into a
        # feature vector in domain DB.
        # o_r9 is the final output of the transformer, of size [64, 64, 128]; it can
        # be viewed as the feature vector of the image in domain DB.
        """
        build_resnet_block is a layer built from two convolutions in which part of
        the input is added directly to the output. This ensures that information from
        earlier layers acts directly on later ones, keeping the output close to the
        original input; otherwise the features of the original image would not be
        preserved in the output, and the result would drift away from the target
        contours. As mentioned above, a main goal of this task is to preserve
        characteristics of the original image, such as the size and shape of objects,
        so residual networks are well suited to these translations.
        """
        o_r1 = build_resnet_block(o_c3, ngf * 4, "r1", padding)
        o_r2 = build_resnet_block(o_r1, ngf * 4, "r2", padding)
        o_r3 = build_resnet_block(o_r2, ngf * 4, "r3", padding)
        o_r4 = build_resnet_block(o_r3, ngf * 4, "r4", padding)
        o_r5 = build_resnet_block(o_r4, ngf * 4, "r5", padding)
        o_r6 = build_resnet_block(o_r5, ngf * 4, "r6", padding)
        o_r7 = build_resnet_block(o_r6, ngf * 4, "r7", padding)
        o_r8 = build_resnet_block(o_r7, ngf * 4, "r8", padding)
        o_r9 = build_resnet_block(o_r8, ngf * 4, "r9", padding)

        # The following three layers are the decoder. Decoding is the exact reverse
        # of encoding: low-level features are recovered from the feature vector
        # using deconvolution (transposed convolution) layers.
        # o_c6 is the final output, of shape [1, 256, 256, 3].
        o_c4 = layers.general_deconv2d(o_r9, [BATCH_SIZE, 128, 128, ngf * 2],
                                       ngf * 2, ks, ks, 2, 2, 0.02, "SAME",
                                       "c4")
        o_c5 = layers.general_deconv2d(o_c4, [BATCH_SIZE, 256, 256, ngf], ngf,
                                       ks, ks, 2, 2, 0.02, "SAME", "c5")
        o_c6 = layers.general_conv2d(o_c5,
                                     IMG_CHANNELS,
                                     f,
                                     f,
                                     1,
                                     1,
                                     0.02,
                                     "SAME",
                                     "c6",
                                     do_norm=False,
                                     do_relu=False)

        if skip is True:
            out_gen = tf.nn.tanh(inputgen + o_c6, "t1")
        else:
            out_gen = tf.nn.tanh(o_c6, "t1")

        return out_gen
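
A shape walk-through of this generator for a [1, 256, 256, 3] input, assuming ngf = 32 and that "c1" uses VALID padding (which the manual tf.pad suggests); it matches the encoder comment above:

# pad         : [1, 256, 256, 3]  -> [1, 262, 262, 3]
# c1 (7x7, /1): [1, 262, 262, 3]  -> [1, 256, 256, 32]
# c2 (3x3, /2): [1, 256, 256, 32] -> [1, 128, 128, 64]
# c3 (3x3, /2): [1, 128, 128, 64] -> [1, 64, 64, 128]
# r1..r9      : [1, 64, 64, 128]  -> [1, 64, 64, 128]
# c4 (deconv) : [1, 64, 64, 128]  -> [1, 128, 128, 64]
# c5 (deconv) : [1, 128, 128, 64] -> [1, 256, 256, 32]
# c6 (7x7, /1): [1, 256, 256, 32] -> [1, 256, 256, IMG_CHANNELS]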
Example #19
def discriminator(inputdisc, mask, transition_rate, name="discriminator"):
    with tf.variable_scope(name):
        f = 4

        padw = 2

        pad_input = tf.pad(inputdisc,
                           [[0, 0], [padw, padw], [padw, padw], [0, 0]],
                           "CONSTANT")
        o_c1 = layers.general_conv2d(pad_input,
                                     ndf,
                                     f,
                                     f,
                                     2,
                                     2,
                                     0.02,
                                     "VALID",
                                     "c1",
                                     do_norm=False,
                                     relufactor=0.2)

        pad_o_c1 = tf.pad(o_c1, [[0, 0], [padw, padw], [padw, padw], [0, 0]],
                          "CONSTANT")
        o_c2 = layers.general_conv2d(pad_o_c1,
                                     ndf * 2,
                                     f,
                                     f,
                                     2,
                                     2,
                                     0.02,
                                     "VALID",
                                     "c2",
                                     relufactor=0.2)

        pad_o_c2 = tf.pad(o_c2, [[0, 0], [padw, padw], [padw, padw], [0, 0]],
                          "CONSTANT")
        o_c3 = layers.general_conv2d(pad_o_c2,
                                     ndf * 4,
                                     f,
                                     f,
                                     2,
                                     2,
                                     0.02,
                                     "VALID",
                                     "c3",
                                     relufactor=0.2)

        pad_o_c3 = tf.pad(o_c3, [[0, 0], [padw, padw], [padw, padw], [0, 0]],
                          "CONSTANT")
        o_c4 = layers.general_conv2d(pad_o_c3,
                                     ndf * 8,
                                     f,
                                     f,
                                     1,
                                     1,
                                     0.02,
                                     "VALID",
                                     "c4",
                                     relufactor=0.2)

        pad_o_c4 = tf.pad(o_c4, [[0, 0], [padw, padw], [padw, padw], [0, 0]],
                          "CONSTANT")
        o_c5 = layers.general_conv2d(pad_o_c4,
                                     1,
                                     f,
                                     f,
                                     1,
                                     1,
                                     0.02,
                                     "VALID",
                                     "c5",
                                     do_norm=False,
                                     do_relu=False)

        return o_c5
Example #20
def build_generator_resnet_9blocks_bis(inputgen,
                                       mask,
                                       transition_rate,
                                       name="generator",
                                       skip=False):
    """ Instead of normal convolutions, We make use of [Uhrig et al: Sparsity Invariant CNNs].
    However we omit the normalizing factor they use by replaqcing it by 1. This removes the
    effect of the bias coming from masked regions in the input.
    """
    with tf.variable_scope(name):
        f = 7
        ks = 3
        padding = "CONSTANT"
        inputgen = tf.pad(inputgen, [[0, 0], [ks, ks], [ks, ks], [0, 0]],
                          padding)
        mask = tf.pad(mask, [[0, 0], [ks, ks], [ks, ks], [0, 0]], padding)

        o_c1, mask = layers.general_partial_conv2d(inputgen,
                                                   mask,
                                                   tf.constant(False,
                                                               dtype=bool),
                                                   ngf,
                                                   f,
                                                   f,
                                                   1,
                                                   1,
                                                   0.02,
                                                   name="c1")

        o_c1_in = tf.multiply(o_c1, mask)

        o_c2, mask = layers.general_partial_conv2d(o_c1_in,
                                                   mask,
                                                   tf.constant(False,
                                                               dtype=bool),
                                                   ngf * 2,
                                                   ks,
                                                   ks,
                                                   2,
                                                   2,
                                                   0.02,
                                                   padding='same',
                                                   name="c2")

        o_c2_in = tf.multiply(o_c2, mask)

        o_c3, mask = layers.general_partial_conv2d(o_c2_in,
                                                   mask,
                                                   tf.constant(False,
                                                               dtype=bool),
                                                   ngf * 4,
                                                   ks,
                                                   ks,
                                                   2,
                                                   2,
                                                   0.02,
                                                   padding='same',
                                                   name="c3")

        o_c3_in = tf.multiply(o_c3, mask)

        o_r1, mask_r1 = build_partial_resnet_block(o_c3_in, mask, ngf * 4,
                                                   "r1")
        o_r2, mask_r2 = build_partial_resnet_block(o_r1, mask_r1, ngf * 4,
                                                   "r2")
        o_r3, mask_r3 = build_partial_resnet_block(o_r2, mask_r2, ngf * 4,
                                                   "r3")
        o_r4, mask_r4 = build_partial_resnet_block(o_r3, mask_r3, ngf * 4,
                                                   "r4")
        o_r5, mask_r5 = build_partial_resnet_block(o_r4, mask_r4, ngf * 4,
                                                   "r5")
        o_r6, mask_r6 = build_partial_resnet_block(o_r5, mask_r5, ngf * 4,
                                                   "r6")
        o_r7, mask_r7 = build_partial_resnet_block(o_r6, mask_r6, ngf * 4,
                                                   "r7")
        o_r8, mask_r8 = build_partial_resnet_block(o_r7, mask_r7, ngf * 4,
                                                   "r8")
        o_r9, mask_r9 = build_partial_resnet_block(o_r8, mask_r8, ngf * 4,
                                                   "r9")

        o_c4 = layers.general_deconv2d(o_r9, [BATCH_SIZE, 128, 128, ngf * 2],
                                       ngf * 2, ks, ks, 2, 2, 0.02, "SAME",
                                       "c4")
        o_c5 = layers.general_deconv2d(o_c4, [BATCH_SIZE, 256, 256, ngf], ngf,
                                       ks, ks, 2, 2, 0.02, "SAME", "c5")
        o_c6 = layers.general_conv2d(o_c5,
                                     IMG_CHANNELS,
                                     f,
                                     f,
                                     1,
                                     1,
                                     0.02,
                                     "SAME",
                                     "c6",
                                     do_norm=False,
                                     do_relu=False)

        if skip is True:
            out_gen = tf.nn.tanh(inputgen + o_c6, "t1")
        else:
            out_gen = tf.nn.tanh(o_c6, "t1")

        return out_gen
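
general_partial_conv2d is not shown in this listing. A minimal sketch of the mask propagation it presumably performs, following the docstring (Sparsity Invariant CNN-style masking with the normalizing factor replaced by 1; the helper name is hypothetical):

def mask_update(mask, k_h, k_w, s_h, s_w):
    # An output location stays valid if any input pixel under the kernel was valid.
    return tf.nn.max_pool(mask, [1, k_h, k_w, 1], [1, s_h, s_w, 1], "SAME")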
Example #21
def build_generator_resnet_9blocks_bis(inputgen,
                                       mask,
                                       transition_rate,
                                       name="generator",
                                       skip=False):

    with tf.variable_scope(name):
        f = 7
        ks = 3
        padding = "CONSTANT"
        inputgen = tf.pad(inputgen, [[0, 0], [ks, ks], [ks, ks], [0, 0]],
                          padding)
        mask = tf.pad(mask, [[0, 0], [ks, ks], [ks, ks], [0, 0]], padding)

        o_c1, mask = layers.general_partial_conv2d(inputgen,
                                                   mask,
                                                   tf.constant(False,
                                                               dtype=bool),
                                                   ngf,
                                                   f,
                                                   f,
                                                   1,
                                                   1,
                                                   0.02,
                                                   name="c1")

        # o_c1_in = tf.multiply(o_c1, mask)

        o_c2, mask = layers.general_partial_conv2d(o_c1,
                                                   mask,
                                                   tf.constant(False,
                                                               dtype=bool),
                                                   ngf * 2,
                                                   ks,
                                                   ks,
                                                   2,
                                                   2,
                                                   0.02,
                                                   padding='same',
                                                   name="c2")

        # o_c2_in = tf.multiply(o_c2, mask)

        o_c3, mask = layers.general_partial_conv2d(o_c2,
                                                   mask,
                                                   tf.constant(False,
                                                               dtype=bool),
                                                   ngf * 4,
                                                   ks,
                                                   ks,
                                                   2,
                                                   2,
                                                   0.02,
                                                   padding='same',
                                                   name="c3")

        # o_c3_in = tf.multiply(o_c3, mask)

        #o_r1, mask_r1 = build_partial_resnet_block(o_c3, mask, ngf * 4, "r1")
        #o_r2, mask_r2 = build_partial_resnet_block(o_r1, mask_r1, ngf * 4, "r2")
        #o_r3, mask_r3 = build_partial_resnet_block(o_r2, mask_r2, ngf * 4, "r3")
        #o_r4, mask_r4 = build_partial_resnet_block(o_r3, mask_r3, ngf * 4, "r4")
        #o_r5, mask_r5 = build_partial_resnet_block(o_r4, mask_r4, ngf * 4, "r5")
        #o_r6, mask_r6 = build_partial_resnet_block(o_r5, mask_r5, ngf * 4, "r6")
        #o_r7, mask_r7 = build_partial_resnet_block(o_r6, mask_r6, ngf * 4, "r7")
        #o_r8, mask_r8 = build_partial_resnet_block(o_r7, mask_r7, ngf * 4, "r8")
        #o_r9, mask_r9 = build_partial_resnet_block(o_r8, mask_r8, ngf * 4, "r9")

        o_r1 = build_resnet_block(o_c3, ngf * 4, "r1", padding)
        o_r2 = build_resnet_block(o_r1, ngf * 4, "r2", padding)
        o_r3 = build_resnet_block(o_r2, ngf * 4, "r3", padding)
        o_r4 = build_resnet_block(o_r3, ngf * 4, "r4", padding)
        o_r5 = build_resnet_block(o_r4, ngf * 4, "r5", padding)
        o_r6 = build_resnet_block(o_r5, ngf * 4, "r6", padding)
        o_r7 = build_resnet_block(o_r6, ngf * 4, "r7", padding)
        o_r8 = build_resnet_block(o_r7, ngf * 4, "r8", padding)
        o_r9 = build_resnet_block(o_r8, ngf * 4, "r9", padding)

        o_c4 = layers.general_deconv2d(o_r9, [BATCH_SIZE, 128, 128, ngf * 2],
                                       ngf * 2, ks, ks, 2, 2, 0.02, "SAME",
                                       "c4")
        o_c5 = layers.general_deconv2d(o_c4, [BATCH_SIZE, 256, 256, ngf], ngf,
                                       ks, ks, 2, 2, 0.02, "SAME", "c5")
        o_c6 = layers.general_conv2d(o_c5,
                                     IMG_CHANNELS,
                                     f,
                                     f,
                                     1,
                                     1,
                                     0.02,
                                     "SAME",
                                     "c6",
                                     do_norm=False,
                                     do_relu=False)

        if skip is True:
            out_gen = tf.nn.tanh(inputgen + o_c6, "t1")
        else:
            out_gen = tf.nn.tanh(o_c6, "t1")

        return out_gen
Example #22
def generator(inputA, edges, name="generator"):
    f = 5
    ks = 3
    # padding = "REFLECT"

    n_pool = 3
    growth_rate = 8
    n_layers_per_block = [2, 2, 2, 2, 2, 2, 2]

    with tf.variable_scope(name):
        n_filters = ngf

        # skip_connection_list = []

        o_c1 = layers.general_conv2d(inputA,
                                     ngf,
                                     f,
                                     f,
                                     1,
                                     1,
                                     0.02,
                                     name="c1",
                                     padding="SAME")
        o_c2 = layers.general_conv2d(o_c1, ngf, ks, ks, 1, 1, 0.02, "SAME",
                                     "c2")

        o_c3 = layers.general_conv2d(o_c2, ngf, ks, ks, 1, 1, 0.02, "SAME",
                                     "c3")

        o_c4 = layers.general_deconv2d(o_c3, ngf, ks, ks, 2, 2, 0.02, "SAME",
                                       "c4")

        o_c4_1 = layers.general_conv2d(o_c4, ngf, ks, ks, 1, 1, 0.02, "SAME",
                                       "c4_1")

        o_c5 = layers.general_deconv2d(o_c4_1, ngf, ks, ks, 2, 2, 0.02, "SAME",
                                       "c5")

        o_c5_1 = layers.general_conv2d(o_c5, ngf, ks, ks, 1, 1, 0.02, "SAME",
                                       "c5_1")

        o_c6 = layers.general_deconv2d(o_c5_1, ngf * 2, ks, ks, 2, 2, 0.02,
                                       "SAME", "c6")

        stack = slim.conv2d(
            o_c6,
            ngf, [3, 3],
            padding='SAME',
            activation_fn=tf.nn.leaky_relu,
            weights_initializer=tf.contrib.layers.variance_scaling_initializer(
                factor=2.0, mode='FAN_IN', uniform=False))

        stack, _ = DenseBlock(stack,
                              n_layers_per_block[0],
                              growth_rate,
                              scope='denseblock%d' % (1))
        n_filters += growth_rate * n_layers_per_block[0]

        out1 = tf.tile(tf.expand_dims(edges[:, :, :, 1], 3), [1, 1, 1, 48])
        stack = tf.add(stack, out1)

        stack, _ = DenseBlock(stack,
                              n_layers_per_block[1],
                              growth_rate,
                              scope='denseblock%d' % (2))
        n_filters += growth_rate * n_layers_per_block[1]

        out2 = tf.tile(tf.expand_dims(edges[:, :, :, 2], 3), [1, 1, 1, 64])
        stack = tf.add(stack, out2)

        stack, _ = DenseBlock(stack,
                              n_layers_per_block[2],
                              growth_rate,
                              scope='denseblock%d' % (3))
        n_filters += growth_rate * n_layers_per_block[2]

        stack, _ = DenseBlock(stack,
                              n_layers_per_block[n_pool],
                              growth_rate,
                              scope='denseblock%d' % (n_pool + 1))

        out3 = tf.tile(tf.expand_dims(edges[:, :, :, 3], 3), [1, 1, 1, 96])
        stack = tf.add(stack, out3)

        stack, _ = DenseBlock(stack,
                              n_layers_per_block[n_pool + 1],
                              growth_rate,
                              scope='denseblock%d' % (n_pool + 2))

        out4 = tf.tile(tf.expand_dims(edges[:, :, :, 4], 3), [1, 1, 1, 112])
        stack = tf.add(stack, out4)

        stack, _ = DenseBlock(stack,
                              n_layers_per_block[n_pool + 2],
                              growth_rate,
                              scope='denseblock%d' % (n_pool + 3))

        out5 = tf.tile(tf.expand_dims(edges[:, :, :, 5], 3), [1, 1, 1, 128])
        stack = tf.add(stack, out5)

        stack, _ = DenseBlock(stack,
                              n_layers_per_block[n_pool + 3],
                              growth_rate,
                              scope='denseblock%d' % (n_pool + 4))

        net = tf.nn.tanh(
            layers.general_conv2d(stack,
                                  1,
                                  3,
                                  3,
                                  1,
                                  1,
                                  0.02,
                                  "SAME",
                                  "c6",
                                  do_norm=False,
                                  do_relu=False))

        net = (net + tf.image.resize_images(
            inputA, tf.shape(net)[1:3], align_corners=True)) / 2.0

        return net
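
The tile widths used for the edge injections above (48, 64, 96, 112, 128) track the stack's channel count, assuming ngf = 32 and that each DenseBlock layer concatenates growth_rate new channels:

ngf, growth_rate, n_layers = 32, 8, 2
w = ngf
for block in range(1, 7):
    w += growth_rate * n_layers   # each DenseBlock adds 8 * 2 = 16 channels
    print(block, w)               # 1 48, 2 64, 3 80, 4 96, 5 112, 6 128
# edges are injected after blocks 1, 2, 4, 5, 6 -> widths 48, 64, 96, 112, 128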
Example #23
def denseNet(inputgen,
             gen_type,
             name="generator",
             preset_model='FC-DenseNet56',
             n_filters_first_conv=32,
             n_pool=1,
             growth_rate=2,
             n_layers_per_block=3):
    if preset_model == 'FC-DenseNet56':
        n_pool = 5
        growth_rate = 12
        n_layers_per_block = 4
    elif preset_model == 'FC-DenseNet67':
        n_pool = 5
        growth_rate = 16
        n_layers_per_block = 5
    elif preset_model == 'FC-DenseNet103':
        n_pool = 5
        growth_rate = 16
        n_layers_per_block = [4, 5, 7, 10, 12, 15, 12, 10, 7, 5, 4]
    else:
        raise ValueError(
            "Unsupported FC-DenseNet model '%s'. This function only supports FC-DenseNet56, FC-DenseNet67, and FC-DenseNet103"
            % (preset_model))

    with tf.variable_scope(name):
        if isinstance(n_layers_per_block, list):
            assert len(n_layers_per_block) == 2 * n_pool + 1
        elif isinstance(n_layers_per_block, int):
            n_layers_per_block = [n_layers_per_block] * (2 * n_pool + 1)
        else:
            raise ValueError("n_layers_per_block must be an int or a list")

        #stack = slim.conv2d(inputgen, n_filters_first_conv, [7, 7], scope='first_conv', activation_fn=None)
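        # NOTE: pad_input below is computed but never used; the first conv reads
        # inputgen directly with SAME padding.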
        pad_input = tf.pad(inputgen, [[0, 0], [3, 3], [3, 3], [0, 0]],
                           "REFLECT")
        stack = layers.general_conv2d(inputgen, ngf, 7, 7, 1, 1, 0.02, "SAME",
                                      "c1")
        #stack = layers.general_conv2d(stack, ngf * 2, 3, 3, 1, 1, 0.02, "SAME", "c2")
        n_filters = ngf
        skip_connection_list = []

        for i in range(n_pool):
            stack, _ = DenseBlock(stack,
                                  n_layers_per_block[i],
                                  growth_rate,
                                  scope='denseblock%d' % (i + 1))
            n_filters += growth_rate * n_layers_per_block[i]
            skip_connection_list.append(stack)
            stack = TransitionDown(stack,
                                   n_filters,
                                   scope='transitiondown%d' % (i + 1))

        skip_connection_list = skip_connection_list[::-1]

        stack, block_to_upsample = DenseBlock(stack,
                                              n_layers_per_block[n_pool],
                                              growth_rate,
                                              scope='denseblock%d' %
                                              (n_pool + 1))

        for i in range(n_pool):
            n_filters_keep = growth_rate * n_layers_per_block[n_pool + i]
            stack = TransitionUp(block_to_upsample,
                                 skip_connection_list[i],
                                 n_filters_keep,
                                 scope='transitionup%d' % (n_pool + i + 1))
            stack, block_to_upsample = DenseBlock(
                stack,
                n_layers_per_block[n_pool + i + 1],
                growth_rate,
                scope='denseblock%d' % (n_pool + i + 2))

        o_c3 = layers.general_conv2d(stack, ngf * 4, 1, 1, 1, 1, 0.02, "SAME",
                                     "c3")
        o_r1 = tf.nn.relu(
            build_resnet_block(o_c3, ngf * 4, "r1", padding="REFLECT"))
        o_r2 = build_resnet_block(o_r1, ngf * 4, "r2", padding="REFLECT")
        o_r3 = build_resnet_block(o_r2, ngf * 4, "r3", padding="REFLECT")

        if gen_type == 'A':
            out_layers = IMG_CHANNELS
        elif gen_type == 'B':
            out_layers = IMG_CHANNELS + 1
        else:
            raise ValueError("gen_type must be 'A' or 'B'")

        net = tf.nn.tanh(
            layers.general_conv2d(o_r3,
                                  out_layers,
                                  3,
                                  3,
                                  1,
                                  1,
                                  0.02,
                                  "SAME",
                                  "c6",
                                  do_norm=False,
                                  do_relu=False))
    return net
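
A quick check of the assertion above for the FC-DenseNet103 preset: the down path, bottleneck, and up path together need 2 * n_pool + 1 block sizes.

n_pool = 5
n_layers_per_block = [4, 5, 7, 10, 12, 15, 12, 10, 7, 5, 4]
assert len(n_layers_per_block) == 2 * n_pool + 1   # 11 == 11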
Example #24
def dehaze_resize_with_deconv(inputgen, name="generator", skip=False):
    with tf.variable_scope(name):
        f = 7
        ks = 3
        padding = "REFLECT"

        pad_input = tf.pad(inputgen, [[0, 0], [ks, ks], [ks, ks], [0, 0]],
                           padding)
        o_c1 = layers.general_conv2d(pad_input,
                                     ngf,
                                     f,
                                     f,
                                     1,
                                     1,
                                     0.02,
                                     name="c1",
                                     relufactor=0.2)  #256*256*32

        o_c2 = layers.general_conv2d(o_c1,
                                     ngf * 2,
                                     ks,
                                     ks,
                                     2,
                                     2,
                                     0.02,
                                     "SAME",
                                     "c2",
                                     relufactor=0.2)  #128*128*64

        o_c3 = layers.general_conv2d(o_c2,
                                     ngf * 4,
                                     ks,
                                     ks,
                                     2,
                                     2,
                                     0.02,
                                     "SAME",
                                     "c3",
                                     relufactor=0.2)  #64*64*128

        o_r1 = build_resnet_block(o_c3, ngf * 4, "r1", padding)  #64*64*128
        o_r2 = build_resnet_block(o_r1, ngf * 4, "r2", padding)
        o_r3 = build_resnet_block(o_r2, ngf * 4, "r3", padding)
        o_r4 = build_resnet_block(o_r3, ngf * 4, "r4", padding)
        o_r5 = build_resnet_block(o_r4, ngf * 4, "r5", padding)
        o_r6 = build_resnet_block(o_r5, ngf * 4, "r6", padding)
        o_r7 = build_resnet_block(o_r6, ngf * 4, "r7", padding)
        o_r8 = build_resnet_block(o_r7, ngf * 4, "r8", padding)
        o_r9 = build_resnet_block(o_r8, ngf * 4, "r9", padding)

        with tf.variable_scope("resize_conv1"):
            o_c4_0 = tf.concat([o_r9, o_c3], 3)
            o_c4_1 = layers.instance_norm(o_c4_0)
            o_c4_1 = layers.deconv2d_resize(o_c4_1,
                                            ngf * 4,
                                            kernel=ks,
                                            stride=(2, 2),
                                            name='deconv_1')
            o_c4_1 = layers.lrelu(o_c4_1)
            o_c4_2 = layers.general_deconv2d(o_c4_0,
                                             [BATCH_SIZE, 128, 128, ngf * 2],
                                             ngf * 2, ks, ks, 2, 2, 0.02,
                                             "SAME", "c4")
            o_c4_3 = tf.concat([o_c4_1, o_c4_2], 3)
            o_c4_4 = layers.one_conv(o_c4_3, 64)
            o_c4_4 = layers.lrelu(o_c4_4)

        with tf.variable_scope("resize_conv2"):
            o_c5_0 = tf.concat([o_c2, o_c4_4], 3)
            o_c5 = layers.instance_norm(o_c5_0)
            o_c5_1 = layers.deconv2d_resize(o_c5,
                                            ngf * 2,
                                            kernel=ks,
                                            stride=(2, 2),
                                            name='deconv_2')
            o_c5_1 = layers.lrelu(o_c5_1)

            o_c5_2 = layers.general_deconv2d(o_c5_0,
                                             [BATCH_SIZE, 256, 256, ngf], ngf,
                                             ks, ks, 2, 2, 0.02, "SAME", "c5")
            o_c5_3 = tf.concat([o_c5_1, o_c5_2], 3)
            o_c5_4 = layers.one_conv(o_c5_3, 32)
            o_c5_4 = layers.lrelu(o_c5_4)

        with tf.variable_scope("Output_layer"):
            #  o_c6_0 = tf.concat([o_c5_1, o_c5_2],3)

            o_c6 = layers.general_conv2d(o_c5_4,
                                         IMG_CHANNELS,
                                         f,
                                         f,
                                         1,
                                         1,
                                         0.02,
                                         "SAME",
                                         "c6",
                                         do_norm=False,
                                         do_relu=False)

        if skip is True:
            out_gen = tf.nn.tanh(inputgen + o_c6, "t1")
        else:
            out_gen = tf.nn.tanh(o_c6, "t1")

        return out_gen