Example #1
0
File: model.py  Project: zwq1230/SIFA
def build_generator_resnet_9blocks(inputgen, inputimg, name="generator", skip=False):
    """ResNet-9-block generator (instance-norm variant).

    Encodes `inputgen` with three convolutions, transforms the features
    with nine chained residual blocks, then decodes back to a
    single-channel map followed by tanh. When `skip` is exactly True,
    `inputimg` is added to the final convolution output before the tanh.
    """
    with tf.variable_scope(name):
        outer_kernel = 7
        inner_kernel = 3
        pad_mode = "CONSTANT"

        # Pad so the 7x7 conv below preserves spatial size.
        padded = tf.pad(inputgen, [[0, 0], [inner_kernel, inner_kernel], [inner_kernel, inner_kernel], [0, 0]], pad_mode)

        # Encoder: stride-1 conv on the padded input, then two stride-2 downsamplers.
        feat = layers.general_conv2d_ga(padded, ngf, outer_kernel, outer_kernel, 1, 1, 0.02, name="c1", norm_type='Ins')
        feat = layers.general_conv2d_ga(feat, ngf * 2, inner_kernel, inner_kernel, 2, 2, 0.02, "SAME", "c2", norm_type='Ins')
        feat = layers.general_conv2d_ga(feat, ngf * 4, inner_kernel, inner_kernel, 2, 2, 0.02, "SAME", "c3", norm_type='Ins')

        # Transformer: nine residual blocks at constant width ngf*4.
        for idx in range(1, 10):
            feat = build_resnet_block_ins(feat, ngf * 4, "r%d" % idx, pad_mode)

        # Decoder: two stride-2 deconvs back to full resolution, then a 1-channel conv.
        feat = layers.general_deconv2d(feat, [BATCH_SIZE, 128, 128, ngf * 2], ngf * 2, inner_kernel, inner_kernel, 2, 2, 0.02, "SAME", "c4", norm_type='Ins')
        feat = layers.general_deconv2d(feat, [BATCH_SIZE, 256, 256, ngf], ngf, inner_kernel, inner_kernel, 2, 2, 0.02, "SAME", "c5", norm_type='Ins')
        logits = layers.general_conv2d_ga(feat, 1, outer_kernel, outer_kernel, 1, 1, 0.02, "SAME", "c6", do_norm=False, do_relu=False)

        # Optional identity skip from `inputimg`, then squash to [-1, 1].
        if skip is True:
            return tf.nn.tanh(inputimg + logits, "t1")
        return tf.nn.tanh(logits, "t1")
Example #2
0
def build_generator_resnet_9blocks(inputgen, name="generator", skip=False):
    """Build a 9-residual-block ResNet generator.

    Encoder (3 convs) -> 9 resnet blocks -> decoder (2 deconvs + 1 conv),
    finished with tanh. When ``skip`` is exactly True, the (unpadded)
    input image is added to the last conv output before the tanh.

    Args:
        inputgen: Input image tensor, NHWC.
        name: Variable-scope name for all created variables.
        skip: If True, add ``inputgen`` to the pre-tanh output.

    Returns:
        The generated image tensor after tanh.
    """
    with tf.variable_scope(name):
        f = 7   # kernel size of the first and last convolutions
        ks = 3  # kernel size of the inner convolutions
        padding = "CONSTANT"  # zero-padding mode for tf.pad

        # Pad height/width by ks per side so the f x f conv below keeps
        # the spatial size.
        pad_input = tf.pad(inputgen, [[0, 0], [ks, ks], [
            ks, ks], [0, 0]], padding)
        # pad_input.shape = (1, 134, 134, 3) for a 128x128x3 input
        o_c1 = layers.general_conv2d(
            pad_input, ngf, f, f, 1, 1, 0.02, name="c1")
        # o_c1.shape = (1, 128, 128, 32)
        o_c2 = layers.general_conv2d(
            o_c1, ngf * 2, ks, ks, 2, 2, 0.02, "SAME", "c2")
        # o_c2.shape = (1, 64, 64, 64)
        o_c3 = layers.general_conv2d(
            o_c2, ngf * 4, ks, ks, 2, 2, 0.02, "SAME", "c3")
        # o_c3.shape = (1, 32, 32, 128)

        # Transformation stage: nine residual blocks at constant resolution.
        o_r1 = build_resnet_block(o_c3, ngf * 4, "r1", padding)
        o_r2 = build_resnet_block(o_r1, ngf * 4, "r2", padding)
        o_r3 = build_resnet_block(o_r2, ngf * 4, "r3", padding)
        o_r4 = build_resnet_block(o_r3, ngf * 4, "r4", padding)
        o_r5 = build_resnet_block(o_r4, ngf * 4, "r5", padding)
        o_r6 = build_resnet_block(o_r5, ngf * 4, "r6", padding)
        o_r7 = build_resnet_block(o_r6, ngf * 4, "r7", padding)
        o_r8 = build_resnet_block(o_r7, ngf * 4, "r8", padding)
        o_r9 = build_resnet_block(o_r8, ngf * 4, "r9", padding)
        # o_r9.shape = (1, 32, 32, 128)

        # NOTE(review): the requested deconv output shape below is
        # [BATCH_SIZE, 14, 14, ngf*2], which disagrees with the 32->64
        # upsampling implied by the surrounding shape comments — confirm
        # against the actual input resolution.
        o_d1 = layers.general_deconv2d(
            o_r9, [BATCH_SIZE, 14, 14, ngf * 2], ngf * 2, ks, ks, 2, 2, 0.02,
            "SAME", "d1")
        # o_d1.shape — presumably (1, 64, 64, 64); original comment
        # mislabelled this tensor as o_c5.

        o_d2 = layers.general_deconv2d(
            o_d1, [BATCH_SIZE, 28, 28, ngf], ngf, ks, ks, 2, 2, 0.02,
            "SAME", "d2")
        # o_d2.shape — presumably (1, 128, 128, 32); original comment
        # mislabelled this tensor as o_c5.

        o_c5 = layers.general_conv2d(o_d2, IMG_CHANNELS, f, f, 1, 1,
                                     0.02, "SAME", "c5",
                                     do_norm=False, do_relu=False)
        # o_c5.shape = (1, 128, 128, IMG_CHANNELS); original comment
        # mislabelled this tensor as o_c6.

        if skip is True:
            # Identity skip from the unpadded input image.
            out_gen = tf.nn.tanh(inputgen + o_c5, "t1")
        else:
            out_gen = tf.nn.tanh(o_c5, "t1")

        return out_gen
Example #3
0
def build_decodernewb(o_r4, inputimg, name='decoder', skip=False):
    """Decode latent features back to a single-channel image with tanh.

    Three stride-2 instance-norm deconvolutions upsample `o_r4`, and a
    final 7x7 convolution (no norm, no relu) produces the logits. When
    `skip` is exactly True, `inputimg` is added to the logits first.
    """
    with tf.variable_scope(name):
        kernel_large = 7
        kernel_small = 3

        up = layers.general_deconv2d(o_r4, [BATCH_SIZE, 64, 64, ngf * 2], ngf * 2, kernel_small, kernel_small, 2, 2, 0.02, "SAME", "c3", norm_type='Ins')
        up = layers.general_deconv2d(up, [BATCH_SIZE, 128, 128, ngf * 2], ngf * 2, kernel_small, kernel_small, 2, 2, 0.02, "SAME", "c4", norm_type='Ins')
        up = layers.general_deconv2d(up, [BATCH_SIZE, 256, 256, ngf], ngf, kernel_small, kernel_small, 2, 2, 0.02, "SAME", "c5", norm_type='Ins')
        logits = layers.general_conv2d(up, 1, kernel_large, kernel_large, 1, 1, 0.02, "SAME", "c6", do_norm=False, do_relu=False)

        # Optional identity skip from the input image, then squash to [-1, 1].
        pre_tanh = inputimg + logits if skip is True else logits
        return tf.nn.tanh(pre_tanh, "t1")
Example #4
0
def build_segmenternew(inputse, name='segmenter', keep_rate=0.75):
    """Segmentation head: conv + 4 resnet blocks + 3 deconvs + 4-class conv.

    `keep_rate` is the dropout keep probability forwarded to the first
    convolution. Returns raw 4-channel logits (no norm, relu, or tanh).
    """
    with tf.variable_scope(name):
        big_k = 7
        small_k = 3
        pad_mode = "CONSTANT"

        feat = layers.general_conv2d(inputse, ngf * 4, small_k, small_k, 1, 1, 0.02, "SAME", "c1", norm_type='Ins', keep_rate=keep_rate)

        # Four chained residual blocks at constant channel width.
        for i in range(1, 5):
            feat = build_resnet_block(feat, ngf * 4, "r%d" % i, pad_mode, norm_type='Ins')

        # Upsample back to full resolution with three stride-2 deconvs.
        feat = layers.general_deconv2d(feat, [BATCH_SIZE, 64, 64, ngf * 2], ngf * 2, small_k, small_k, 2, 2, 0.02, "SAME", "c3", norm_type='Ins')
        feat = layers.general_deconv2d(feat, [BATCH_SIZE, 128, 128, ngf * 2], ngf * 2, small_k, small_k, 2, 2, 0.02, "SAME", "c4", norm_type='Ins')
        feat = layers.general_deconv2d(feat, [BATCH_SIZE, 256, 256, ngf], ngf, small_k, small_k, 2, 2, 0.02, "SAME", "c5", norm_type='Ins')

        # Final 4-channel class logits.
        return layers.general_conv2d(feat, 4, big_k, big_k, 1, 1, 0.02, "SAME", "c6", do_norm=False, do_relu=False)
Example #5
0
def build_generator_resnet_9blocks(inputgen, name="generator", skip=False):
    """ResNet-9-block generator with boolean-tensor norm flags.

    Encoder (3 convs) -> 9 residual blocks -> decoder (2 deconvs + conv),
    finished with tanh. When `skip` is exactly True, the ORIGINAL
    (unpadded) input image is added to the final conv output first.

    Args:
        inputgen: Input image tensor, NHWC.
        name: Variable-scope name.
        skip: If True, add the input image to the pre-tanh output.

    Returns:
        Generated image tensor in [-1, 1].
    """
    with tf.variable_scope(name):
        f = 7
        ks = 3
        padding = "CONSTANT"

        # FIX: pad into a separate tensor instead of overwriting `inputgen`.
        # Previously `inputgen` was rebound to the padded tensor, so the
        # skip connection below added a (H+2ks, W+2ks) tensor to the
        # (H, W) generator output — a shape mismatch at graph-build time.
        pad_input = tf.pad(inputgen, [[0, 0], [ks, ks], [
            ks, ks], [0, 0]], padding)

        o_c1 = layers.general_conv2d(
            pad_input, tf.constant(True, dtype=bool), ngf, f, f, 1, 1, 0.02, name="c1")

        o_c2 = layers.general_conv2d(
            o_c1, tf.constant(True, dtype=bool), ngf * 2, ks, ks, 2, 2, 0.02, padding='same', name="c2")

        o_c3 = layers.general_conv2d(
            o_c2, tf.constant(True, dtype=bool), ngf * 4, ks, ks, 2, 2, 0.02, padding='same', name="c3")

        # Nine residual blocks at constant resolution.
        o_r1 = build_resnet_block(o_c3, ngf * 4, "r1", padding)
        o_r2 = build_resnet_block(o_r1, ngf * 4, "r2", padding)
        o_r3 = build_resnet_block(o_r2, ngf * 4, "r3", padding)
        o_r4 = build_resnet_block(o_r3, ngf * 4, "r4", padding)
        o_r5 = build_resnet_block(o_r4, ngf * 4, "r5", padding)
        o_r6 = build_resnet_block(o_r5, ngf * 4, "r6", padding)
        o_r7 = build_resnet_block(o_r6, ngf * 4, "r7", padding)
        o_r8 = build_resnet_block(o_r7, ngf * 4, "r8", padding)
        o_r9 = build_resnet_block(o_r8, ngf * 4, "r9", padding)

        # Decoder back to full resolution.
        o_c4 = layers.general_deconv2d(
            o_r9, [BATCH_SIZE, 128, 128, ngf * 2], ngf * 2, ks, ks, 2, 2, 0.02,
            "SAME", "c4")

        o_c5 = layers.general_deconv2d(
            o_c4, [BATCH_SIZE, 256, 256, ngf], ngf, ks, ks, 2, 2, 0.02,
            "SAME", "c5")

        o_c6 = layers.general_conv2d(o_c5, tf.constant(False, dtype=bool), IMG_CHANNELS, f, f, 1, 1,
                                     0.02, "SAME", "c6", do_relu=False)

        if skip is True:
            # Identity skip uses the unpadded input so the shapes match.
            out_gen = tf.nn.tanh(inputgen + o_c6, "t1")
        else:
            out_gen = tf.nn.tanh(o_c6, "t1")

        return out_gen
Example #6
0
def build_generator_resnet_9blocks(inputgen, name="generator"):
    """9-block ResNet generator with REFLECT padding and no skip option.

    Returns tanh(decoder(resnet_blocks(encoder(inputgen)))).
    """
    with tf.variable_scope(name):
        wide = 7
        narrow = 3

        # Reflect-pad so the 7x7 conv below preserves spatial size.
        padded = tf.pad(inputgen, [[0, 0], [narrow, narrow], [narrow, narrow], [0, 0]],
                        "REFLECT")

        # Encoder.
        enc = general_conv2d(padded, ngf, wide, wide, 1, 1, 0.02, name="c1")
        enc = general_conv2d(enc, ngf * 2, narrow, narrow, 2, 2, 0.02, "SAME", "c2")
        enc = general_conv2d(enc, ngf * 4, narrow, narrow, 2, 2, 0.02, "SAME", "c3")

        # Nine chained residual blocks.
        res = enc
        for i in range(1, 10):
            res = build_resnet_block(res, ngf * 4, "r%d" % i)

        # Decoder.
        dec = general_deconv2d(res, [batch_size, 128, 128, ngf * 2], ngf * 2,
                               narrow, narrow, 2, 2, 0.02, "SAME", "c4")
        dec = general_deconv2d(dec, [batch_size, 256, 256, ngf], ngf, narrow, narrow,
                               2, 2, 0.02, "SAME", "c5")
        out = general_conv2d(dec,
                             img_layer,
                             wide,
                             wide,
                             1,
                             1,
                             0.02,
                             "SAME",
                             "c6",
                             do_relu=False)

        # Squash to [-1, 1].
        return tf.nn.tanh(out, "t1")
Example #7
0
File: model.py  Project: zwq1230/SIFA
def build_decoder(inputde, inputimg, name='decoder', skip=False):
    """Decoder: conv + 4 resnet blocks + 3 deconvs + 1-channel conv + tanh.

    When `skip` is exactly True, `inputimg` is added to the final conv
    output before the tanh (identity skip connection).
    """
    with tf.variable_scope(name):
        k_out = 7
        k_in = 3
        pad_mode = "CONSTANT"

        feat = layers.general_conv2d(inputde, ngf * 4, k_in, k_in, 1, 1, 0.02, "SAME", "c1", norm_type='Ins')

        # Four chained instance-norm residual blocks.
        for i in range(1, 5):
            feat = build_resnet_block(feat, ngf * 4, "r%d" % i, pad_mode, norm_type='Ins')

        # Three stride-2 deconvs back to full resolution.
        feat = layers.general_deconv2d(feat, [BATCH_SIZE, 64, 64, ngf * 2], ngf * 2, k_in, k_in, 2, 2, 0.02, "SAME", "c3", norm_type='Ins')
        feat = layers.general_deconv2d(feat, [BATCH_SIZE, 128, 128, ngf * 2], ngf * 2, k_in, k_in, 2, 2, 0.02, "SAME", "c4", norm_type='Ins')
        feat = layers.general_deconv2d(feat, [BATCH_SIZE, 256, 256, ngf], ngf, k_in, k_in, 2, 2, 0.02, "SAME", "c5", norm_type='Ins')
        logits = layers.general_conv2d(feat, 1, k_out, k_out, 1, 1, 0.02, "SAME", "c6", do_norm=False, do_relu=False)

        return tf.nn.tanh(inputimg + logits if skip is True else logits, "t1")
Example #8
0
def build_generator_resnet_9blocks_bis(inputgen,
                                       mask,
                                       transition_rate,
                                       name="generator",
                                       skip=False):
    """Sparse-convolution generator [Uhrig et al.: Sparsity Invariant CNNs].

    Instead of normal convolutions, partial convolutions are used in the
    encoder and resnet stages. The normalizing factor from the paper is
    replaced by 1, which removes the bias coming from masked regions of
    the input.

    Args:
        inputgen: Input image tensor, NHWC.
        mask: Validity mask, padded and propagated alongside activations.
        transition_rate: Unused here; kept for interface compatibility.
        name: Variable-scope name.
        skip: If exactly True, add the (unpadded) input image to the
            final conv output before the tanh.

    Returns:
        Generated image tensor in [-1, 1].
    """
    with tf.variable_scope(name):
        f = 7
        ks = 3
        padding = "CONSTANT"

        # FIX: keep the original input in `inputgen`; pad into a new
        # tensor. The previous code rebound `inputgen` to its padded
        # version, so the `skip` branch added a (H+2ks, W+2ks) tensor to
        # the (H, W) output — a shape mismatch at graph construction.
        pad_input = tf.pad(inputgen, [[0, 0], [ks, ks], [ks, ks], [0, 0]],
                           padding)
        mask = tf.pad(mask, [[0, 0], [ks, ks], [ks, ks], [0, 0]], padding)

        o_c1, mask = layers.general_partial_conv2d(pad_input,
                                                   mask,
                                                   tf.constant(False,
                                                               dtype=bool),
                                                   ngf, f, f, 1, 1, 0.02,
                                                   name="c1")

        # Re-mask the activations before each downsampling stage.
        o_c1_in = tf.multiply(o_c1, mask)

        o_c2, mask = layers.general_partial_conv2d(o_c1_in,
                                                   mask,
                                                   tf.constant(False,
                                                               dtype=bool),
                                                   ngf * 2, ks, ks, 2, 2, 0.02,
                                                   padding='same',
                                                   name="c2")

        o_c2_in = tf.multiply(o_c2, mask)

        o_c3, mask = layers.general_partial_conv2d(o_c2_in,
                                                   mask,
                                                   tf.constant(False,
                                                               dtype=bool),
                                                   ngf * 4, ks, ks, 2, 2, 0.02,
                                                   padding='same',
                                                   name="c3")

        o_c3_in = tf.multiply(o_c3, mask)

        # Nine partial-conv residual blocks; each propagates its own mask.
        o_r, mask_r = o_c3_in, mask
        for i in range(1, 10):
            o_r, mask_r = build_partial_resnet_block(o_r, mask_r, ngf * 4,
                                                     "r%d" % i)

        # Decoder uses ordinary (dense) convolutions.
        o_c4 = layers.general_deconv2d(o_r, [BATCH_SIZE, 128, 128, ngf * 2],
                                       ngf * 2, ks, ks, 2, 2, 0.02, "SAME",
                                       "c4")
        o_c5 = layers.general_deconv2d(o_c4, [BATCH_SIZE, 256, 256, ngf], ngf,
                                       ks, ks, 2, 2, 0.02, "SAME", "c5")
        o_c6 = layers.general_conv2d(o_c5, IMG_CHANNELS, f, f, 1, 1, 0.02,
                                     "SAME", "c6",
                                     do_norm=False, do_relu=False)

        if skip is True:
            # Identity skip uses the unpadded input so the shapes match.
            out_gen = tf.nn.tanh(inputgen + o_c6, "t1")
        else:
            out_gen = tf.nn.tanh(o_c6, "t1")

        return out_gen
Example #9
0
def dehaze_resize_with_deconv(inputgen, name="generator", skip=False):
    """Dehazing generator whose decoder fuses resize-convs with deconvs.

    Pipeline: reflect-pad -> 3-conv encoder (leaky ReLU) -> 9 residual
    blocks -> two decode stages -> final 7x7 conv -> tanh.

    Each decode stage concatenates the matching encoder feature map
    (U-Net-style skip), then upsamples the result twice in parallel:
    once via `layers.deconv2d_resize` (resize-then-conv) and once via
    `layers.general_deconv2d` (transposed conv). The two upsampled
    tensors are concatenated and fused by `layers.one_conv`.

    Args:
        inputgen: Input (hazy) image tensor, NHWC.
        name: Variable-scope name for all created variables.
        skip: If exactly True, add `inputgen` to the final conv output
            before the tanh.

    Returns:
        Output image tensor in [-1, 1].
    """
    with tf.variable_scope(name):
        f = 7   # kernel size of the first and last convolutions
        ks = 3  # kernel size of all inner convolutions
        padding = "REFLECT"

        # Reflect-pad so the following 7x7 conv keeps the spatial size.
        pad_input = tf.pad(inputgen, [[0, 0], [ks, ks], [ks, ks], [0, 0]],
                           padding)
        o_c1 = layers.general_conv2d(pad_input,
                                     ngf,
                                     f,
                                     f,
                                     1,
                                     1,
                                     0.02,
                                     name="c1",
                                     relufactor=0.2)  #256*256*32

        o_c2 = layers.general_conv2d(o_c1,
                                     ngf * 2,
                                     ks,
                                     ks,
                                     2,
                                     2,
                                     0.02,
                                     "SAME",
                                     "c2",
                                     relufactor=0.2)  #128*128*64

        o_c3 = layers.general_conv2d(o_c2,
                                     ngf * 4,
                                     ks,
                                     ks,
                                     2,
                                     2,
                                     0.02,
                                     "SAME",
                                     "c3",
                                     relufactor=0.2)  #64*64*128

        # Nine residual blocks at constant resolution.
        o_r1 = build_resnet_block(o_c3, ngf * 4, "r1", padding)  #64*64*128
        o_r2 = build_resnet_block(o_r1, ngf * 4, "r2", padding)
        o_r3 = build_resnet_block(o_r2, ngf * 4, "r3", padding)
        o_r4 = build_resnet_block(o_r3, ngf * 4, "r4", padding)
        o_r5 = build_resnet_block(o_r4, ngf * 4, "r5", padding)
        o_r6 = build_resnet_block(o_r5, ngf * 4, "r6", padding)
        o_r7 = build_resnet_block(o_r6, ngf * 4, "r7", padding)
        o_r8 = build_resnet_block(o_r7, ngf * 4, "r8", padding)
        o_r9 = build_resnet_block(o_r8, ngf * 4, "r9", padding)

        # Decode stage 1: concat encoder features (o_c3), then upsample in
        # parallel via resize-conv and transposed conv, concat, and fuse
        # with a 1x1 conv.
        with tf.variable_scope("resize_conv1"):
            o_c4_0 = tf.concat([o_r9, o_c3], 3)
            o_c4_1 = layers.instance_norm(o_c4_0)
            o_c4_1 = layers.deconv2d_resize(o_c4_1,
                                            ngf * 4,
                                            kernel=ks,
                                            stride=(2, 2),
                                            name='deconv_1')
            o_c4_1 = layers.lrelu(o_c4_1)
            o_c4_2 = layers.general_deconv2d(o_c4_0,
                                             [BATCH_SIZE, 128, 128, ngf * 2],
                                             ngf * 2, ks, ks, 2, 2, 0.02,
                                             "SAME", "c4")
            o_c4_3 = tf.concat([o_c4_1, o_c4_2], 3)
            o_c4_4 = layers.one_conv(o_c4_3, 64)
            o_c4_4 = layers.lrelu(o_c4_4)

        # Decode stage 2: same pattern, skipping in encoder features o_c2.
        with tf.variable_scope("resize_conv2"):
            o_c5_0 = tf.concat([o_c2, o_c4_4], 3)
            o_c5 = layers.instance_norm(o_c5_0)
            o_c5_1 = layers.deconv2d_resize(o_c5,
                                            ngf * 2,
                                            kernel=ks,
                                            stride=(2, 2),
                                            name='deconv_2')
            o_c5_1 = layers.lrelu(o_c5_1)

            o_c5_2 = layers.general_deconv2d(o_c5_0,
                                             [BATCH_SIZE, 256, 256, ngf], ngf,
                                             ks, ks, 2, 2, 0.02, "SAME", "c5")
            o_c5_3 = tf.concat([o_c5_1, o_c5_2], 3)
            o_c5_4 = layers.one_conv(o_c5_3, 32)
            o_c5_4 = layers.lrelu(o_c5_4)

        # Output projection: 7x7 conv to IMG_CHANNELS, no norm/relu.
        with tf.variable_scope("Output_layer"):
            #  o_c6_0 = tf.concat([o_c5_1, o_c5_2],3)

            o_c6 = layers.general_conv2d(o_c5_4,
                                         IMG_CHANNELS,
                                         f,
                                         f,
                                         1,
                                         1,
                                         0.02,
                                         "SAME",
                                         "c6",
                                         do_norm=False,
                                         do_relu=False)

        if skip is True:
            # Identity skip from the unpadded input image.
            out_gen = tf.nn.tanh(inputgen + o_c6, "t1")
        else:
            out_gen = tf.nn.tanh(o_c6, "t1")

        return out_gen
Example #10
0
def build_generator_resnet_9blocks_tf(inputgen,
                                      inputref,
                                      name="generator",
                                      skip=False):
    """Two-stream ResNet generator.

    Encodes `inputgen` and `inputref` through separate conv stacks,
    concatenates their features along channels, runs nine ngf*8 residual
    blocks, and decodes to IMG_CHANNELS with tanh. When `skip` is exactly
    True, `inputgen` is added to the pre-tanh output.
    """
    with tf.variable_scope(name):
        k_first = 7
        k = 3
        pad_mode = "REFLECT"

        # Main-image encoder.
        main = tf.pad(inputgen, [[0, 0], [k, k], [k, k], [0, 0]], pad_mode)
        main = layers.general_conv2d(main, ngf, k_first, k_first, 1, 1, 0.02,
                                     name="c1")
        main = layers.general_conv2d(main, ngf * 2, k, k, 2, 2, 0.02, "SAME",
                                     "c2")
        main = layers.general_conv2d(main, ngf * 4, k, k, 2, 2, 0.02, "SAME",
                                     "c3")

        # Reference-image encoder (separate weights, "ref_" scopes).
        ref = tf.pad(inputref, [[0, 0], [k, k], [k, k], [0, 0]], pad_mode)
        ref = layers.general_conv2d(ref, ngf, k_first, k_first, 1, 1, 0.02,
                                    name="ref_c1")
        ref = layers.general_conv2d(ref, ngf * 2, k, k, 2, 2, 0.02, "SAME",
                                    "ref_c2")
        ref = layers.general_conv2d(ref, ngf * 4, k, k, 2, 2, 0.02, "SAME",
                                    "ref_c3")

        # Fuse the two streams along channels: ngf*4 + ngf*4 = ngf*8.
        fused = tf.concat([main, ref], -1)
        for i in range(1, 10):
            fused = build_resnet_block(fused, ngf * 8, "r%d" % i, pad_mode)

        # Decoder (note the non-square 70x121 / 140x242 target shapes).
        dec = layers.general_deconv2d(fused, [BATCH_SIZE, 70, 121, ngf * 2],
                                      ngf * 2, k, k, 2, 2, 0.02, "SAME", "c4")
        dec = layers.general_deconv2d(dec, [BATCH_SIZE, 140, 242, ngf], ngf,
                                      k, k, 2, 2, 0.02, "SAME", "c5")
        out = layers.general_conv2d(dec, IMG_CHANNELS, k_first, k_first, 1, 1,
                                    0.02, "SAME", "c6",
                                    do_norm=False, do_relu=False)

        # Optional identity skip, then squash to [-1, 1].
        return tf.nn.tanh(inputgen + out if skip is True else out, "t1")
Example #11
0
def generator(inputA, edges, name="generator"):
    """Edge-guided dense-block generator.

    Upsamples `inputA` through conv/deconv stages, then runs a chain of
    DenseBlocks; after most blocks one channel of `edges` is tiled across
    the feature channels and added in, injecting edge guidance at
    increasing feature widths. The final 3x3 conv + tanh output is
    averaged with a resized copy of the input.

    Args:
        inputA: Input image tensor, NHWC.
        edges: Edge-map tensor; channels 1..5 are indexed below, so it is
            assumed to have at least 6 channels — TODO confirm.
        name: Variable-scope name.

    Returns:
        Output tensor = (tanh(conv(stack)) + resize(inputA)) / 2.
    """
    f = 5
    ks = 3
    # padding = "REFLECT"

    n_pool = 3
    growth_rate = 8
    n_layers_per_block = [2, 2, 2, 2, 2, 2, 2]

    with tf.variable_scope(name):
        # n_filters tracks the dense-block channel count.
        # NOTE(review): it is updated below but never read afterwards.
        n_filters = ngf

        # skip_connection_list = []

        o_c1 = layers.general_conv2d(inputA,
                                     ngf,
                                     f,
                                     f,
                                     1,
                                     1,
                                     0.02,
                                     name="c1",
                                     padding="SAME")
        o_c2 = layers.general_conv2d(o_c1, ngf, ks, ks, 1, 1, 0.02, "SAME",
                                     "c2")

        o_c3 = layers.general_conv2d(o_c2, ngf, ks, ks, 1, 1, 0.02, "SAME",
                                     "c3")

        # Three stride-2 deconvs (interleaved with stride-1 convs)
        # progressively upsample the features.
        o_c4 = layers.general_deconv2d(o_c3, ngf, ks, ks, 2, 2, 0.02, "SAME",
                                       "c4")

        o_c4_1 = layers.general_conv2d(o_c4, ngf, ks, ks, 1, 1, 0.02, "SAME",
                                       "c4_1")

        o_c5 = layers.general_deconv2d(o_c4_1, ngf, ks, ks, 2, 2, 0.02, "SAME",
                                       "c5")

        o_c5_1 = layers.general_conv2d(o_c5, ngf, ks, ks, 1, 1, 0.02, "SAME",
                                       "c5_1")

        # NOTE(review): this deconv is named "c6" and the final conv near
        # the end of the function is also named "c6" inside the same
        # variable scope — confirm the layer helpers deduplicate names,
        # otherwise this may collide.
        o_c6 = layers.general_deconv2d(o_c5_1, ngf * 2, ks, ks, 2, 2, 0.02,
                                       "SAME", "c6")

        stack = slim.conv2d(
            o_c6,
            ngf, [3, 3],
            padding='SAME',
            activation_fn=tf.nn.leaky_relu,
            weights_initializer=tf.contrib.layers.variance_scaling_initializer(
                factor=2.0, mode='FAN_IN', uniform=False))

        stack, _ = DenseBlock(stack,
                              n_layers_per_block[0],
                              growth_rate,
                              scope='denseblock%d' % (1))
        n_filters += growth_rate * n_layers_per_block[0]

        # Inject edge channel 1, tiled to the current feature width.
        # The tile widths (48, 64, 96, 112, 128) presumably match the
        # post-denseblock channel counts — TODO confirm.
        out1 = tf.tile(tf.expand_dims(edges[:, :, :, 1], 3), [1, 1, 1, 48])
        stack = tf.add(stack, out1)

        stack, _ = DenseBlock(stack,
                              n_layers_per_block[1],
                              growth_rate,
                              scope='denseblock%d' % (2))
        n_filters += growth_rate * n_layers_per_block[1]

        out2 = tf.tile(tf.expand_dims(edges[:, :, :, 2], 3), [1, 1, 1, 64])
        stack = tf.add(stack, out2)

        stack, _ = DenseBlock(stack,
                              n_layers_per_block[2],
                              growth_rate,
                              scope='denseblock%d' % (3))
        # NOTE(review): index [1] is reused here where the pattern suggests
        # [2]; harmless since n_filters is unused, but confirm intent.
        n_filters += growth_rate * n_layers_per_block[1]

        stack, _ = DenseBlock(stack,
                              n_layers_per_block[n_pool],
                              growth_rate,
                              scope='denseblock%d' % (n_pool + 1))

        out3 = tf.tile(tf.expand_dims(edges[:, :, :, 3], 3), [1, 1, 1, 96])
        stack = tf.add(stack, out3)

        stack, _ = DenseBlock(stack,
                              n_layers_per_block[n_pool + 1],
                              growth_rate,
                              scope='denseblock%d' % (n_pool + 2))

        out4 = tf.tile(tf.expand_dims(edges[:, :, :, 4], 3), [1, 1, 1, 112])
        stack = tf.add(stack, out4)

        stack, _ = DenseBlock(stack,
                              n_layers_per_block[n_pool + 2],
                              growth_rate,
                              scope='denseblock%d' % (n_pool + 3))

        out5 = tf.tile(tf.expand_dims(edges[:, :, :, 5], 3), [1, 1, 1, 128])
        stack = tf.add(stack, out5)

        stack, _ = DenseBlock(stack,
                              n_layers_per_block[n_pool + 3],
                              growth_rate,
                              scope='denseblock%d' % (n_pool + 4))

        # Project to 1 channel (no norm/relu) and squash to [-1, 1].
        net = tf.nn.tanh(
            layers.general_conv2d(stack,
                                  1,
                                  3,
                                  3,
                                  1,
                                  1,
                                  0.02,
                                  "SAME",
                                  "c6",
                                  do_norm=False,
                                  do_relu=False))

        # Average the generated map with a resized copy of the input.
        net = (net + tf.image.resize_images(
            inputA, tf.shape(net)[1:3], align_corners=True)) / 2.0

        return net
Example #12
0
def build_generator_resnet_9blocks_tf(inputgen, name="generator", skip=False):
    """CycleGAN-style generator: 3-conv encoder, 9 residual blocks,
    2-deconv + 1-conv decoder, tanh output.

    When `skip` is exactly True, the input image is added to the final
    convolution output before the tanh.
    """
    with tf.variable_scope(name):
        outer_k = 7
        inner_k = 3
        pad_mode = "REFLECT"

        # Reflect-pad so the 7x7 conv below preserves the spatial size.
        padded = tf.pad(inputgen,
                        [[0, 0], [inner_k, inner_k], [inner_k, inner_k], [0, 0]],
                        pad_mode)

        # Encoder: extracts the main features of the input image while
        # downsampling, e.g. [256,256,3] -> [256,256,32] -> [128,128,64]
        # -> [64,64,128].
        feat = layers.general_conv2d(padded, ngf, outer_k, outer_k, 1, 1,
                                     0.02, name="c1")
        feat = layers.general_conv2d(feat, ngf * 2, inner_k, inner_k, 2, 2,
                                     0.02, "SAME", "c2")
        feat = layers.general_conv2d(feat, ngf * 4, inner_k, inner_k, 2, 2,
                                     0.02, "SAME", "c3")

        # Transformer: nine residual blocks map the feature vector from the
        # source domain to the target domain. Because part of each block's
        # input is added directly to its output, the original image's
        # characteristics (e.g. object size and shape) are preserved,
        # which is why residual blocks suit this translation task.
        for i in range(1, 10):
            feat = build_resnet_block(feat, ngf * 4, "r%d" % i, pad_mode)

        # Decoder: the reverse of the encoder — deconvolutions restore the
        # low-level features and full resolution; the final output is
        # [BATCH_SIZE, 256, 256, IMG_CHANNELS].
        feat = layers.general_deconv2d(feat, [BATCH_SIZE, 128, 128, ngf * 2],
                                       ngf * 2, inner_k, inner_k, 2, 2, 0.02,
                                       "SAME", "c4")
        feat = layers.general_deconv2d(feat, [BATCH_SIZE, 256, 256, ngf], ngf,
                                       inner_k, inner_k, 2, 2, 0.02, "SAME",
                                       "c5")
        logits = layers.general_conv2d(feat, IMG_CHANNELS, outer_k, outer_k,
                                       1, 1, 0.02, "SAME", "c6",
                                       do_norm=False, do_relu=False)

        if skip is True:
            out_gen = tf.nn.tanh(inputgen + logits, "t1")
        else:
            out_gen = tf.nn.tanh(logits, "t1")

        return out_gen
def build_generator_resnet_9blocks_bis(inputgen,
                                       mask,
                                       transition_rate,
                                       name="generator",
                                       skip=False):
    """Build a 9-block ResNet generator that uses partial convolutions.

    Encoder (3 partial convs) -> 9 partial-conv residual blocks ->
    decoder (2 deconvs + 1 plain conv) -> tanh.

    Args:
        inputgen: Input image tensor; the hard-coded decoder output shapes
            below imply [BATCH_SIZE, 256, 256, C] is expected.
        mask: Mask tensor with the same spatial size as ``inputgen``; it is
            propagated and updated through every partial convolution.
        transition_rate: Unused here; kept so the signature stays compatible
            with existing callers.
        name: Variable scope name for all layers in this generator.
        skip: If True, add the original (unpadded) input image to the final
            conv output before the tanh (residual skip connection).

    Returns:
        Generated image tensor squashed to [-1, 1] by tanh.
    """
    with tf.variable_scope(name):
        f = 7
        ks = 3
        padding = "CONSTANT"

        # BUG FIX: pad into fresh variables instead of clobbering the
        # arguments. The original reassigned `inputgen` to the padded
        # tensor, so the `skip` branch below added a
        # [B, 256+2*ks, 256+2*ks, C] tensor to the [B, 256, 256, C]
        # decoder output -- a shape mismatch. The sibling generators in
        # this file keep a separate `pad_input` and add the unpadded image.
        pad_input = tf.pad(inputgen, [[0, 0], [ks, ks], [ks, ks], [0, 0]],
                           padding)
        pad_mask = tf.pad(mask, [[0, 0], [ks, ks], [ks, ks], [0, 0]],
                          padding)

        # Encoder: three partial convolutions; each returns (features, mask).
        o_c1, pad_mask = layers.general_partial_conv2d(
            pad_input, pad_mask, tf.constant(False, dtype=bool),
            ngf, f, f, 1, 1, 0.02, name="c1")
        o_c2, pad_mask = layers.general_partial_conv2d(
            o_c1, pad_mask, tf.constant(False, dtype=bool),
            ngf * 2, ks, ks, 2, 2, 0.02, padding='same', name="c2")
        o_c3, pad_mask = layers.general_partial_conv2d(
            o_c2, pad_mask, tf.constant(False, dtype=bool),
            ngf * 4, ks, ks, 2, 2, 0.02, padding='same', name="c3")

        # Transformation: nine partial-conv residual blocks ("r1".."r9"),
        # threading the mask from one block to the next.
        o_r, mask_r = o_c3, pad_mask
        for i in range(1, 10):
            o_r, mask_r = build_partial_resnet_block(o_r, mask_r, ngf * 4,
                                                     "r%d" % i)

        # Decoder: upsample back to the input resolution, then project to
        # IMG_CHANNELS with no norm/relu so tanh gets raw activations.
        o_c4 = layers.general_deconv2d(o_r, [BATCH_SIZE, 128, 128, ngf * 2],
                                       ngf * 2, ks, ks, 2, 2, 0.02, "SAME",
                                       "c4")
        o_c5 = layers.general_deconv2d(o_c4, [BATCH_SIZE, 256, 256, ngf], ngf,
                                       ks, ks, 2, 2, 0.02, "SAME", "c5")
        o_c6 = layers.general_conv2d(o_c5,
                                     IMG_CHANNELS,
                                     f,
                                     f,
                                     1,
                                     1,
                                     0.02,
                                     "SAME",
                                     "c6",
                                     do_norm=False,
                                     do_relu=False)

        if skip is True:
            # Residual connection uses the ORIGINAL (unpadded) input so the
            # spatial sizes match the 256x256 decoder output.
            out_gen = tf.nn.tanh(inputgen + o_c6, "t1")
        else:
            out_gen = tf.nn.tanh(o_c6, "t1")

        return out_gen