Example #1
File: model.py Project: liuaishan/AdvPGAN
    def generator_pix2pix(self, image, reuse=False):
        output_size = self.patch_size
        s = math.ceil(output_size/16.0)*16
        s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)
        # gf_dim = 16 # Dimension of gen filters in first conv layer.
        with tf.variable_scope("generator") as scope:

            # image is patch_size x patch_size x 3 (16 x 16 x 3 here; see encoder_1 below)
            if reuse:
                tf.get_variable_scope().reuse_variables()
            else:
                assert not tf.get_variable_scope().reuse
            # TODO: is input scaling needed here?
            # image = image / 255.0
            # liuas 2018.5.9
            # trick: use lrelu instead of relu

            ngf = 16 # number of generator filters in first conv layer
            # encoder_1: [batch, 16, 16, 3] => [batch, 8, 8, ngf]
            conv1 = conv2d(image, ngf, k_h=4, k_w=4, name='adv_g_enc1')
            conv2 = layer_norm(conv2d(lrelu(conv1, 0.2), ngf*2, k_h=4, k_w=4, name='adv_g_enc2'), name='adv_g_enc2ln')
            conv3 = layer_norm(conv2d(lrelu(conv2, 0.2), ngf*4, k_h=4, k_w=4, name='adv_g_enc3'), name='adv_g_enc3ln')
            conv4 = layer_norm(conv2d(lrelu(conv3, 0.2), ngf*8, k_h=4, k_w=4, name='adv_g_enc4'), name='adv_g_enc4ln')
            deconv1, _, _ = deconv2d(tf.nn.relu(conv4), [self.batch_size, s8, s8, ngf*4], k_h=4, k_w=4, name='adv_g_dec1', with_w=True)
            deconv1 = layer_norm(deconv1, name="adv_g_dec1ln")
            dec_input = tf.concat([deconv1, conv3], axis=3)  # skip connection from encoder
            deconv2, _, _ = deconv2d(tf.nn.relu(dec_input), [self.batch_size, s4, s4, ngf*2], k_h=4, k_w=4, name='adv_g_dec2', with_w=True)
            deconv2 = layer_norm(deconv2, name="adv_g_dec2ln")
            dec_input = tf.concat([deconv2, conv2], axis=3)  # skip connection
            deconv3, _, _ = deconv2d(tf.nn.relu(dec_input), [self.batch_size, s2, s2, ngf], k_h=4, k_w=4, name='adv_g_dec3', with_w=True)
            deconv3 = layer_norm(deconv3, name="adv_g_dec3ln")
            dec_input = tf.concat([deconv3, conv1], axis=3)  # skip connection
            deconv4, _, _ = deconv2d(tf.nn.relu(dec_input), [self.batch_size, output_size, output_size, 3], k_h=4, k_w=4, name='adv_g_dec4', with_w=True)

            return tf.tanh(deconv4)
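A hedged usage sketch for the generator above, assuming a TF 1.x graph and an AdvPGAN model instance with patch_size=16 and batch_size=64 (the encoder_1 comment suggests 16 x 16 x 3 patches; the placeholder wiring is illustrative, not taken from the repository):

import tensorflow as tf

patch = tf.placeholder(tf.float32, [64, 16, 16, 3], name='patch')
fake_patch = model.generator_pix2pix(patch)                # first call creates the variables
fake_patch_2 = model.generator_pix2pix(patch, reuse=True)  # later calls share them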
Example #2
File: model.py Project: dcfucheng/LAPGAN
 def discriminator(self, x, reuse=None):
     with tf.variable_scope('discriminator', reuse=reuse):
         h0 = lrelu(conv2d(x, self.df_dim, name='d_h0_conv'))
         h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim * 2, name='d_h1_conv')))
         h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim * 4, name='d_h2_conv')))
         h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim * 8, name='d_h3_conv')))
         h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')
         return tf.nn.sigmoid(h4)
Example #3
def discriminator(image, reuse=False):
    d_bn1 = ops.batch_norm(FLAGS.batch_size, name='d_bn1')
    d_bn2 = ops.batch_norm(FLAGS.batch_size, name='d_bn2')
    d_bn3 = ops.batch_norm(FLAGS.batch_size, name='d_bn3')
    image = tf.reshape(image, [FLAGS.batch_size, FLAGS.image_size, FLAGS.image_size, 1])
    if reuse: tf.get_variable_scope().reuse_variables()
    h0 = ops.lrelu(ops.conv2d(image, FLAGS.df_dim, name='d_h0_conv'))
    h1 = ops.lrelu(d_bn1(ops.conv2d(h0, FLAGS.df_dim * 2, name='d_h1_conv')))
    h2 = ops.lrelu(d_bn2(ops.conv2d(h1, FLAGS.df_dim * 4, name='d_h2_conv')))
    h3 = ops.lrelu(d_bn3(ops.conv2d(h2, FLAGS.df_dim * 8, name='d_h3_conv')))
    h4 = ops.linear(tf.reshape(h3, [FLAGS.batch_size, -1]), 1, 'd_h3_lin')
    return tf.nn.sigmoid(h4)
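This variant builds its batch-norm layers inside the function and reads every hyperparameter from tf.app.flags. A minimal sketch of the flag definitions the snippet assumes (the flag names match the code; the default values are illustrative):

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_integer('batch_size', 64, 'Batch size')
flags.DEFINE_integer('image_size', 28, 'Input image height/width (single channel)')
flags.DEFINE_integer('df_dim', 64, 'Discriminator filters in the first conv layer')
FLAGS = flags.FLAGS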
Example #4
    def tower(bn, suffix):
        assert not self.y_dim
        print("\ttower " + suffix)
        h0 = lrelu(bn(conv2d(noisy_image, self.df_dim, name='d_h0_conv' + suffix, d_h=2, d_w=2,
            k_w=3, k_h=3), "d_bn_0" + suffix))
        print("\th0 ", h0.get_shape())
        h1 = lrelu(bn(conv2d(h0, self.df_dim * 2, name='d_h1_conv' + suffix, d_h=2, d_w=2,
            k_w=3, k_h=3), "d_bn_1" + suffix))
        print("\th1 ", h1.get_shape())
        h2 = lrelu(bn(conv2d(h1, self.df_dim * 4, name='d_h2_conv' + suffix, d_h=2, d_w=2,
            k_w=3, k_h=3), "d_bn_2" + suffix))
        print("\th2 ", h2.get_shape())

        h3 = lrelu(bn(conv2d(h2, self.df_dim*4, name='d_h3_conv' + suffix, d_h=1, d_w=1,
            k_w=3, k_h=3), "d_bn_3" + suffix))
        print("\th3 ", h3.get_shape())
        h4 = lrelu(bn(conv2d(h3, self.df_dim*4, name='d_h4_conv' + suffix, d_h=1, d_w=1,
            k_w=3, k_h=3), "d_bn_4" + suffix))
        print("\th4 ", h4.get_shape())
        h5 = lrelu(bn(conv2d(h4, self.df_dim*8, name='d_h5_conv' + suffix, d_h=2, d_w=2,
            k_w=3, k_h=3), "d_bn_5" + suffix))
        print("\th5 ", h5.get_shape())

        h6 = lrelu(bn(conv2d(h5, self.df_dim*8, name='d_h6_conv' + suffix,
            k_w=3, k_h=3), "d_bn_6" + suffix))
        print("\th6 ", h6.get_shape())
        # return tf.reduce_mean(h6, [1, 2])
        h6_reshaped = tf.reshape(h6, [batch_size, -1])
        print('\th6_reshaped: ', h6_reshaped.get_shape())

        h7 = lrelu(bn(linear(h6_reshaped, self.df_dim * 40, scope="d_h7" + suffix), "d_bn_7" + suffix))

        return h7
Example #5
    def discriminate(self, x_var, reuse=False):

        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            conv1 = lrelu(conv2d(x_var, output_dim=64, name='dis_conv1'))
            conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='dis_conv2'), scope='dis_bn1'))
            conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='dis_conv3'), scope='dis_bn2'))
            conv4 = conv2d(conv3, output_dim=512, name='dis_conv4')
            middle_conv = conv4
            conv4 = lrelu(instance_norm(conv4, scope='dis_bn3'))
            conv5 = lrelu(instance_norm(conv2d(conv4, output_dim=1024, name='dis_conv5'), scope='dis_bn4'))

            conv6 = conv2d(conv5, output_dim=2, k_w=4, k_h=4, d_h=1, d_w=1, padding='VALID', name='dis_conv6')

            return conv6, middle_conv
Example #6
    def encode_decode_1(self, x, reuse=False):

        with tf.variable_scope("encode_decode_1") as scope:
            if reuse:
                scope.reuse_variables()

            conv1 = lrelu(instance_norm(conv2d(x, output_dim=64, k_w=5, k_h=5, d_w=1, d_h=1, name='e_c1'), scope='e_in1'))
            conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='e_c2'), scope='e_in2'))
            conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='e_c3'), scope='e_in3'))
            # for x_{1}
            de_conv1 = lrelu(instance_norm(de_conv(conv3, output_shape=[self.batch_size, 64, 64, 128]
                                                  , name='e_d1', k_h=3, k_w=3), scope='e_in4'))
            de_conv2 = lrelu(instance_norm(de_conv(de_conv1, output_shape=[self.batch_size, 128, 128, 64]
                                                  , name='e_d2', k_w=3, k_h=3), scope='e_in5'))
            x_tilde1 = conv2d(de_conv2, output_dim=3, d_h=1, d_w=1, name='e_c4')

            return x_tilde1
Example #7
File: model.py Project: liuaishan/AdvPGAN
    def naive_discriminator(self, image, y = None, reuse = False):
        with tf.variable_scope("discriminator") as scope:

            # image is 128 x 128 x (input_c_dim + output_c_dim)
            if reuse:
                tf.get_variable_scope().reuse_variables()
            else:
                assert not tf.get_variable_scope().reuse

            h0 = lrelu(conv2d(image, self.df_dim, name='adv_d_h0_conv'))
            # h0 is (64 x 64 x self.df_dim), assuming conv2d's default stride of 2
            h1 = lrelu(layer_norm(conv2d(h0, self.df_dim * 2, name='adv_d_h1_conv'), name="adv_d_ln1"))
            # h1 is (32 x 32 x self.df_dim*2)
            h2 = lrelu(layer_norm(conv2d(h1, self.df_dim * 4, name='adv_d_h2_conv'), name="adv_d_ln2"))
            # h2 is (16 x 16 x self.df_dim*4)
            h3 = lrelu(layer_norm(conv2d(h2, self.df_dim * 8, d_h=1, d_w=1, name='adv_d_h3_conv'), name="adv_d_ln3"))
            # h3 is (16 x 16 x self.df_dim*8), since this conv uses stride 1
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'adv_d_h3_lin')

            return tf.nn.sigmoid(h4), h4
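Returning both tf.nn.sigmoid(h4) and the raw logit h4 is the usual pattern for pairing a discriminator with tf.nn.sigmoid_cross_entropy_with_logits, which needs the pre-sigmoid value for numerical stability. A hedged sketch of how the two outputs are typically consumed (the variable names are illustrative):

d_real, d_real_logits = model.naive_discriminator(real_images)
d_fake, d_fake_logits = model.naive_discriminator(fake_images, reuse=True)
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_real_logits, labels=tf.ones_like(d_real_logits)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_fake_logits, labels=tf.zeros_like(d_fake_logits)))
d_loss = d_loss_real + d_loss_fake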
Example #8
 def discriminator(self, images, image_size, reuse=False):
     image_size //= 64  # six stride-2 convs shrink height/width by 2**6 = 64; keep it an int
     with tf.variable_scope('discriminator', reuse=reuse):
         gd_h0 = lrelu(conv2d(images, 64, name="d_gd_h0_conv"))
         gd_h1 = lrelu(conv2d(gd_h0, 128, name='d_gd_h1_conv'))
         gd_h2 = lrelu(conv2d(gd_h1, 256, name='d_gd_h2_conv'))
         gd_h3 = lrelu(conv2d(gd_h2, 512, name='d_gd_h3_conv'))
         gd_h4 = lrelu(conv2d(gd_h3, 512, name='d_gd_h4_conv'))
         gd_h5 = lrelu(conv2d(gd_h4, 512, name='d_gd_h5_conv'))
         gd_h = linear(tf.reshape(
             gd_h5, [self.batch_size, int(512 * image_size * image_size)]), 64 * image_size * image_size, 'd_gd_linear')
         return linear(gd_h, 1, 'd_linear')
Example #9
    def discriminator(self, images, image_size, reuse=False):
        image_size //= 64  # six stride-2 convs shrink height/width by 2**6 = 64; keep it an int
        with tf.variable_scope('discriminator', reuse=reuse):
            gd_h0 = lrelu(conv2d(images, 64, name="d_gd_h0_conv"))
            gd_h1 = lrelu(self.d_bns[0](conv2d(gd_h0, 128, name='d_gd_h1_conv')))
            gd_h2 = lrelu(self.d_bns[1](conv2d(gd_h1, 256, name='d_gd_h2_conv')))
            gd_h3 = lrelu(self.d_bns[2](conv2d(gd_h2, 512, name='d_gd_h3_conv')))
            gd_h4 = lrelu(self.d_bns[3](conv2d(gd_h3, 512, name='d_gd_h4_conv')))
            gd_h5 = lrelu(self.d_bns[4](conv2d(gd_h4, 512, name='d_gd_h5_conv')))
            gd_h = linear(tf.reshape(
                gd_h5, [self.batch_size, int(512 * image_size * image_size)]), 64 * image_size * image_size, 'd_gd_linear')

            #ld_h0 = lrelu(conv2d(masked_images, 64, name="d_ld_h0_conv"))
            #ld_h1 = lrelu(self.local_d_bns[0](conv2d(ld_h0, 128, name='d_ld_h1_conv')))
            #ld_h2 = lrelu(self.local_d_bns[1](conv2d(ld_h1, 256, name='d_ld_h2_conv')))
            #ld_h3 = lrelu(self.local_d_bns[2](conv2d(ld_h2, 512, name='d_ld_h3_conv')))
            #ld_h4 = lrelu(self.local_d_bns[3](conv2d(ld_h3, 512, name='d_ld_h4_conv')))
            #ld_h = linear(tf.reshape(
            #    ld_h4, [self.batch_size, int(512 * image_size * image_size)]), 64 * image_size * image_size, 'd_ld_linear')

            #h = linear(tf.concat([gd_h, ld_h], 1), 1, 'd_linear')
            h = linear(gd_h, 1, 'd_linear')
            return tf.nn.sigmoid(h), h
Example #10
File: PGGAN.py Project: boneanxs/PGGAN
    def generate(self, z_var, pg=1, t=False, alpha_trans=0.0):

        with tf.variable_scope('generator') as scope:

            de = tf.reshape(
                z_var,
                [self.batch_size, 1, 1,
                 tf.cast(self.get_nf(1), tf.int32)])
            de = conv2d(de,
                        output_dim=self.get_nf(1),
                        k_h=4,
                        k_w=4,
                        d_w=1,
                        d_h=1,
                        padding='Other',
                        name='gen_n_1_conv')
            de = Pixl_Norm(lrelu(de))
            de = tf.reshape(
                de, [self.batch_size, 4, 4,
                     tf.cast(self.get_nf(1), tf.int32)])
            de = conv2d(de,
                        output_dim=self.get_nf(1),
                        d_w=1,
                        d_h=1,
                        name='gen_n_2_conv')
            de = Pixl_Norm(lrelu(de))

            for i in range(pg - 1):

                if i == pg - 2 and t:
                    #To RGB
                    de_iden = conv2d(de,
                                     output_dim=3,
                                     k_w=1,
                                     k_h=1,
                                     d_w=1,
                                     d_h=1,
                                     name='gen_y_rgb_conv_{}'.format(
                                         de.shape[1]))
                    de_iden = upscale(de_iden, 2)

                de = upscale(de, 2)
                de = Pixl_Norm(
                    lrelu(
                        conv2d(de,
                               output_dim=self.get_nf(i + 1),
                               d_w=1,
                               d_h=1,
                               name='gen_n_conv_1_{}'.format(de.shape[1]))))
                de = Pixl_Norm(
                    lrelu(
                        conv2d(de,
                               output_dim=self.get_nf(i + 1),
                               d_w=1,
                               d_h=1,
                               name='gen_n_conv_2_{}'.format(de.shape[1]))))

            #To RGB
            de = conv2d(de,
                        output_dim=3,
                        k_w=1,
                        k_h=1,
                        d_w=1,
                        d_h=1,
                        name='gen_y_rgb_conv_{}'.format(de.shape[1]))

            if pg == 1:
                return de

            if t:
                de = (1 - alpha_trans) * de_iden + alpha_trans * de

            return de
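The final blend, de = (1 - alpha_trans) * de_iden + alpha_trans * de, is PGGAN's fade-in: while a new resolution stage is being introduced (t=True), the output interpolates between the upscaled RGB image of the previous stage (de_iden) and the new stage's RGB output. A minimal sketch of how alpha_trans is typically ramped from 0 to 1 within a stage (the linear schedule is illustrative, not taken from this repository):

def alpha_schedule(step, steps_per_stage):
    # Linear ramp: 0 at the start of the stage, 1 once the fade-in completes.
    return min(max(step / float(steps_per_stage), 0.0), 1.0)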
Example #11
    def tower(bn, suffix):
        assert not self.y_dim
        print("\ttower " + suffix)
        h0 = lrelu(
            bn(
                conv2d(noisy_image,
                       self.df_dim,
                       name='d_h0_conv' + suffix,
                       d_h=2,
                       d_w=2,
                       k_w=3,
                       k_h=3), "d_bn_0" + suffix))
        print("\th0 ", h0.get_shape())
        h1 = lrelu(
            bn(
                conv2d(h0,
                       self.df_dim * 2,
                       name='d_h1_conv' + suffix,
                       d_h=2,
                       d_w=2,
                       k_w=3,
                       k_h=3), "d_bn_1" + suffix))
        print("\th1 ", h1.get_shape())
        h2 = lrelu(
            bn(
                conv2d(h1,
                       self.df_dim * 4,
                       name='d_h2_conv' + suffix,
                       d_h=2,
                       d_w=2,
                       k_w=3,
                       k_h=3), "d_bn_2" + suffix))
        print("\th2 ", h2.get_shape())

        h3 = lrelu(
            bn(
                conv2d(h2,
                       self.df_dim * 4,
                       name='d_h3_conv' + suffix,
                       d_h=1,
                       d_w=1,
                       k_w=3,
                       k_h=3), "d_bn_3" + suffix))
        print("\th3 ", h3.get_shape())
        h4 = lrelu(
            bn(
                conv2d(h3,
                       self.df_dim * 4,
                       name='d_h4_conv' + suffix,
                       d_h=1,
                       d_w=1,
                       k_w=3,
                       k_h=3), "d_bn_4" + suffix))
        print("\th4 ", h4.get_shape())
        h5 = lrelu(
            bn(
                conv2d(h4,
                       self.df_dim * 8,
                       name='d_h5_conv' + suffix,
                       d_h=2,
                       d_w=2,
                       k_w=3,
                       k_h=3), "d_bn_5" + suffix))
        print("\th5 ", h5.get_shape())

        h6 = lrelu(
            bn(
                conv2d(h5,
                       self.df_dim * 8,
                       name='d_h6_conv' + suffix,
                       k_w=3,
                       k_h=3), "d_bn_6" + suffix))
        print("\th6 ", h6.get_shape())
        # return tf.reduce_mean(h6, [1, 2])
        h6_reshaped = tf.reshape(h6, [batch_size, -1])
        print('\th6_reshaped: ', h6_reshaped.get_shape())

        h7 = lrelu(
            bn(linear(h6_reshaped, self.df_dim * 40, scope="d_h7" + suffix),
               "d_bn_7" + suffix))

        return h7
Example #12
File: network.py Project: remicres/sr4rs
def generator(lr_image, scope, nchannels, nresblocks, dim):
    """
    Generator
    """
    hr_images = dict()

    def conv_upsample(x, dim, ksize, name):
        y = upscale2d_conv2d(x, dim, ksize, name)
        y = blur2d(y)
        y = lrelu(y)
        y = pixel_norm(y)
        return y

    def _residule_block(x, dim, name):
        with tf.compat.v1.variable_scope(name):
            y = conv(x, dim, 3, 1, "conv1")
            y = lrelu(y)
            y = pixel_norm(y)
            y = conv(y, dim, 3, 1, "conv2")
            y = pixel_norm(y)
            return y + x

    def conv_bn(x, dim, ksize, name):
        y = conv(x, dim, ksize, 1, name)
        y = lrelu(y)
        y = pixel_norm(y)
        return y

    def _make_output(net, factor):
        hr_images[factor] = conv(net, nchannels, 1, 1, "output")

    with tf.compat.v1.variable_scope(scope, reuse=tf.compat.v1.AUTO_REUSE):
        with tf.compat.v1.variable_scope("encoder"):
            net = lrelu(conv(lr_image, dim, 9, 1, "conv1_9x9"))
            conv1 = net
            for i in range(nresblocks):
                net = _residule_block(net,
                                      dim=dim,
                                      name="ResBlock{}".format(i))

        with tf.compat.v1.variable_scope("res_1x"):
            net = conv(net, dim, 3, 1, "conv1")
            net = pixel_norm(net)
            net += conv1
            _make_output(net, factor=4)

        with tf.compat.v1.variable_scope("res_2x"):
            net = conv_upsample(net, 4 * dim, 3, "conv_upsample")
            net = conv_bn(net, 4 * dim, 3, "conv1")
            net = conv_bn(net, 4 * dim, 3, "conv2")
            net = conv_bn(net, 4 * dim, 5, "conv3")
            _make_output(net, factor=2)

        with tf.compat.v1.variable_scope("res_4x"):
            net = conv_upsample(net, 4 * dim, 3, "conv_upsample")
            net = conv_bn(net, 4 * dim, 3, "conv1")
            net = conv_bn(net, 4 * dim, 3, "conv2")
            net = conv_bn(net, 4 * dim, 9, "conv3")
            _make_output(net, factor=1)

        return hr_images
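The generator returns a dict of RGB outputs at three scales, keyed by the factor passed to _make_output: 4 (input resolution), 2 (2x upsampled), and 1 (4x upsampled, the full super-resolution output). A hedged usage sketch (the placeholder shape and hyperparameter values are illustrative, not from the repository):

import tensorflow as tf

lr_image = tf.compat.v1.placeholder(tf.float32, [None, 64, 64, 4], name='lr_image')
outputs = generator(lr_image, scope='gen', nchannels=4, nresblocks=16, dim=64)
sr_image = outputs[1]  # factor 1: the 4x-upsampled result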
Example #13
File: network.py Project: remicres/sr4rs
 def _conv_downsample(x, dim, ksize, name):
     y = conv2d_downscale2d(x, dim, ksize, name=name)
     y = lrelu(y)
     return y
Example #14
File: network.py Project: remicres/sr4rs
 def conv_bn(x, dim, ksize, name):
     y = conv(x, dim, ksize, 1, name)
     y = lrelu(y)
     y = pixel_norm(y)
     return y
Example #15
File: network.py Project: remicres/sr4rs
 def conv_upsample(x, dim, ksize, name):
     y = upscale2d_conv2d(x, dim, ksize, name)
     y = blur2d(y)
     y = lrelu(y)
     y = pixel_norm(y)
     return y
Example #16
def rn_generator(x, reuse=False):
    if reuse:
        tf.get_variable_scope().reuse_variables()

    # Encoder
    conv_1 = ops.lrelu(
        ops.cnn_2d(x,
                   weight_shape=[4, 4, 3, 64],
                   strides=[1, 2, 2, 1],
                   name='g_rn_e_conv_1'))
    conv_2 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d(conv_1,
                                  weight_shape=[4, 4, 64, 128],
                                  strides=[1, 2, 2, 1],
                                  name='g_rn_e_conv_2'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='g_rn_e_batch_Norm_2'))
    conv_3 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d(conv_2,
                                  weight_shape=[4, 4, 128, 256],
                                  strides=[1, 2, 2, 1],
                                  name='g_rn_e_conv_3'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='g_rn_e_batch_Norm_3'))
    conv_4 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d(conv_3,
                                  weight_shape=[4, 4, 256, 512],
                                  strides=[1, 2, 2, 1],
                                  name='g_rn_e_conv_4'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='g_rn_e_batch_Norm_4'))
    conv_5 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d(conv_4,
                                  weight_shape=[4, 4, 512, 512],
                                  strides=[1, 2, 2, 1],
                                  name='g_rn_e_conv_5'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='g_rn_e_batch_Norm_5'))
    conv_6 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d(conv_5,
                                  weight_shape=[4, 4, 512, 512],
                                  strides=[1, 2, 2, 1],
                                  name='g_rn_e_conv_6'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='g_rn_e_batch_Norm_6'))
    conv_7 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d(conv_6,
                                  weight_shape=[4, 4, 512, 512],
                                  strides=[1, 2, 2, 1],
                                  name='g_rn_e_conv_7'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='g_rn_e_batch_Norm_7'))
    conv_8 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d(conv_7,
                                  weight_shape=[4, 4, 512, 512],
                                  strides=[1, 2, 2, 1],
                                  name='g_rn_e_conv_8'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='g_rn_e_batch_Norm_8'))

    # Decoder
    dconv_1 = ops.lrelu(
        tf.nn.dropout(ops.batch_norm(ops.cnn_2d_trans(
            conv_8,
            weight_shape=[2, 2, 512, 512],
            strides=[1, 2, 2, 1],
            output_shape=[
                mc.batch_size,
                conv_8.get_shape()[1].value + 1,
                conv_8.get_shape()[2].value + 1, 512
            ],
            name='g_rn_d_dconv_1'),
                                     center=True,
                                     scale=True,
                                     is_training=True,
                                     scope='g_rn_d_batch_Norm_1'),
                      keep_prob=0.5))
    dconv_1 = tf.concat([dconv_1, conv_7], axis=3)
    dconv_2 = ops.lrelu(
        tf.nn.dropout(ops.batch_norm(ops.cnn_2d_trans(
            dconv_1,
            weight_shape=[4, 4, 512, 1024],
            strides=[1, 2, 2, 1],
            output_shape=[
                mc.batch_size,
                dconv_1.get_shape()[1].value * 2 - 1,
                dconv_1.get_shape()[2].value * 2, 512
            ],
            name='g_rn_d_dconv_2'),
                                     center=True,
                                     scale=True,
                                     is_training=True,
                                     scope='g_rn_d_batch_Norm_2'),
                      keep_prob=0.5))
    dconv_2 = tf.concat([dconv_2, conv_6], axis=3)
    dconv_3 = ops.lrelu(
        tf.nn.dropout(ops.batch_norm(ops.cnn_2d_trans(
            dconv_2,
            weight_shape=[4, 4, 512, 1024],
            strides=[1, 2, 2, 1],
            output_shape=[
                mc.batch_size,
                dconv_2.get_shape()[1].value * 2 - 1,
                dconv_2.get_shape()[2].value * 2 - 1, 512
            ],
            name='g_rn_d_dconv_3'),
                                     center=True,
                                     scale=True,
                                     is_training=True,
                                     scope='g_rn_d_batch_Norm_3'),
                      keep_prob=0.5))
    dconv_3 = tf.concat([dconv_3, conv_5], axis=3)
    dconv_4 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d_trans(dconv_3,
                                        weight_shape=[4, 4, 512, 1024],
                                        strides=[1, 2, 2, 1],
                                        output_shape=[
                                            mc.batch_size,
                                            dconv_3.get_shape()[1].value * 2,
                                            dconv_3.get_shape()[2].value * 2,
                                            512
                                        ],
                                        name='g_rn_d_dconv_4'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='g_rn_d_batch_Norm_4'))
    dconv_4 = tf.concat([dconv_4, conv_4], axis=3)
    dconv_5 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d_trans(dconv_4,
                                        weight_shape=[4, 4, 256, 1024],
                                        strides=[1, 2, 2, 1],
                                        output_shape=[
                                            mc.batch_size,
                                            dconv_4.get_shape()[1].value * 2,
                                            dconv_4.get_shape()[2].value * 2,
                                            256
                                        ],
                                        name='g_rn_d_dconv_5'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='g_rn_d_batch_Norm_5'))
    dconv_5 = tf.concat([dconv_5, conv_3], axis=3)
    dconv_6 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d_trans(dconv_5,
                                        weight_shape=[4, 4, 128, 512],
                                        strides=[1, 2, 2, 1],
                                        output_shape=[
                                            mc.batch_size,
                                            dconv_5.get_shape()[1].value * 2,
                                            dconv_5.get_shape()[2].value * 2,
                                            128
                                        ],
                                        name='g_rn_d_dconv_6'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='g_rn_d_batch_Norm_6'))
    dconv_6 = tf.concat([dconv_6, conv_2], axis=3)
    dconv_7 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d_trans(dconv_6,
                                        weight_shape=[4, 4, 64, 256],
                                        strides=[1, 2, 2, 1],
                                        output_shape=[
                                            mc.batch_size,
                                            dconv_6.get_shape()[1].value * 2,
                                            dconv_6.get_shape()[2].value * 2,
                                            64
                                        ],
                                        name='g_rn_d_dconv_7'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='g_rn_d_batch_Norm_7'))
    dconv_7 = tf.concat([dconv_7, conv_1], axis=3)
    dconv_8 = tf.nn.tanh(
        ops.cnn_2d_trans(dconv_7,
                         weight_shape=[4, 4, 3, 128],
                         strides=[1, 2, 2, 1],
                         output_shape=[
                             mc.batch_size,
                             dconv_7.get_shape()[1].value * 2,
                             dconv_7.get_shape()[2].value * 2, 3
                         ],
                         name='g_rn_d_dconv_8'))
    return dconv_8
Example #17
def ali_encoder(opts, inputs, is_training=False, reuse=False):
    num_units = opts['e_num_filters']
    layer_params = []
    layer_params.append([5, 1, num_units // 8])
    layer_params.append([4, 2, num_units // 4])
    layer_params.append([4, 1, num_units // 2])
    layer_params.append([4, 2, num_units])
    layer_params.append([4, 1, num_units * 2])
    # For convolution: (n - k) / stride + 1 = s
    # For transposed: (s - 1) * stride + k = n
    layer_x = inputs
    height = int(layer_x.get_shape()[1])
    width = int(layer_x.get_shape()[2])
    assert height == width
    for i, (kernel, stride, channels) in enumerate(layer_params):
        height = (height - kernel) // stride + 1
        width = height
        layer_x = ops.conv2d(opts,
                             layer_x,
                             channels,
                             d_h=stride,
                             d_w=stride,
                             scope='h%d_conv' % i,
                             conv_filters_dim=kernel,
                             padding='VALID')
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts,
                                     layer_x,
                                     is_training,
                                     reuse,
                                     scope='h%d_bn' % i)
        layer_x = ops.lrelu(layer_x, 0.1)
    assert height == 1
    assert width == 1

    # Then two 1x1 convolutions.
    layer_x = ops.conv2d(opts,
                         layer_x,
                         num_units * 2,
                         d_h=1,
                         d_w=1,
                         scope='conv2d_1x1',
                         conv_filters_dim=1)
    if opts['batch_norm']:
        layer_x = ops.batch_norm(opts,
                                 layer_x,
                                 is_training,
                                 reuse,
                                 scope='hfinal_bn')
    layer_x = ops.lrelu(layer_x, 0.1)
    layer_x = ops.conv2d(opts,
                         layer_x,
                         num_units // 2,
                         d_h=1,
                         d_w=1,
                         scope='conv2d_1x1_2',
                         conv_filters_dim=1)

    if opts['e_noise'] != 'gaussian':
        res = ops.linear(opts, layer_x, opts['zdim'], scope='hlast_lin')
        return res
    else:
        mean = ops.linear(opts, layer_x, opts['zdim'], scope='mean_lin')
        log_sigmas = ops.linear(opts,
                                layer_x,
                                opts['zdim'],
                                scope='log_sigmas_lin')
        return mean, log_sigmas
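The shape formula in the code comment can be checked directly: with 'VALID' padding, each convolution maps a spatial size n to (n - kernel) // stride + 1. Assuming a 32 x 32 input (CIFAR-sized; the actual size depends on opts['dataset']), the five layers shrink the feature map 32 -> 28 -> 13 -> 10 -> 4 -> 1, which is what the assert height == 1 check expects:

h = 32  # assumed input resolution, for illustration
for kernel, stride in [(5, 1), (4, 2), (4, 1), (4, 2), (4, 1)]:
    h = (h - kernel) // stride + 1
    print(h)  # 28, 13, 10, 4, 1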
Example #18
File: PGGAN.py Project: boneanxs/PGGAN
    def discriminate(self, conv, reuse=False, pg=1, t=False, alpha_trans=0.01):

        #dis_as_v = []
        with tf.variable_scope("discriminator") as scope:

            if reuse:
                scope.reuse_variables()
            if t:
                conv_iden = avgpool2d(conv)
                #from RGB
                conv_iden = lrelu(
                    conv2d(conv_iden,
                           output_dim=self.get_nf(pg - 2),
                           k_w=1,
                           k_h=1,
                           d_h=1,
                           d_w=1,
                           name='dis_y_rgb_conv_{}'.format(
                               conv_iden.shape[1])))
            # fromRGB
            conv = lrelu(
                conv2d(conv,
                       output_dim=self.get_nf(pg - 1),
                       k_w=1,
                       k_h=1,
                       d_w=1,
                       d_h=1,
                       name='dis_y_rgb_conv_{}'.format(conv.shape[1])))
            for i in range(pg - 1):

                conv = lrelu(
                    conv2d(conv,
                           output_dim=self.get_nf(pg - 1 - i),
                           d_h=1,
                           d_w=1,
                           name='dis_n_conv_1_{}'.format(conv.shape[1])))
                conv = lrelu(
                    conv2d(conv,
                           output_dim=self.get_nf(pg - 2 - i),
                           d_h=1,
                           d_w=1,
                           name='dis_n_conv_2_{}'.format(conv.shape[1])))
                conv = avgpool2d(conv, 2)
                if i == 0 and t:
                    conv = alpha_trans * conv + (1 - alpha_trans) * conv_iden

            conv = MinibatchstateConcat(conv)
            conv = lrelu(
                conv2d(conv,
                       output_dim=self.get_nf(1),
                       k_w=3,
                       k_h=3,
                       d_h=1,
                       d_w=1,
                       name='dis_n_conv_1_{}'.format(conv.shape[1])))
            conv = lrelu(
                conv2d(conv,
                       output_dim=self.get_nf(1),
                       k_w=4,
                       k_h=4,
                       d_h=1,
                       d_w=1,
                       padding='VALID',
                       name='dis_n_conv_2_{}'.format(conv.shape[1])))
            conv = tf.reshape(conv, [self.batch_size, -1])

            #for D
            output = fully_connect(conv, output_size=1, scope='dis_n_fully')

            return tf.nn.sigmoid(output), output
Example #19
def rn_discriminator(x, reuse=False):
    if reuse:
        tf.get_variable_scope().reuse_variables()

    conv_1 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d(x,
                                  weight_shape=[4, 4, 6, 64],
                                  strides=[1, 2, 2, 1],
                                  name='d_rn_conv_1'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='d_rn_batch_Norm_1'))
    conv_2 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d(conv_1,
                                  weight_shape=[4, 4, 64, 128],
                                  strides=[1, 2, 2, 1],
                                  name='d_rn_conv_2'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='d_rn_batch_Norm_2'))
    conv_3 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d(conv_2,
                                  weight_shape=[4, 4, 128, 256],
                                  strides=[1, 2, 2, 1],
                                  name='d_rn_conv_3'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='d_rn_batch_Norm_3'))
    conv_4 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d(conv_3,
                                  weight_shape=[4, 4, 256, 512],
                                  strides=[1, 2, 2, 1],
                                  name='d_rn_conv_4'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='d_rn_batch_Norm_4'))
    conv_5 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d(conv_4,
                                  weight_shape=[4, 4, 512, 512],
                                  strides=[1, 2, 2, 1],
                                  name='d_rn_conv_5'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='d_rn_batch_Norm_5'))
    conv_6 = ops.lrelu(
        ops.batch_norm(ops.cnn_2d(conv_5,
                                  weight_shape=[4, 4, 512, 512],
                                  strides=[1, 2, 2, 1],
                                  name='d_rn_conv_6'),
                       center=True,
                       scale=True,
                       is_training=True,
                       scope='d_rn_batch_Norm_6'))
    conv_6 = tf.reshape(conv_6, [-1, 5 * 6 * 512])
    output = ops.dense(conv_6, 5 * 6 * 512, 1, name='d_rn_output')
    return output
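Two details of this discriminator can be read off its shapes. The first conv has weight_shape=[4, 4, 6, 64], i.e. 6 input channels, which suggests a pair of RGB images concatenated along the channel axis, as in pix2pix-style conditional discriminators. And the final reshape to 5 * 6 * 512 implies an input of roughly 320 x 384: six stride-2 'SAME' convolutions divide each spatial dimension by 2 with rounding up, so 320 -> 5 and 384 -> 6. A quick check of that arithmetic (the inferred input size is an assumption, not stated in the snippet):

h, w = 320, 384  # inferred input size
for _ in range(6):
    h, w = (h + 1) // 2, (w + 1) // 2
print(h, w)  # 5 6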
Example #20
def DiscriminatorCNN(image, config, reuse=None):
    '''
    Discriminator for GAN model.

    image      : batch_size x 64x64x3 image
    config     : see causal_dcgan/config.py
    reuse      : pass True if not calling for first time

    returns: probabilities(real)
           : logits(real)
           : first layer activation used to estimate z from
           : variables list
    '''
    with tf.variable_scope("discriminator", reuse=reuse) as vs:
        d_bn1 = batch_norm(name='d_bn1')
        d_bn2 = batch_norm(name='d_bn2')
        d_bn3 = batch_norm(name='d_bn3')

        if not config.stab_proj:
            h0 = lrelu(conv2d(image, config.df_dim,
                              name='d_h0_conv'))  #16,32,32,64

        else:  # method to restrict the discriminator from winning
            # I think this is equivalent to just not letting the discriminator
            # optimize the first layer, and also removing the nonlinearity.

            # k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
            # The paper used an 8x8 kernel, but I'm using 5x5 because it is
            # more similar to my architecture.
            # n_projs = config.df_dim  # 64 instead of 32 in paper
            n_projs = config.n_stab_proj  # 64 instead of 32 in paper

            print("WARNING: STAB_PROJ active, using", n_projs, "projections")

            w_proj = tf.get_variable(
                'w_proj', [5, 5, image.get_shape()[-1], n_projs],
                initializer=tf.truncated_normal_initializer(stddev=0.02),
                trainable=False)
            conv = tf.nn.conv2d(image,
                                w_proj,
                                strides=[1, 2, 2, 1],
                                padding='SAME')

            b_proj = tf.get_variable(
                'b_proj',
                [n_projs],  #does nothing
                initializer=tf.constant_initializer(0.0),
                trainable=False)
            h0 = tf.nn.bias_add(conv, b_proj)

        h1_ = lrelu(d_bn1(conv2d(h0, config.df_dim * 2,
                                 name='d_h1_conv')))  #16,16,16,128

        h1 = add_minibatch_features(h1_, config.df_dim)
        h2 = lrelu(d_bn2(conv2d(h1, config.df_dim * 4,
                                name='d_h2_conv')))  #16,16,16,248
        h3 = lrelu(d_bn3(conv2d(h2, config.df_dim * 8, name='d_h3_conv')))
        #print('h3shape: ',h3.get_shape().as_list())
        #print('8df_dim:',config.df_dim*8)
        #dim3=tf.reduce_prod(tf.shape(h3)[1:])
        dim3 = np.prod(h3.get_shape().as_list()[1:])
        h3_flat = tf.reshape(h3, [-1, dim3])
        h4 = linear(h3_flat, 1, 'd_h3_lin')

        prob = tf.nn.sigmoid(h4)

        variables = tf.contrib.framework.get_variables(
            vs, collection=tf.GraphKeys.TRAINABLE_VARIABLES)

    return prob, h4, h1_, variables
Example #21
def ali_decoder(opts, noise, is_training=False, reuse=False):
    output_shape = datashapes[opts['dataset']]
    batch_size = tf.shape(noise)[0]
    noise_size = int(noise.get_shape()[1])
    data_height = output_shape[0]
    data_width = output_shape[1]
    data_channels = output_shape[2]
    noise = tf.reshape(noise, [-1, 1, 1, noise_size])
    num_units = opts['g_num_filters']
    layer_params = []
    layer_params.append([4, 1, num_units])
    layer_params.append([4, 2, num_units // 2])
    layer_params.append([4, 1, num_units // 4])
    layer_params.append([4, 2, num_units // 8])
    layer_params.append([5, 1, num_units // 8])
    # For convolution: (n - k) / stride + 1 = s
    # For transposed: (s - 1) * stride + k = n
    layer_x = noise
    height = 1
    width = 1
    for i, (kernel, stride, channels) in enumerate(layer_params):
        height = (height - 1) * stride + kernel
        width = height
        layer_x = ops.deconv2d(opts,
                               layer_x, [batch_size, height, width, channels],
                               d_h=stride,
                               d_w=stride,
                               scope='h%d_deconv' % i,
                               conv_filters_dim=kernel,
                               padding='VALID')
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts,
                                     layer_x,
                                     is_training,
                                     reuse,
                                     scope='h%d_bn' % i)
        layer_x = ops.lrelu(layer_x, 0.1)
    assert height == data_height
    assert width == data_width

    # Then two 1x1 convolutions.
    layer_x = ops.conv2d(opts,
                         layer_x,
                         num_units // 8,
                         d_h=1,
                         d_w=1,
                         scope='conv2d_1x1',
                         conv_filters_dim=1)
    if opts['batch_norm']:
        layer_x = ops.batch_norm(opts,
                                 layer_x,
                                 is_training,
                                 reuse,
                                 scope='hfinal_bn')
    layer_x = ops.lrelu(layer_x, 0.1)
    layer_x = ops.conv2d(opts,
                         layer_x,
                         data_channels,
                         d_h=1,
                         d_w=1,
                         scope='conv2d_1x1_2',
                         conv_filters_dim=1)
    if opts['input_normalize_sym']:
        return tf.nn.tanh(layer_x), layer_x
    else:
        return tf.nn.sigmoid(layer_x), layer_x
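The decoder runs the encoder arithmetic in reverse: with 'VALID' padding, a transposed convolution grows a spatial size s to (s - 1) * stride + kernel, as the in-code comment states. Starting from the 1 x 1 reshaped noise, the five layers above give 1 -> 4 -> 10 -> 13 -> 28 -> 32, recovering a 32 x 32 output under the same CIFAR-sized assumption used for the encoder in Example #17:

s = 1  # spatial size of the reshaped noise
for kernel, stride in [(4, 1), (4, 2), (4, 1), (4, 2), (5, 1)]:
    s = (s - 1) * stride + kernel
    print(s)  # 4, 10, 13, 28, 32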
Example #22
 def forward(self, h, is_training):
     h = self.fc1(h)
     h = self.bn1(h)
     h = lrelu(h)
     h = self.fc2(h)
     return torch.tanh(h)
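This PyTorch forward pass presupposes fc1, bn1, and fc2 attributes plus an lrelu helper. A self-contained sketch of a module it could belong to (the class name, layer sizes, and 0.2 slope are assumptions, not from the source):

import torch
import torch.nn as nn
import torch.nn.functional as F

def lrelu(x, slope=0.2):
    # Leaky ReLU helper, mirroring the lrelu used throughout the TF examples.
    return F.leaky_relu(x, negative_slope=slope)

class SmallGenerator(nn.Module):
    def __init__(self, in_dim=100, hidden_dim=256, out_dim=784):
        super().__init__()
        self.fc1 = nn.Linear(in_dim, hidden_dim)
        self.bn1 = nn.BatchNorm1d(hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, out_dim)

    def forward(self, h, is_training=True):
        # is_training mirrors the original signature; BatchNorm1d actually
        # follows the module's train()/eval() mode instead.
        h = self.fc1(h)
        h = self.bn1(h)
        h = lrelu(h)
        h = self.fc2(h)
        return torch.tanh(h)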