def dis_net(data_array , y , weights , biases , reuse=False):

    # MNIST data has shape (28, 28, 1)

    y = tf.reshape(y , shape=[batch_size, 1 , 1 , y_dim])
    # concat
    data_array = conv_cond_concat(data_array , y)

    conv1 = conv2d(data_array , weights['wc1'] , biases['bc1'])

    tf.add_to_collection('weight_1', weights['wc1'])

    conv1 = lrelu(conv1)

    tf.add_to_collection('ac_1' , conv1)

    conv2 = conv2d(conv1 , weights['wc2']  , biases['bc2'])
    conv2 = batch_normal(conv2 ,scope="dis_bn1" , reuse=reuse)
    conv2 = lrelu(conv2)

    tf.add_to_collection('weight_2', weights['wc2'])

    tf.add_to_collection('ac_2', conv2)

    conv2 = tf.reshape(conv2 , [batch_size , -1])

    f1 = fully_connect(conv2 ,weights['wc3'] , biases['bc3'])
    f1 = batch_normal(f1 , scope="dis_bn2" , reuse=reuse)
    f1 = lrelu(f1)

    out = fully_connect( f1 , weights['wd'] , biases['bd'])

    return tf.nn.sigmoid(out) , out
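
# A minimal usage sketch for the (sigmoid, logits) pair returned above.
# `real_images`, `labels`, and the weight dicts are assumed to exist already;
# the sigmoid-cross-entropy pairing is the standard GAN discriminator loss,
# not necessarily this repo's exact training code:
#   D_real, D_real_logits = dis_net(real_images, labels, weights, biases)
#   d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
#       labels=tf.ones_like(D_real_logits), logits=D_real_logits))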
Example #2
    def discriminate(self, x_var, y, weights, biases, reuse=False):

        y1 = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
        x_var = conv_cond_concat(x_var, y1)

        conv1 = lrelu(conv2d(x_var, weights['wc1'], biases['bc1']))

        conv1 = conv_cond_concat(conv1, y1)

        conv2 = lrelu(
            batch_normal(conv2d(conv1, weights['wc2'], biases['bc2']),
                         scope='dis_bn1',
                         reuse=reuse))

        conv2 = tf.reshape(conv2, [self.batch_size, -1])

        conv2 = tf.concat([conv2, y], 1)

        fc1 = lrelu(
            batch_normal(fully_connect(conv2, weights['wc3'], biases['bc3']),
                         scope='dis_bn2',
                         reuse=reuse))

        fc1 = tf.concat([fc1, y], 1)
        #for D
        output = fully_connect(fc1, weights['wd'], biases['bd'])

        return tf.nn.sigmoid(output)
def sample_net(batch_size, z, y, output_size):
    z = tf.concat([z, y], 1)
    yb = tf.reshape(y, shape=[batch_size, 1, 1, y_dim])
    c1, c2 = output_size // 4, output_size // 2  # integer division so the reshape sizes stay ints

    # 10 stands for the number of labels
    d1 = fully_connect(z, weights2['wd'], biases2['bd'])
    d1 = batch_normal(d1, scope="genbn1", reuse=True)
    d1 = tf.nn.relu(d1)
    d1 = tf.concat([d1, y], 1)

    d2 = fully_connect(d1, weights2['wc1'], biases2['bc1'])
    d2 = batch_normal(d2, scope="genbn2", reuse=True)
    d2 = tf.nn.relu(d2)
    d2 = tf.reshape(d2, [batch_size, c1, c1, 64 * 2])
    d2 = conv_cond_concat(d2, yb)

    d3 = de_conv(d2,
                 weights2['wc2'],
                 biases2['bc2'],
                 out_shape=[batch_size, c2, c2, 128])
    d3 = batch_normal(d3, scope="genbn3", reuse=True)
    d3 = tf.nn.relu(d3)
    d3 = conv_cond_concat(d3, yb)

    d4 = de_conv(d3,
                 weights2['wc3'],
                 biases2['bc3'],
                 out_shape=[batch_size, output_size, output_size, channel])

    return tf.nn.sigmoid(d4)
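
# Note: every batch_normal call above passes reuse=True, so this sampling
# network shares the "genbn*" batch-norm variables created elsewhere
# (typically by the training generator) and must be built after they exist.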
Example #4
    def Encode(self, img, reuse=False):

        with tf.variable_scope('encode') as scope:
            if reuse:
                scope.reuse_variables()
            conv1 = tf.nn.relu(
                batch_normal(conv2d(img, output_dim=64, name='e_c1'),
                             scope='e_bn1',
                             reuse=reuse,
                             isTrain=self.isTrain))
            print('conv1_shape', conv1.get_shape())
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=128, name='e_c2'),
                             scope='e_bn2',
                             reuse=reuse,
                             isTrain=self.isTrain))
            print('conv2_shape', conv2.get_shape())
            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=256, name='e_c3'),
                             scope='e_bn3',
                             reuse=reuse,
                             isTrain=self.isTrain))
            print('conv3_shape', conv3.get_shape())
            conv3_before_fc = conv3
            conv3 = tf.reshape(conv3, [self.batch_size, 256 * 8 * 8])
            fc1 = tf.nn.relu(
                batch_normal(fully_connect(conv3,
                                           output_size=1024,
                                           scope='e_f1'),
                             scope='e_bn4',
                             reuse=reuse,
                             isTrain=self.isTrain))
            z_mean = fully_connect(fc1, output_size=128, scope='e_f2')
            z_sigma = fully_connect(fc1, output_size=128, scope='e_f3')
            return z_mean, z_sigma, conv1, conv2, conv3_before_fc  # should this be the pre-activation or the post-activation value?
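
    # Sketch of the usual VAE reparameterization step that consumes z_mean and
    # z_sigma (assuming z_sigma is the log-variance, a common but unconfirmed
    # convention for this style of TF1 VAE code):
    #   eps = tf.random_normal([self.batch_size, 128])
    #   z = z_mean + tf.exp(0.5 * z_sigma) * eps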
    def dis_net(self, images, y, reuse=False):

        with tf.variable_scope("discriminator") as scope:

            if reuse:
                scope.reuse_variables()

            # MNIST data has shape (28, 28, 1)
            yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
            # concat
            concat_data = conv_cond_concat(images, yb)

            conv1, w1 = conv2d(concat_data, output_dim=10, name='dis_conv1')
            tf.add_to_collection('weight_1', w1)

            conv1 = lrelu(conv1)
            conv1 = conv_cond_concat(conv1, yb)
            tf.add_to_collection('ac_1', conv1)


            conv2, w2 = conv2d(conv1, output_dim=64, name='dis_conv2')
            tf.add_to_collection('weight_2', w2)

            conv2 = lrelu(batch_normal(conv2, scope='dis_bn1'))
            tf.add_to_collection('ac_2', conv2)

            conv2 = tf.reshape(conv2, [self.batch_size, -1])
            conv2 = tf.concat([conv2, y], 1)

            f1 = lrelu(batch_normal(fully_connect(conv2, output_size=1024, scope='dis_fully1'), scope='dis_bn2', reuse=reuse))
            f1 = tf.concat([f1, y], 1)

            out = fully_connect(f1, output_size=1, scope='dis_fully2',  initializer = xavier_initializer())

            return tf.nn.sigmoid(out), out
Example #6
    def Encode_AE(self, img, reuse=False):

        with tf.variable_scope('encode') as scope:
            if reuse:
                scope.reuse_variables()
            conv1 = tf.nn.relu(
                batch_normal(conv2d(img, output_dim=64, name='e_c1'),
                             scope='e_bn1',
                             reuse=reuse,
                             isTrain=self.isTrain))
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=128, name='e_c2'),
                             scope='e_bn2',
                             reuse=reuse,
                             isTrain=self.isTrain))
            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=256, name='e_c3'),
                             scope='e_bn3',
                             reuse=reuse,
                             isTrain=self.isTrain))
            conv3_before_fc = conv3
            conv3 = tf.reshape(conv3, [self.batch_size, 256 * 8 * 8])
            fc1 = tf.nn.relu(
                batch_normal(fully_connect(conv3,
                                           output_size=1024,
                                           scope='e_f1'),
                             scope='e_bn4',
                             reuse=reuse,
                             isTrain=self.isTrain))
            z_x = tf.nn.relu(
                batch_normal(fully_connect(fc1, output_size=128, scope='e_f2'),
                             scope='e_bn5',
                             reuse=reuse,
                             isTrain=self.isTrain))
            return z_x, conv1, conv2, conv3_before_fc
    def discriminate(self, x_var, x_exemplar, local_x_var, spectural_normed=False, reuse=False):

        with tf.variable_scope("discriminator") as scope:

            if reuse:
                scope.reuse_variables()

            conv = tf.concat([x_var, x_exemplar], axis=3)

            for i in range(5):
                output_dim = 64 * (i + 1)
                conv = lrelu(conv2d(conv, spectural_normed=spectural_normed, output_dim=output_dim, name='dis_conv_{}'.format(i)))

            conv = tf.reshape(conv, shape=[self.batch_size, conv.shape[1] * conv.shape[2] * conv.shape[3]])
            ful_global = fully_connect(conv, output_size=512, spectural_normed=spectural_normed, scope='dis_fully1')

            conv = local_x_var
            for i in range(5):

                output_dim = 64 * (i + 1)
                conv = lrelu(conv2d(conv, spectural_normed=spectural_normed, output_dim=output_dim, name='dis_conv_2_{}'.format(i)))

            conv = tf.reshape(conv, shape=[self.batch_size, conv.shape[1] * conv.shape[2] * conv.shape[3]])
            ful_local = fully_connect(conv, output_size=512, spectural_normed=spectural_normed, scope='dis_fully2')

            gan_logits = fully_connect(tf.concat([ful_global, ful_local], axis=1), output_size=1, spectural_normed=spectural_normed, scope='dis_fully3')

            return gan_logits
    def gen_net(self, z, y):

        with tf.variable_scope('generator') as scope:
 
            yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])  # reshape the CGAN condition y into shape (64, 1, 1, 10)
            z = tf.concat([z, y], 1)
            c1, c2 = int(self.output_size / 4), int(self.output_size / 2)

            # 10 stands for the number of labels
            d1 = tf.nn.relu(batch_normal(fully_connect(z, output_size=1024, scope='gen_fully'), scope='gen_bn1'))

            d1 = tf.concat([d1, y], 1)

            d2 = tf.nn.relu(batch_normal(fully_connect(d1, output_size=7*7*2*64, scope='gen_fully2'), scope='gen_bn2'))

            d2 = tf.reshape(d2, [self.batch_size, c1, c1, 64 * 2])
            d2 = conv_cond_concat(d2, yb)

            d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, c2, c2, 128], name='gen_deconv1'), scope='gen_bn3'))

            d3 = conv_cond_concat(d3, yb)

            d4 = de_conv(d3, output_shape=[self.batch_size, self.output_size, self.output_size, self.channel], 
                         name='gen_deconv2', initializer = xavier_initializer())

            return tf.nn.sigmoid(d4)
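
    # Illustrative call, assuming y is a one-hot (batch_size, 10) label batch;
    # the uniform prior on z is an assumption, not taken from this file:
    #   z = tf.random_uniform([self.batch_size, 100], minval=-1., maxval=1.)
    #   fake = self.gen_net(z, y)  # (batch_size, 28, 28, 1) for MNIST settings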
Example #9
    def generate(self, z_var, y, weights, biases):

        #add the first layer

        z_var = tf.concat([z_var, y], 1)

        d1 = tf.nn.relu(batch_normal(fully_connect(z_var , weights['wd'], biases['bd']) , scope='gen_bn1'))

        #add the second layer

        d1 = tf.concat([d1, y], 1)

        d2 = tf.nn.relu(batch_normal(fully_connect(d1 , weights['wc1'], biases['bc1']) , scope='gen_bn2'))

        d2 = tf.reshape(d2 , [self.batch_size , 7 , 7 , 128])
        y = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])

        d2 = conv_cond_concat(d2, y)

        d3 = tf.nn.relu(batch_normal(de_conv(d2, weights['wc2'], biases['bc2'], out_shape=[self.batch_size, 14 , 14 , 64]) , scope='gen_bn3'))

        d3 = conv_cond_concat(d3, y)

        output = de_conv(d3, weights['wc3'], biases['bc3'], out_shape=[self.batch_size, 28, 28, 1])

        return tf.nn.sigmoid(output)
Example #10
    def discriminate(self, x_var, reuse=False):

        with tf.variable_scope("discriminator") as scope:

            if reuse:
                scope.reuse_variables()

            conv1 = tf.nn.relu(conv2d(x_var, output_dim=32, name='dis_conv1'))
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=128, name='dis_conv2'),
                             scope='dis_bn1',
                             reuse=reuse))
            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=256, name='dis_conv3'),
                             scope='dis_bn2',
                             reuse=reuse))
            conv4 = conv2d(conv3, output_dim=256, name='dis_conv4')
            middle_conv = conv4
            conv4 = tf.nn.relu(
                batch_normal(conv4, scope='dis_bn3', reuse=reuse))
            conv4 = tf.reshape(conv4, [self.batch_size, -1])
            fl = lrelu(
                batch_normal(fully_connect(conv4,
                                           output_size=512,
                                           scope='dis_fully1'),
                             scope='dis_bn4',
                             reuse=reuse))
            output = fully_connect(fl, output_size=1, scope='dis_fully2')

            return middle_conv, output
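
    # middle_conv (the pre-batch-norm conv4 features) is typically consumed by
    # a feature-wise reconstruction loss, VAE/GAN style; a hedged sketch:
    #   fm_real, _ = self.discriminate(x_real)
    #   fm_fake, _ = self.discriminate(x_fake, reuse=True)
    #   feature_loss = tf.reduce_mean(tf.square(fm_real - fm_fake))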
Example #11
    def gern_net(self, z, y):   # no BN layer on G's output layer
        with tf.variable_scope('generator') as scope:
            # ? 1 1 10
            yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
            # ? 110  concatenate z and y along axis 1, so the column count grows: (64, 100+10)
            z = tf.concat([z, y], 1)
            # compute the intermediate layer sizes: 7, 14
            c1, c2 = int(self.output_size / 4), int(self.output_size / 2)

            # 10 stands for the number of labels
            # ? 1024
            d1 = tf.nn.relu(batch_normal(fully_connect(z, output_size=1024, scope='gen_fully'), scope='gen_bn1'))  # (64, 1024)
            # ? 1034  concatenate y again after the first fully connected layer
            d1 = tf.concat([d1, y], 1)  # (64, 1034)
            # second fully connected layer: ? 7*7*2*64; note that c1*c2*self.batch_size
            # equals 7*7*2*64 = 6272 only because batch_size == 64 here
            d2 = tf.nn.relu(batch_normal(fully_connect(d1, output_size=c1*c2*self.batch_size, scope='gen_fully2'), scope='gen_bn2'))

            # ? 7 7 128  (self.batch_size*2 == 128 likewise assumes batch_size == 64)
            d2 = tf.reshape(d2, [self.batch_size, c1, c1, self.batch_size*2])
            # ? 7 7 138
            d2 = conv_cond_concat(d2, yb)  # append y once more, channel-wise
            # ? 14 14 128
            d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, c2, c2, 128], name='gen_deconv1'), scope='gen_bn3'))  # (64, 14, 14, 128)
            # ? 14 14 138
            d3 = conv_cond_concat(d3, yb)  # and once again
            # output ? 28 28 1
            d4 = de_conv(d3, output_shape=[self.batch_size, self.output_size, self.output_size, self.channel], name='gen_deconv2', initializer=xavier_initializer())  # (64, 28, 28, 1)

            return tf.nn.sigmoid(d4)
    def discriminator(self, incom_x, local_x_left, local_x_right, guided_fp_left, guided_fp_right, reuse=False):

        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            x = incom_x
            for i in range(6):
                output_dim = np.minimum(16 * np.power(2, i+1), 256)
                print(output_dim)
                x = lrelu(conv2d(x, output_dim=output_dim, use_sp=self.use_sp, name='dis_conv_1_{}'.format(i)))
            x = tf.reshape(x, shape=[self.batch_size, -1])
            ful_global = fully_connect(x, output_size=output_dim, use_sp=self.use_sp, scope='dis_fu1')

            x = tf.concat([local_x_left, local_x_right], axis=3)
            for i in range(5):
                output_dim = np.minimum(16 * np.power(2, i+1), 256)
                x = lrelu(conv2d(x, output_dim=output_dim, use_sp=self.use_sp, name='dis_conv_2_{}'.format(i)))
            x = tf.reshape(x, shape=[self.batch_size, -1])
            ful_local = fully_connect(x, output_size=output_dim*2, use_sp=self.use_sp, scope='dis_fu2')

            ful = tf.concat([ful_global, ful_local, guided_fp_left, guided_fp_right], axis=1)
            ful = tf.nn.relu(fully_connect(ful, output_size=512, use_sp=self.use_sp, scope='dis_fu4'))
            gan_logits = fully_connect(ful, output_size=1, use_sp=self.use_sp, scope='dis_fu5')

            return gan_logits
Example #13
    def Style_Encode(self, x):

        with tf.variable_scope('sty_encode') as scope:

            conv1 = tf.nn.relu(
                batch_normal(conv2d(x, output_dim=64, name='e_c1'),
                             scope='e_bn1'))
            #print(np.shape(conv1))
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=128, name='e_c2'),
                             scope='e_bn2'))
            #print(np.shape(conv2))
            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=256, name='e_c3', d_h=2),
                             scope='e_bn3'))
            #print(np.shape(conv3))
            # conv4 = tf.nn.relu(batch_normal(conv2d(conv3 , output_dim=10, name='e_c4'), scope='e_bn4'))
            conv5 = tf.reshape(conv3, [self.batch_size, 256 * 16 * 215])

            z_mean = batch_normal(fully_connect(conv5,
                                                output_size=1,
                                                scope='e_f5'),
                                  scope='e_bn5')
            z_sigma = batch_normal(fully_connect(conv5,
                                                 output_size=1,
                                                 scope='e_f6'),
                                   scope='e_bn6')

            return z_mean, z_sigma, conv1, conv2, conv3
Example #14
    def Style_Encode(self, x):

        with tf.variable_scope('sty_encode') as scope:

            conv1 = tf.nn.relu(
                batch_normal(conv2d(x, output_dim=2, name='e_c1', d_w=2),
                             scope='e_bn1'))
            print(np.shape(conv1))
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=4, name='e_c2', d_w=2),
                             scope='e_bn2'))
            print(np.shape(conv2))
            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=6, name='e_c3', d_w=2),
                             scope='e_bn3'))
            print(np.shape(conv3))
            # # conv4 = tf.nn.relu(batch_normal(conv2d(conv3 , output_dim=10, name='e_c4'), scope='e_bn4'))
            # conv4 = tf.nn.relu(batch_normal(conv2d(conv3, output_dim=16, name='e_c4',d_w=4), scope='e_bn4'))
            # conv5 = tf.nn.relu(batch_normal(conv2d(conv4, output_dim=32, name='e_c5',d_w=4), scope='e_bn5'))

            conv6 = tf.reshape(conv3, [self.batch_size, 6 * 2 * 110250])

            z_mean = batch_normal(fully_connect(conv6,
                                                output_size=1,
                                                scope='e_f6'),
                                  scope='e_bn6')
            z_sigma = batch_normal(fully_connect(conv6,
                                                 output_size=1,
                                                 scope='e_f7'),
                                   scope='e_bn7')

            return z_mean, z_sigma, conv1, conv2, conv3  #, conv4, conv5
Example #15
    def dis_net(self, feature_vector, y, reuse=False):

        with tf.variable_scope("discriminator") as scope:

            if reuse:
                scope.reuse_variables()

            # concat
            concat_data = tf.concat([feature_vector, y], 1)
            f1 = tf.nn.relu(
                batch_normal(fully_connect(concat_data,
                                           output_size=10,
                                           scope="dis_fully1"),
                             scope="dis_bn1"))
            f1 = tf.concat([f1, y], 1)
            # distinct scopes here: reusing f1's "dis_fully1"/"dis_bn1" scopes,
            # as the original did, raises a variable-already-exists error in TF1
            f2 = lrelu(
                batch_normal(fully_connect(f1,
                                           output_size=10,
                                           scope="dis_fully2"),
                             scope="dis_bn2"))
            f2 = tf.concat([f2, y], 1)
            out = fully_connect(f2,
                                output_size=1,
                                scope='dis_fully3',
                                initializer=xavier_initializer())

            return tf.nn.sigmoid(out), out
    def Encode(self, x):

        with tf.variable_scope('encode') as scope:

            conv1 = tf.nn.relu(
                batch_normal(conv2d(x, output_dim=64, name='e_c1'),
                             scope='e_bn1'))
            print("en conv1: ", conv1)
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=128, name='e_c2'),
                             scope='e_bn2'))
            print("en conv2: ", conv2)
            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=256, name='e_c3'),
                             scope='e_bn3'))
            print("en conv3: ", conv3)
            shape = tf.shape(conv3)[0]
            shape = tf.stack([shape, 256 * 8 * 8])
            conv3 = tf.reshape(conv3, shape)
            print("en conv3: ", conv3)
            fc1 = tf.nn.relu(
                batch_normal(fully_connect(conv3,
                                           output_size=1024,
                                           scope='e_f1'),
                             scope='e_bn4'))
            print("fc1: ", fc1)
            z_mean = fully_connect(fc1, output_size=128, scope='e_f2')
            print("z_mean: ", z_mean)
            z_sigma = fully_connect(fc1, output_size=128, scope='e_f3')
            print("z_sigma: ", z_sigma)
            return z_mean, z_sigma
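
    # Unlike the other Encode variants here, this one builds the flattening
    # shape with tf.shape/tf.stack, so the graph also accepts a batch size
    # that is unknown at graph-construction time.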
Example #17
    def Encode(self, x):

        with tf.variable_scope('encode') as scope:

            conv1 = tf.nn.relu(
                batch_normal(conv2d(x, output_dim=64, name='e_c1'),
                             scope='e_bn1'))
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=128, name='e_c2'),
                             scope='e_bn2'))
            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=256, name='e_c3'),
                             scope='e_bn3'))
            conv3 = tf.reshape(conv3, [self.batch_size, 256 * 8 * 8])
            fc1 = tf.nn.relu(
                batch_normal(fully_connect(conv3,
                                           output_size=1024,
                                           scope='e_f1'),
                             scope='e_bn4'))
            z_mean = fully_connect(fc1,
                                   output_size=self.latent_dim,
                                   scope='e_f2')
            z_sigma = fully_connect(fc1,
                                    output_size=self.latent_dim,
                                    scope='e_f3')

            return z_mean, z_sigma
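
    # Sketch of the KL term these outputs usually feed (again assuming
    # z_sigma is the log-variance; that convention is an assumption):
    #   kl = -0.5 * tf.reduce_sum(
    #       1 + z_sigma - tf.square(z_mean) - tf.exp(z_sigma), axis=1)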
Example #18
    def Encode(self, x, reuse=False):

        with tf.variable_scope('encode') as scope:
            if reuse:
                scope.reuse_variables()

            conv1 = tf.nn.relu(
                batch_normal(conv2d(x, output_dim=64, name='e_c1'),
                             scope='e_bn1'))
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=128, name='e_c2'),
                             scope='e_bn2'))
            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=256, name='e_c3'),
                             scope='e_bn3'))
            conv3 = tf.reshape(conv3, [self.batch_size, 256 * 13 * 13])
            fc1 = tf.nn.relu(
                batch_normal(fully_connect(conv3,
                                           output_size=1024,
                                           scope='e_f1'),
                             scope='e_bn4'))
            z_mean = fully_connect(fc1, output_size=128, scope='e_f2')
            z_sigma = fully_connect(fc1, output_size=128, scope='e_f3')

            return z_mean, z_sigma
def sample_net(batch_size, z, y, output_size):

    z = tf.concat([z, y], 1)

    # MNIST data has shape (28, 28, 1)
    # in the paper, s = 28
    c1, c2 = output_size // 4, output_size // 2

    # 10 stands for the number of labels
    d1 = fully_connect(z , weights2['wd'], biases2['bd'])
    d1 = batch_normal(d1, scope="genbn1" ,reuse=True)
    d1 = tf.nn.relu(d1)

    d2 = fully_connect(d1, weights2['wc1'], biases2['bc1'])
    d2 = batch_normal(d2, scope="genbn2" ,reuse=True)
    d2 = tf.nn.relu(d2)
    d2 = tf.reshape(d2, [batch_size, c1, c1 , 64 * 2])

    d3 = de_conv(d2, weights2['wc2'], biases2['bc2'], out_shape=[batch_size , c2, c2, 128])
    d3 = batch_normal(d3, scope="genbn3" ,reuse=True)
    d3 = tf.nn.relu(d3)

    d4 = de_conv(d3, weights2['wc3'], biases2['bc3'], out_shape=[batch_size, output_size, output_size, 1])

    return tf.nn.sigmoid(d4)
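
# As in the sampler above, reuse=True on every batch_normal call ties this
# network to the "genbn*" variables of an already-built training generator.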
Example #20
    def discriminate(self, x_var, resnet=False, reuse=False):

        print(x_var.shape)
        with tf.variable_scope("discriminator") as scope:

            if reuse:
                scope.reuse_variables()
            if not resnet:

                conv1 = lrelu(conv2d(x_var, spectural_normed=self.sn, iter=self.iter_power,
                       output_dim=64, kernel=3, stride=1, name='dis_conv1_1'))
                # conv1 = lrelu(conv2d(conv1, spectural_normed=self.sn, iter=self.iter_power,
                #        output_dim=64, name='dis_conv1_2'))
                # conv2 = lrelu(conv2d(conv1, spectural_normed=self.sn, iter=self.iter_power,
                #                      output_dim=128, k_w=3, k_h=3, d_h=1, d_w=1, name='dis_conv2_1'))
                conv2 = lrelu(conv2d(conv1, spectural_normed=self.sn, iter=self.iter_power,
                                     output_dim=128, name='dis_conv2_2'))
                # conv3 = lrelu(conv2d(conv2, spectural_normed=self.sn, iter=self.iter_power,
                #                      output_dim=256, k_h=3, k_w=3, d_w=1, d_h=1, name='dis_conv3_1'))
                conv3 = lrelu(conv2d(conv2, spectural_normed=self.sn, iter=self.iter_power,
                                     output_dim=256, name='dis_conv3_2'))
                conv4 = lrelu(conv2d(conv3, spectural_normed=self.sn, iter=self.iter_power,
                                     output_dim=512, kernel=1, name='dis_conv4'))
                conv4 = tf.reshape(conv4, [self.batch_size*self.num_rotation, -1])
                #for D
                gan_logits = fully_connect(conv4, spectural_normed=self.sn, iter=self.iter_power,
                                           output_size=1, scope='dis_fully1')
                if self.ssup:
                    rot_logits = fully_connect(conv4, spectural_normed=self.sn, iter=self.iter_power,
                                               output_size=4, scope='dis_fully2')
                    rot_prob = tf.nn.softmax(rot_logits)

            else:

                re1 = Residual_D(x_var, spectural_normed=self.sn, output_dims=128, residual_name='re1', down_sampling=True, is_start=True)
                re2 = Residual_D(re1, spectural_normed=self.sn, output_dims=128, residual_name='re2', down_sampling=True)
                re3 = Residual_D(re2, spectural_normed=self.sn, output_dims=128, residual_name='re3')
                re4 = Residual_D(re3, spectural_normed=self.sn, output_dims=128, residual_name='re4')
                re4 = tf.nn.relu(re4)
                #gsp
                gsp = tf.reduce_sum(re4, axis=[1, 2])
                gan_logits = fully_connect(gsp, spectural_normed=self.sn, iter=self.iter_power, output_size=1, scope='dis_fully1')
                if self.ssup:
                    rot_logits = fully_connect(gsp, spectural_normed=self.sn, iter=self.iter_power, output_size=4, scope='dis_fully2')
                    rot_prob = tf.nn.softmax(rot_logits)

            #tf.summary.histogram("logits", gan_logits)
            if self.ssup:
                return tf.nn.sigmoid(gan_logits), gan_logits, rot_logits, rot_prob
            else:
                return tf.nn.sigmoid(gan_logits), gan_logits
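
    # When self.ssup is set, rot_logits is normally trained as a four-way
    # rotation classifier (self-supervised GAN); an illustrative loss, with
    # rot_labels as an assumed one-hot (batch*num_rotation, 4) tensor:
    #   rot_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
    #       labels=rot_labels, logits=rot_logits))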
Example #21
    def discriminator(self, incom_x, local_x, pg=1, is_trans=False, alpha_trans=0.01, reuse=False):

        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            #global discriminator
            x = incom_x
            if is_trans:
                x_trans = downscale2d(x)
                #from rgb
                x_trans = lrelu(conv2d(x_trans, output_dim=self.get_nf(pg - 2), k_w=1, k_h=1, d_h=1, d_w=1, use_sp=self.use_sp,
                                       name='dis_rgb_g_{}'.format(x_trans.shape[1])))
            x = lrelu(conv2d(x, output_dim=self.get_nf(pg - 1), k_w=1, k_h=1, d_w=1, d_h=1, use_sp=self.use_sp,
                             name='dis_rgb_g_{}'.format(x.shape[1])))
            for i in range(pg - 1):
                x = lrelu(conv2d(x, output_dim=self.get_nf(pg - 2 - i), d_h=1, d_w=1, use_sp=self.use_sp,
                                 name='dis_conv_g_{}'.format(x.shape[1])))
                x = downscale2d(x)
                if i == 0 and is_trans:
                    x = alpha_trans * x + (1 - alpha_trans) * x_trans
            x = lrelu(conv2d(x, output_dim=self.get_nf(1), k_h=3, k_w=3, d_h=1, d_w=1, use_sp=self.use_sp,
                             name='dis_conv_g_1_{}'.format(x.shape[1])))
            x = tf.reshape(x, [self.batch_size, -1])
            x_g = fully_connect(x, output_size=256, use_sp=self.use_sp, name='dis_conv_g_fully')

            #local discriminator
            x = local_x
            if is_trans:
                x_trans = downscale2d(x)
                #from rgb
                x_trans = lrelu(conv2d(x_trans, output_dim=self.get_nf(pg - 2), k_w=1, k_h=1, d_h=1, d_w=1, use_sp=self.use_sp,
                                       name='dis_rgb_l_{}'.format(x_trans.shape[1])))
            x = lrelu(conv2d(x, output_dim=self.get_nf(pg - 1), k_w=1, k_h=1, d_w=1, d_h=1, use_sp=self.use_sp,
                             name='dis_rgb_l_{}'.format(x.shape[1])))

            for i in range(pg - 1):
                x = lrelu(conv2d(x, output_dim=self.get_nf(pg - 2 - i), d_h=1, d_w=1, use_sp=self.use_sp,
                                 name='dis_conv_l_{}'.format(x.shape[1])))
                x= downscale2d(x)
                if i == 0 and is_trans:
                    x = alpha_trans * x + (1 - alpha_trans) * x_trans

            x = lrelu(conv2d(x, output_dim=self.get_nf(1), k_h=3, k_w=3, d_h=1, d_w=1, use_sp=self.use_sp,
                             name='dis_conv_l_1_{}'.format(x.shape[1])))
            x = tf.reshape(x, [self.batch_size, -1])
            x_l = fully_connect(x, output_size=256, use_sp=self.use_sp, name='dis_conv_l_fully')

            logits = fully_connect(tf.concat([x_g, x_l], axis=1), output_size=1, use_sp=self.use_sp, name='dis_conv_fully')

            return logits
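
    # During a progressive-growing transition, alpha_trans is ramped from 0
    # to 1 so the new high-resolution path (x) fades in over the downscaled
    # previous-stage path (x_trans); alpha_trans = 1 recovers the plain
    # non-transitional network.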
Example #22
    def Encode2(self, vec, reuse=False):

        with tf.variable_scope('encode_v') as scope:
            if reuse:
                scope.reuse_variables()

            fc1 = tf.nn.relu(
                batch_normal(fully_connect(vec,
                                           output_size=1024,
                                           scope='e2_v_1'),
                             scope='e2_v_bn1'))
            z_mean = fully_connect(fc1, output_size=128, scope='e2_f2')
            z_sigma = fully_connect(fc1, output_size=128, scope='e2_f3')

            return z_mean, z_sigma
Example #23
    def gern_net(self, z, y):

        with tf.variable_scope('generator') as scope:

            yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
            z = tf.concat([z, y], 1)
            # c1, c2 = self.output_size / 4, self.output_size / 2
            c1_row, c2_row = int(self.output_size_row / 4), int(
                self.output_size_row / 2)
            c1_col, c2_col = int(self.output_size_col / 4), int(
                self.output_size_col / 2)

            # 10 stands for the number of labels
            d1 = tf.nn.relu(
                batch_normal(fully_connect(z,
                                           output_size=1024,
                                           scope='gen_fully'),
                             scope='gen_bn1'))

            d1 = tf.concat([d1, y], 1)

            d2 = tf.nn.relu(
                batch_normal(fully_connect(d1,
                                           output_size=c1_row * c1_col * 2 *
                                           64,
                                           scope='gen_fully2'),
                             scope='gen_bn2'))

            d2 = tf.reshape(d2, [self.batch_size, c1_row, c1_col, 64 * 2])
            d2 = conv_cond_concat(d2, yb)

            d3 = tf.nn.relu(
                batch_normal(de_conv(
                    d2,
                    output_shape=[self.batch_size, c2_row, c2_col, 128],
                    name='gen_deconv1'),
                             scope='gen_bn3'))

            d3 = conv_cond_concat(d3, yb)

            d4 = de_conv(d3,
                         output_shape=[
                             self.batch_size, self.output_size_row,
                             self.output_size_col, self.channel
                         ],
                         name='gen_deconv2')

            return tf.nn.sigmoid(d4)
Example #24
    def generate(self, z_var, batch_size=64, resnet=False, is_train=True, reuse=False):

        with tf.variable_scope('generator') as scope:

            s = 4
            if reuse:
                scope.reuse_variables()
            if self.output_size == 32:
                s = 4
            elif self.output_size == 48:
                s = 6

            d1 = fully_connect(z_var, output_size=s*s*256, scope='gen_fully1')
            d1 = tf.reshape(d1, [-1, s, s, 256])

            if not resnet:

                d1 = tf.nn.relu(d1)
                d2 = tf.nn.relu(batch_normal(de_conv(d1, output_shape=[batch_size, s*2, s*2, 256], name='gen_deconv2')
                                             , scope='bn1', is_training=is_train))
                d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[batch_size, s*4, s*4, 128], name='gen_deconv3')
                                             , scope='bn2', is_training=is_train))
                d4 = tf.nn.relu(batch_normal(de_conv(d3, output_shape=[batch_size, s*8, s*8, 64], name='gen_deconv4')
                                             , scope='bn3', is_training=is_train))
                d5 = conv2d(d4, output_dim=self.channel, stride=1, kernel=3, name='gen_conv')

            else:

                d2 = Residual_G(d1, output_dims=256, up_sampling=True, residual_name='in1')
                d3 = Residual_G(d2, output_dims=256, up_sampling=True, residual_name='in2')
                d4 = Residual_G(d3, output_dims=256, up_sampling=True, residual_name='in3')
                d4 = tf.nn.relu(batch_normal(d4, scope='in4'))
                d5 = conv2d(d4, output_dim=self.channel, kernel=3, stride=1, name='gen_conv')

            return tf.tanh(d5)
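
    # tf.tanh puts samples in [-1, 1], so this generator pairs with real
    # images preprocessed to the same range, e.g. (a common convention,
    # not confirmed by this file): images = images / 127.5 - 1.0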
Example #25
    def encode_z(self, x, weights, biases):
        print('x', x.shape)
        c1 = tf.nn.relu(
            batch_normal(conv2d(x, weights['e1'], biases['eb1']),
                         scope='enz_bn1'))
        print('c1', c1.shape)
        c2 = tf.nn.relu(
            batch_normal(conv2d(c1, weights['e2'], biases['eb2']),
                         scope='enz_bn2'))
        print('c2', c2.shape)
        c3 = tf.nn.relu(
            batch_normal(conv2d(c2, weights['e3'], biases['eb3']),
                         scope='enz_bn3'))
        print('c3', c3.shape)
        c4 = tf.nn.relu(
            batch_normal(conv2d(c3, weights['e4'], biases['eb4']),
                         scope='enz_bn4'))
        print('c4', c4.shape)
        # flatten the last conv output; the original reshaped c3 and then fed
        # the still-4-D c2 into fully_connect, which cannot work
        c4 = tf.reshape(c4, [self.batch_size, 128 * 16 * 16])
        print('c4', c4.shape)
        #using tanh instead of tf.nn.relu.
        # 'e5'/'eb5' and scope 'enz_bn5' are assumed names: 'e3'/'eb3' and
        # 'enz_bn3' already belong to the conv layer above and would collide
        result_z = batch_normal(fully_connect(c4, weights['e5'],
                                              biases['eb5']),
                                scope='enz_bn5')
        print('result_z', result_z.shape)
        #result_c = tf.nn.sigmoid(fully_connect(c2, weights['e4'], biases['eb4']))

        #Transforming one-hot form
        #sparse_label = tf.arg_max(result_c, 1)

        #y_vec = tf.one_hot(sparse_label, 10)

        return result_z
Example #26
    def discriminate(self, conv, reuse=False, pg=1, t=False, alpha_trans=0.01):
        #dis_as_v = []
        with tf.variable_scope("discriminator") as scope:

            if reuse:
                scope.reuse_variables()
            if t:
                conv_iden = downscale2d(conv)
                #from RGB
                conv_iden = lrelu(conv2d(conv_iden, output_dim= self.get_nf(pg - 2), k_w=1, k_h=1, d_h=1, d_w=1, use_wscale=self.use_wscale,
                           name='dis_y_rgb_conv_{}'.format(conv_iden.shape[1])))
            # fromRGB
            conv = lrelu(conv2d(conv, output_dim=self.get_nf(pg - 1), k_w=1, k_h=1, d_w=1, d_h=1, use_wscale=self.use_wscale, name='dis_y_rgb_conv_{}'.format(conv.shape[1])))

            for i in range(pg - 1):
                conv = lrelu(conv2d(conv, output_dim=self.get_nf(pg - 1 - i), d_h=1, d_w=1, use_wscale=self.use_wscale,
                                    name='dis_n_conv_1_{}'.format(conv.shape[1])))
                conv = lrelu(conv2d(conv, output_dim=self.get_nf(pg - 2 - i), d_h=1, d_w=1, use_wscale=self.use_wscale,
                                                      name='dis_n_conv_2_{}'.format(conv.shape[1])))
                conv = downscale2d(conv)
                if i == 0 and t:
                    conv = alpha_trans * conv + (1 - alpha_trans) * conv_iden

            conv = MinibatchstateConcat(conv)
            conv = lrelu(
                conv2d(conv, output_dim=self.get_nf(1), k_w=3, k_h=3, d_h=1, d_w=1, use_wscale=self.use_wscale, name='dis_n_conv_1_{}'.format(conv.shape[1])))
            conv = lrelu(
                conv2d(conv, output_dim=self.get_nf(1), k_w=4, k_h=4, d_h=1, d_w=1, use_wscale=self.use_wscale, padding='VALID', name='dis_n_conv_2_{}'.format(conv.shape[1])))
            conv = tf.reshape(conv, [self.batch_size, -1])

            #for D
            output = fully_connect(conv, output_size=1, use_wscale=self.use_wscale, gain=1, name='dis_n_fully')

            return tf.nn.sigmoid(output), output
Example #27
    def encode_z(self, x, weights, biases):

        c1 = tf.nn.relu(
            batch_normal(conv2d(x, weights['e1'], biases['eb1']),
                         scope='enz_bn1'))

        c2 = tf.nn.relu(
            batch_normal(conv2d(c1, weights['e2'], biases['eb2']),
                         scope='enz_bn2'))

        c2 = tf.reshape(c2, [self.batch_size, 128 * 7 * 7])

        #using tanh instead of tf.nn.relu.
        result_z = batch_normal(fully_connect(c2, weights['e3'],
                                              biases['eb3']),
                                scope='enz_bn3')

        #result_c = tf.nn.sigmoid(fully_connect(c2, weights['e4'], biases['eb4']))

        #Transforming one-hot form
        #sparse_label = tf.arg_max(result_c, 1)

        #y_vec = tf.one_hot(sparse_label, 10)

        return result_z
Example #28
    def generate_mnist(self, z_var, reuse=False):

        with tf.variable_scope('generator') as scope:

            if reuse:
                scope.reuse_variables()

            d1 = tf.nn.relu(
                batch_normal(fully_connect(z_var,
                                           output_size=7 * 7 * 32,
                                           scope='gen_fully1'),
                             scope='gen_bn1',
                             reuse=reuse))
            d2 = tf.reshape(d1, [self.batch_size, 7, 7, 32])
            d2 = tf.nn.relu(
                batch_normal(de_conv(
                    d2,
                    output_shape=[self.batch_size, 14, 14, 16],
                    name='gen_deconv2'),
                             scope='gen_bn2',
                             reuse=reuse))
            d3 = de_conv(d2,
                         output_shape=[self.batch_size, 28, 28, 1],
                         name='gen_deconv3')
            return tf.nn.sigmoid(d3)
Example #29
    def Encode(self, x, cov1, cov2, cov3, reuse=False):

        with tf.variable_scope('encode') as scope:
            if reuse:
                scope.reuse_variables()

            conv1 = tf.nn.relu(
                batch_normal(conv2d(x, output_dim=2, name='e_c1', d_w=2),
                             scope='e_bn1'))

            conv1 = conv1 + cov1
            print(np.shape(conv1))
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=4, name='e_c2', d_w=2),
                             scope='e_bn2'))
            conv2 = conv2 + cov2
            print(np.shape(conv2))

            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=6, name='e_c3', d_w=2),
                             scope='e_bn3'))
            conv3 = conv3 + cov3
            print(np.shape(conv3))
            #
            # conv4 = tf.nn.relu(batch_normal(conv2d(conv3 , output_dim=16, name='e_c4',d_w=4), scope='e_bn4'))
            # #conv4 = conv4+cov4
            # print(np.shape(conv4))
            #
            # conv5 = tf.nn.relu(batch_normal(conv2d(conv4, output_dim=32, name='e_c5',d_w=4), scope='e_bn5'))
            # # conv5 = conv5+cov5
            # print(np.shape(conv5))

            conv6 = tf.reshape(conv3, [self.batch_size, 6 * 2 * 110250])

            z_mean = batch_normal(fully_connect(conv6,
                                                output_size=1,
                                                scope='e_f6'),
                                  scope='e_bn6')
            z_sigma = batch_normal(fully_connect(conv6,
                                                 output_size=1,
                                                 scope='e_f7'),
                                   scope='e_bn7')

            return z_mean, z_sigma, conv1, conv2, conv3  #, conv4, conv5
Example #30
    def gern_net(self, z, y):

        with tf.variable_scope('generator') as scope:

            z = tf.concat([z, y], 1)
            d1 = tf.nn.relu(
                batch_normal(fully_connect(z,
                                           output_size=11,
                                           scope="gen_fully1"),
                             scope="gen_bn1"))
            d2 = tf.nn.relu(
                batch_normal(fully_connect(d1,
                                           output_size=11,
                                           scope="gen_fully2"),
                             scope="gen_bn2"))
            return fully_connect(d2,
                                 output_size=4,
                                 scope="gen_fully3",
                                 initializer=xavier_initializer())