Example #1
    def encode_z(self, x, weights, biases):
        print('x', x.shape)
        c1 = tf.nn.relu(
            batch_normal(conv2d(x, weights['e1'], biases['eb1']),
                         scope='enz_bn1'))
        print('c1', c1.shape)
        c2 = tf.nn.relu(
            batch_normal(conv2d(c1, weights['e2'], biases['eb2']),
                         scope='enz_bn2'))
        print('c2', c2.shape)
        c3 = tf.nn.relu(
            batch_normal(conv2d(c2, weights['e3'], biases['eb3']),
                         scope='enz_bn3'))
        print('c3', c3.shape)
        c4 = tf.nn.relu(
            batch_normal(conv2d(c3, weights['e4'], biases['eb4']),
                         scope='enz_bn4'))
        print('c4', c4.shape)
        # flatten c4 before the final fully connected layer
        # (weights['e3'] must be sized for 128 * 16 * 16 inputs)
        c4 = tf.reshape(c4, [self.batch_size, 128 * 16 * 16])
        print('c4', c4.shape)
        #using tanh instead of tf.nn.relu.
        result_z = batch_normal(fully_connect(c4, weights['e3'],
                                              biases['eb3']),
                                scope='enz_bn5')
        print('result_z', result_z.shape)
        #result_c = tf.nn.sigmoid(fully_connect(c2, weights['e4'], biases['eb4']))

        #Transforming one-hot form
        #sparse_label = tf.arg_max(result_c, 1)

        #y_vec = tf.one_hot(sparse_label, 10)

        return result_z
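
All of these snippets lean on small helper wrappers (batch_normal, lrelu, conv2d, de_conv, fully_connect, conv_cond_concat) defined elsewhere in the repositories they come from, and the exact signatures vary between examples (some take a train/isTrain flag, some take reuse). As a rough orientation only, a minimal TF1-style sketch of the two simplest helpers could look like this; the names, defaults and the use of tf.contrib.layers.batch_norm are assumptions, not code from the source repos.

import tensorflow as tf

def lrelu(x, leak=0.2, name='lrelu'):
    # leaky ReLU: max(x, leak * x)
    return tf.maximum(x, leak * x, name=name)

def batch_normal(input_, scope='bn', reuse=False, isTrain=True):
    # thin wrapper so call sites only pass a variable scope (plus reuse / isTrain)
    with tf.variable_scope(scope, reuse=reuse):
        return tf.contrib.layers.batch_norm(input_,
                                            decay=0.9,
                                            epsilon=1e-5,
                                            scale=True,
                                            is_training=isTrain,
                                            updates_collections=None)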
Example #2
    def generate_mnist(self, z_var, reuse=False):

        with tf.variable_scope('generator') as scope:

            if reuse:
                scope.reuse_variables()

            d1 = tf.nn.relu(
                batch_normal(fully_connect(z_var,
                                           output_size=7 * 7 * 32,
                                           scope='gen_fully1'),
                             scope='gen_bn1',
                             reuse=reuse))
            d2 = tf.reshape(d1, [self.batch_size, 7, 7, 32])
            d2 = tf.nn.relu(
                batch_normal(de_conv(
                    d2,
                    output_shape=[self.batch_size, 14, 14, 16],
                    name='gen_deconv2'),
                             scope='gen_bn2',
                             reuse=reuse))
            d3 = de_conv(d2,
                         output_shape=[self.batch_size, 28, 28, 1],
                         name='gen_deconv3')
            return tf.nn.sigmoid(d3)
    def dis_net(self, images, y, reuse=False):

        with tf.variable_scope("discriminator") as scope:

            if reuse:
                scope.reuse_variables()

            # mnist data's shape is (28 , 28 , 1)
            yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
            # concat
            concat_data = conv_cond_concat(images, yb)

            conv1, w1 = conv2d(concat_data, output_dim=10, name='dis_conv1')
            tf.add_to_collection('weight_1', w1)

            conv1 = lrelu(conv1)
            conv1 = conv_cond_concat(conv1, yb)
            tf.add_to_collection('ac_1', conv1)


            conv2, w2 = conv2d(conv1, output_dim=64, name='dis_conv2')
            tf.add_to_collection('weight_2', w2)

            conv2 = lrelu(batch_normal(conv2, scope='dis_bn1'))
            tf.add_to_collection('ac_2', conv2)

            conv2 = tf.reshape(conv2, [self.batch_size, -1])
            conv2 = tf.concat([conv2, y], 1)

            f1 = lrelu(batch_normal(fully_connect(conv2, output_size=1024, scope='dis_fully1'), scope='dis_bn2', reuse=reuse))
            f1 = tf.concat([f1, y], 1)

            out = fully_connect(f1, output_size=1, scope='dis_fully2',  initializer = xavier_initializer())

            return tf.nn.sigmoid(out), out
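
The conditional discriminators and generators above call conv_cond_concat to tile the one-hot label tensor yb over the spatial dimensions and append it as extra channels. That helper is not shown in any of the listings; a typical TF1 definition, given here as an assumed sketch rather than the source's own code, is:

def conv_cond_concat(x, y):
    # x: feature maps [batch, h, w, c]; y: label maps [batch, 1, 1, y_dim]
    # broadcast y across h and w, then concatenate along the channel axis
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()
    return tf.concat(
        [x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)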
Example #4
    def encode_z(self, x, weights, biases):

        c1 = tf.nn.relu(
            batch_normal(conv2d(x, weights['e1'], biases['eb1']),
                         scope='enz_bn1'))

        c2 = tf.nn.relu(
            batch_normal(conv2d(c1, weights['e2'], biases['eb2']),
                         scope='enz_bn2'))

        c2 = tf.reshape(c2, [self.batch_size, 128 * 7 * 7])

        #using tanh instead of tf.nn.relu.
        result_z = batch_normal(fully_connect(c2, weights['e3'],
                                              biases['eb3']),
                                scope='enz_bn3')

        #result_c = tf.nn.sigmoid(fully_connect(c2, weights['e4'], biases['eb4']))

        #Transforming one-hot form
        #sparse_label = tf.arg_max(result_c, 1)

        #y_vec = tf.one_hot(sparse_label, 10)

        return result_z
Example #5
    def Style_generate(self, z_var, reuse=False):

        with tf.variable_scope('sty_generator') as scope:

            d2 = tf.reshape(z_var, [self.batch_size, 215, 16, 256])
            d2 = tf.nn.relu(
                batch_normal(de_conv(
                    d2,
                    output_shape=[self.batch_size, 430, 64, 128],
                    name='gen_deconv2',
                    d_h=2),
                             scope='gen_bn2'))
            d3 = tf.nn.relu(
                batch_normal(de_conv(
                    d2,
                    output_shape=[self.batch_size, 1720, 256, 64],
                    name='gen_deconv3'),
                             scope='gen_bn3'))
            d4 = tf.nn.relu(
                batch_normal(de_conv(
                    d3,
                    output_shape=[self.batch_size, 6880, 1024, 1],
                    name='gen_deconv4'),
                             scope='gen_bn4',
                             reuse=reuse))
            d5 = de_conv(d4,
                         output_shape=[self.batch_size, 6880, 1024, 1],
                         name='gen_deconv5',
                         d_h=1,
                         d_w=1)

            return tf.nn.relu(d5)
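
de_conv in the generators above is a transposed-convolution wrapper; kernel sizes and stride defaults differ per repository (several calls override d_h/d_w, and a couple of examples pass explicit weight and bias tensors instead of kwargs). A minimal sketch of the kwargs variant under the usual DCGAN conventions, with names and defaults assumed, would be:

def de_conv(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2,
            stddev=0.02, name='deconv2d'):
    with tf.variable_scope(name):
        # filter layout for conv2d_transpose is [height, width, out_channels, in_channels]
        w = tf.get_variable('w',
                            [k_h, k_w, output_shape[-1], int(input_.get_shape()[-1])],
                            initializer=tf.random_normal_initializer(stddev=stddev))
        deconv = tf.nn.conv2d_transpose(input_, w,
                                        output_shape=output_shape,
                                        strides=[1, d_h, d_w, 1])
        biases = tf.get_variable('biases', [output_shape[-1]],
                                 initializer=tf.constant_initializer(0.0))
        return tf.nn.bias_add(deconv, biases)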
Example #6
    def Style_Encode(self, x):

        with tf.variable_scope('sty_encode') as scope:

            conv1 = tf.nn.relu(
                batch_normal(conv2d(x, output_dim=64, name='e_c1'),
                             scope='e_bn1'))
            #print(np.shape(conv1))
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=128, name='e_c2'),
                             scope='e_bn2'))
            #print(np.shape(conv2))
            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=256, name='e_c3', d_h=2),
                             scope='e_bn3'))
            #print(np.shape(conv3))
            # conv4 = tf.nn.relu(batch_normal(conv2d(conv3 , output_dim=10, name='e_c4'), scope='e_bn4'))
            conv5 = tf.reshape(conv3, [self.batch_size, 256 * 16 * 215])

            z_mean = batch_normal(fully_connect(conv5,
                                                output_size=1,
                                                scope='e_f5'),
                                  scope='e_bn5')
            z_sigma = batch_normal(fully_connect(conv5,
                                                 output_size=1,
                                                 scope='e_f6'),
                                   scope='e_bn6')

            return z_mean, z_sigma, conv1, conv2, conv3
Example #7
    def gern_net(self, z, y):   # no BN layer on the generator's output
        with tf.variable_scope('generator') as scope:
            # ? 1 1 10
            yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
            # ? 110  concatenate z and y along the column axis: (64, 100 + 10)
            z = tf.concat([z, y], 1)
            # 7, 14  sizes of the intermediate feature maps
            c1, c2 = int(self.output_size / 4), int(self.output_size / 2)  # 7, 14

            # 10 stands for the number of labels
            # ? 1024
            d1 = tf.nn.relu(batch_normal(fully_connect(z, output_size=1024, scope='gen_fully'), scope='gen_bn1'))  # (64, 1024)
            # ? 1034  concatenate y again after the first fully connected layer
            d1 = tf.concat([d1, y], 1)  # (64, 1034)
            # second fully connected layer: ? 7*7*2*64; c1*c2*self.batch_size only equals
            # 7*7*2*64 here because c2 == 2*c1 and batch_size == 64
            d2 = tf.nn.relu(batch_normal(fully_connect(d1, output_size=c1*c2*self.batch_size, scope='gen_fully2'), scope='gen_bn2'))  # (64, 7*7*2*64)

            # ? 7 7 128
            d2 = tf.reshape(d2, [self.batch_size, c1, c1, self.batch_size*2])  # (64, 7, 7, 128)
            # ? 7 7 138
            d2 = conv_cond_concat(d2, yb)  # append y to the feature maps again
            # ? 14 14 128
            d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, c2, c2, 128], name='gen_deconv1'), scope='gen_bn3'))  # (64, 14, 14, 128)
            # ? 14 14 138
            d3 = conv_cond_concat(d3, yb)  # and once more
            # output: ? 28 28 1
            d4 = de_conv(d3, output_shape=[self.batch_size, self.output_size, self.output_size, self.channel],  name='gen_deconv2', initializer = xavier_initializer())  # (64, 28, 28, 1)

            return tf.nn.sigmoid(d4)
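
fully_connect follows the same pattern: a dense layer keyed by a variable scope, sometimes with an explicit initializer (xavier_initializer() in a few call sites). A plausible sketch of the kwargs form used by gern_net, with all names assumed, is:

def fully_connect(input_, output_size, scope='fully', initializer=None):
    shape = input_.get_shape().as_list()
    with tf.variable_scope(scope):
        w = tf.get_variable('w', [shape[1], output_size],
                            initializer=initializer
                            or tf.random_normal_initializer(stddev=0.02))
        b = tf.get_variable('b', [output_size],
                            initializer=tf.constant_initializer(0.0))
        return tf.matmul(input_, w) + b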
Example #8
    def Style_Encode(self, x):

        with tf.variable_scope('sty_encode') as scope:

            conv1 = tf.nn.relu(
                batch_normal(conv2d(x, output_dim=2, name='e_c1', d_w=2),
                             scope='e_bn1'))
            print(np.shape(conv1))
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=4, name='e_c2', d_w=2),
                             scope='e_bn2'))
            print(np.shape(conv2))
            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=6, name='e_c3', d_w=2),
                             scope='e_bn3'))
            print(np.shape(conv3))
            # # conv4 = tf.nn.relu(batch_normal(conv2d(conv3 , output_dim=10, name='e_c4'), scope='e_bn4'))
            # conv4 = tf.nn.relu(batch_normal(conv2d(conv3, output_dim=16, name='e_c4',d_w=4), scope='e_bn4'))
            # conv5 = tf.nn.relu(batch_normal(conv2d(conv4, output_dim=32, name='e_c5',d_w=4), scope='e_bn5'))

            conv6 = tf.reshape(conv3, [self.batch_size, 6 * 2 * 110250])

            z_mean = batch_normal(fully_connect(conv6,
                                                output_size=1,
                                                scope='e_f6'),
                                  scope='e_bn6')
            z_sigma = batch_normal(fully_connect(conv6,
                                                 output_size=1,
                                                 scope='e_f7'),
                                   scope='e_bn7')

            return z_mean, z_sigma, conv1, conv2, conv3  #, conv4, conv5
def dis_net(data_array , y , weights , biases , reuse=False):

    # mnist data's shape is (28 , 28 , 1)

    y = tf.reshape(y , shape=[batch_size, 1 , 1 , y_dim])
    # concat
    data_array = conv_cond_concat(data_array , y)

    conv1 = conv2d(data_array , weights['wc1'] , biases['bc1'])

    tf.add_to_collection('weight_1', weights['wc1'])

    conv1 = lrelu(conv1)

    tf.add_to_collection('ac_1' , conv1)

    conv2 = conv2d(conv1 , weights['wc2']  , biases['bc2'])
    conv2 = batch_normal(conv2 ,scope="dis_bn1" , reuse=reuse)
    conv2 = lrelu(conv2)

    tf.add_to_collection('weight_2', weights['wc2'])

    tf.add_to_collection('ac_2', conv2)

    conv2 = tf.reshape(conv2 , [batch_size , -1])

    f1 = fully_connect(conv2 ,weights['wc3'] , biases['bc3'])
    f1 = batch_normal(f1 , scope="dis_bn2" , reuse=reuse)
    f1 = lrelu(f1)

    out = fully_connect( f1 , weights['wd'] , biases['bd'])

    return tf.nn.sigmoid(out) , out
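
The tf.add_to_collection calls in the discriminators stash the first layers' weights and activations under named collections; the training script can then fetch them again, for example for TensorBoard summaries. A hypothetical usage (not from the source repos):

# hypothetical: pull the stored discriminator tensors back out for logging
w1 = tf.get_collection('weight_1')[0]
a1 = tf.get_collection('ac_1')[0]
tf.summary.histogram('dis_weight_1', w1)
tf.summary.histogram('dis_activation_1', a1)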
Example #10
    def discriminate(self, x_var, y, weights, biases, reuse=False):

        y1 = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
        x_var = conv_cond_concat(x_var, y1)

        conv1 = lrelu(conv2d(x_var, weights['wc1'], biases['bc1']))

        conv1 = conv_cond_concat(conv1, y1)

        conv2 = lrelu(
            batch_normal(conv2d(conv1, weights['wc2'], biases['bc2']),
                         scope='dis_bn1',
                         reuse=reuse))

        conv2 = tf.reshape(conv2, [self.batch_size, -1])

        conv2 = tf.concat([conv2, y], 1)

        fc1 = lrelu(
            batch_normal(fully_connect(conv2, weights['wc3'], biases['bc3']),
                         scope='dis_bn2',
                         reuse=reuse))

        fc1 = tf.concat([fc1, y], 1)
        #for D
        output = fully_connect(fc1, weights['wd'], biases['bd'])

        return tf.nn.sigmoid(output)
Example #11
    def Encode(self, img, reuse=False):

        with tf.variable_scope('encode') as scope:
            if reuse:
                scope.reuse_variables()
            conv1 = tf.nn.relu(
                batch_normal(conv2d(img, output_dim=64, name='e_c1'),
                             scope='e_bn1',
                             reuse=reuse,
                             isTrain=self.isTrain))
            print('conv1_shape', conv1.get_shape())
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=128, name='e_c2'),
                             scope='e_bn2',
                             reuse=reuse,
                             isTrain=self.isTrain))
            print('conv2_shape', conv2.get_shape())
            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=256, name='e_c3'),
                             scope='e_bn3',
                             reuse=reuse,
                             isTrain=self.isTrain))
            print('conv3_shape', conv3.get_shape())
            conv3_before_fc = conv3
            conv3 = tf.reshape(conv3, [self.batch_size, 256 * 8 * 8])
            fc1 = tf.nn.relu(
                batch_normal(fully_connect(conv3,
                                           output_size=1024,
                                           scope='e_f1'),
                             scope='e_bn4',
                             reuse=reuse,
                             isTrain=self.isTrain))
            z_mean = fully_connect(fc1, output_size=128, scope='e_f2')
            z_sigma = fully_connect(fc1, output_size=128, scope='e_f3')
            return z_mean, z_sigma, conv1, conv2, conv3_before_fc  # should these be the pre-activation or the post-activation values?
Example #12
    def Encode_AE(self, img, reuse=False):

        with tf.variable_scope('encode') as scope:
            if reuse:
                scope.reuse_variables()
            conv1 = tf.nn.relu(
                batch_normal(conv2d(img, output_dim=64, name='e_c1'),
                             scope='e_bn1',
                             reuse=reuse,
                             isTrain=self.isTrain))
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=128, name='e_c2'),
                             scope='e_bn2',
                             reuse=reuse,
                             isTrain=self.isTrain))
            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=256, name='e_c3'),
                             scope='e_bn3',
                             reuse=reuse,
                             isTrain=self.isTrain))
            conv3_before_fc = conv3
            conv3 = tf.reshape(conv3, [self.batch_size, 256 * 8 * 8])
            fc1 = tf.nn.relu(
                batch_normal(fully_connect(conv3,
                                           output_size=1024,
                                           scope='e_f1'),
                             scope='e_bn4',
                             reuse=reuse,
                             isTrain=self.isTrain))
            z_x = tf.nn.relu(
                batch_normal(fully_connect(fc1, output_size=128, scope='e_f2'),
                             scope='e_bn5',
                             reuse=reuse,
                             isTrain=self.isTrain))
            return z_x, conv1, conv2, conv3_before_fc
    def Encode(self, x):

        with tf.variable_scope('encode') as scope:

            conv1 = tf.nn.relu(
                batch_normal(conv2d(x, output_dim=64, name='e_c1'),
                             scope='e_bn1'))
            print("en conv1: ", conv1)
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=128, name='e_c2'),
                             scope='e_bn2'))
            print("en conv2: ", conv2)
            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=256, name='e_c3'),
                             scope='e_bn3'))
            print("en conv3: ", conv3)
            shape = tf.shape(conv3)[0]
            shape = tf.stack([shape, 256 * 8 * 8])
            conv3 = tf.reshape(conv3, shape)
            print("en conv3: ", conv3)
            fc1 = tf.nn.relu(
                batch_normal(fully_connect(conv3,
                                           output_size=1024,
                                           scope='e_f1'),
                             scope='e_bn4'))
            print("fc1: ", fc1)
            z_mean = fully_connect(fc1, output_size=128, scope='e_f2')
            print("z_mean: ", z_mean)
            z_sigma = fully_connect(fc1, output_size=128, scope='e_f3')
            print("z_sigma: ", z_sigma)
            return z_mean, z_sigma
Example #14
    def generate(self, z_var, batch_size=64, resnet=False, is_train=True, reuse=False):

        with tf.variable_scope('generator') as scope:

            s = 4
            if reuse:
                scope.reuse_variables()
            if self.output_size == 32:
                s = 4
            elif self.output_size == 48:
                s = 6

            d1 = fully_connect(z_var, output_size=s*s*256, scope='gen_fully1')
            d1 = tf.reshape(d1, [-1, s, s, 256])

            if resnet == False:

                d1 = tf.nn.relu(d1)
                d2 = tf.nn.relu(batch_normal(de_conv(d1, output_shape=[batch_size, s*2, s*2, 256], name='gen_deconv2')
                                             , scope='bn1', is_training=is_train))
                d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[batch_size, s*4, s*4, 128], name='gen_deconv3')
                                             , scope='bn2', is_training=is_train))
                d4 = tf.nn.relu(batch_normal(de_conv(d3, output_shape=[batch_size, s*8, s*8, 64], name='gen_deconv4')
                                             , scope='bn3', is_training=is_train))
                d5 = conv2d(d4, output_dim=self.channel, stride=1, kernel=3, name='gen_conv')

            else:

                d2 = Residual_G(d1, output_dims=256, up_sampling=True, residual_name='in1')
                d3 = Residual_G(d2, output_dims=256, up_sampling=True, residual_name='in2')
                d4 = Residual_G(d3, output_dims=256, up_sampling=True, residual_name='in3')
                d4 = tf.nn.relu(batch_normal(d4, scope='in4'))
                d5 = conv2d(d4, output_dim=self.channel, kernel=3, stride=1, name='gen_conv')

            return tf.tanh(d5)
Example #15
    def Encode(self, x, reuse=False):

        with tf.variable_scope('encode') as scope:
            if reuse:
                scope.reuse_variables()

            conv1 = tf.nn.relu(
                batch_normal(conv2d(x, output_dim=64, name='e_c1'),
                             scope='e_bn1'))
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=128, name='e_c2'),
                             scope='e_bn2'))
            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=256, name='e_c3'),
                             scope='e_bn3'))
            conv3 = tf.reshape(conv3, [self.batch_size, 256 * 13 * 13])
            fc1 = tf.nn.relu(
                batch_normal(fully_connect(conv3,
                                           output_size=1024,
                                           scope='e_f1'),
                             scope='e_bn4'))
            z_mean = fully_connect(fc1, output_size=128, scope='e_f2')
            z_sigma = fully_connect(fc1, output_size=128, scope='e_f3')

            return z_mean, z_sigma
Example #16
    def dis_net(self, feature_vector, y, reuse=False):

        with tf.variable_scope("discriminator") as scope:

            if reuse:
                scope.reuse_variables()

            # concat
            concat_data = tf.concat([feature_vector, y], 1)
            f1 = tf.nn.relu(
                batch_normal(fully_connect(concat_data,
                                           output_size=10,
                                           scope="dis_fully1"),
                             scope="dis_bn1"))
            f1 = tf.concat([f1, y], 1)
            # give each layer its own variable scope; reusing "dis_fully1"/"dis_bn1"
            # here would raise a variable-already-exists error in TF1
            f2 = lrelu(
                batch_normal(fully_connect(f1,
                                           output_size=10,
                                           scope="dis_fully2"),
                             scope="dis_bn2"))
            f2 = tf.concat([f2, y], 1)
            out = fully_connect(f2,
                                output_size=1,
                                scope='dis_fully3',
                                initializer=xavier_initializer())

            return tf.nn.sigmoid(out), out
Example #17
def ResidualBlock(x,
                  outchannel,
                  train,
                  stride=1,
                  shortcut=None,
                  name="ResidualBlock"):
    with tf.variable_scope(name):
        conv1 = ops.conv2d(x,
                           outchannel,
                           k_h=3,
                           k_w=3,
                           s_h=stride,
                           s_w=stride,
                           scope="conv1")
        bn1 = tf.nn.relu(ops.batch_normal(conv1, train=train, scope="bn1"))
        conv2 = ops.conv2d(bn1,
                           outchannel,
                           k_h=3,
                           k_w=3,
                           s_h=1,
                           s_w=1,
                           with_bias=False,
                           scope="conv2")
        left = ops.batch_normal(conv2, train=train, scope="bn2")
        right = x if shortcut is None else shortcut(x)
        return tf.nn.relu(left + right)
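
The shortcut argument of ResidualBlock lets the caller supply a projection when the block changes the channel count or spatial resolution; with shortcut=None the input is added back unchanged, so the shapes must already match. A hypothetical usage, where the 1x1 projection and all names are illustrative (and x is assumed to already have 64 channels):

def projection(x):
    # 1x1 strided conv so the skip connection matches the block's output shape
    return ops.conv2d(x, 128, k_h=1, k_w=1, s_h=2, s_w=2, scope='proj')

h = ResidualBlock(x, outchannel=64, train=True, name='res1')   # identity shortcut
h = ResidualBlock(h, outchannel=128, train=True, stride=2,
                  shortcut=projection, name='res2')            # projected shortcut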
def sample_net(batch_size, z, y, output_size):
    z = tf.concat([z, y], 1)
    yb = tf.reshape(y, shape=[batch_size, 1, 1, y_dim])
    c1, c2 = output_size // 4, output_size // 2  # integer division so the reshape/output shapes below get ints

    # 10 stands for the number of labels
    d1 = fully_connect(z, weights2['wd'], biases2['bd'])
    d1 = batch_normal(d1, scope="genbn1", reuse=True)
    d1 = tf.nn.relu(d1)
    d1 = tf.concat([d1, y], 1)

    d2 = fully_connect(d1, weights2['wc1'], biases2['bc1'])
    d2 = batch_normal(d2, scope="genbn2", reuse=True)
    d2 = tf.nn.relu(d2)
    d2 = tf.reshape(d2, [batch_size, c1, c1, 64 * 2])
    d2 = conv_cond_concat(d2, yb)

    d3 = de_conv(d2,
                 weights2['wc2'],
                 biases2['bc2'],
                 out_shape=[batch_size, c2, c2, 128])
    d3 = batch_normal(d3, scope="genbn3", reuse=True)
    d3 = tf.nn.relu(d3)
    d3 = conv_cond_concat(d3, yb)

    d4 = de_conv(d3,
                 weights2['wc3'],
                 biases2['bc3'],
                 out_shape=[batch_size, output_size, output_size, channel])

    return tf.nn.sigmoid(d4)
def sample_net(batch_size, z, y, output_size):

    z = tf.concat([z, y], 1)

    # mnist data's shape is (28, 28, 1)
    # in the paper, s = 28
    c1, c2 = output_size // 4, output_size // 2

    # 10 stands for the number of labels
    d1 = fully_connect(z , weights2['wd'], biases2['bd'])
    d1 = batch_normal(d1, scope="genbn1" ,reuse=True)
    d1 = tf.nn.relu(d1)

    d2 = fully_connect(d1, weights2['wc1'], biases2['bc1'])
    d2 = batch_normal(d2, scope="genbn2" ,reuse=True)
    d2 = tf.nn.relu(d2)
    d2 = tf.reshape(d2, [batch_size, c1, c1 , 64 * 2])

    d3 = de_conv(d2, weights2['wc2'], biases2['bc2'], out_shape=[batch_size , c2, c2, 128])
    d3 = batch_normal(d3, scope="genbn3" ,reuse=True)
    d3 = tf.nn.relu(d3)

    d4 = de_conv(d3, weights2['wc3'], biases2['bc3'], out_shape=[batch_size, output_size, output_size, 1])

    return tf.nn.sigmoid(d4)
    def gen_net(self, z, y):

        with tf.variable_scope('generator') as scope:
 
            yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim]) #Reshape the input noise(the precondition of the CGAN into a shape 64x1x1x10)
            z = tf.concat([z, y], 1)
            c1, c2 = int( self.output_size / 4), int(self.output_size / 2 )

            # 10 stands for the number of labels
            d1 = tf.nn.relu(batch_normal(fully_connect(z, output_size=1024, scope='gen_fully'), scope='gen_bn1'))

            d1 = tf.concat([d1, y], 1)

            d2 = tf.nn.relu(batch_normal(fully_connect(d1, output_size=7*7*2*64, scope='gen_fully2'), scope='gen_bn2'))

            d2 = tf.reshape(d2, [self.batch_size, c1, c1, 64 * 2])
            d2 = conv_cond_concat(d2, yb)

            d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, c2, c2, 128], name='gen_deconv1'), scope='gen_bn3'))

            d3 = conv_cond_concat(d3, yb)

            d4 = de_conv(d3, output_shape=[self.batch_size, self.output_size, self.output_size, self.channel], 
                         name='gen_deconv2', initializer = xavier_initializer())

            return tf.nn.sigmoid(d4)
    def Encode(self, x):

        with tf.variable_scope('encode') as scope:

            conv1 = tf.nn.relu(
                batch_normal(conv2d(x, output_dim=64, name='e_c1'),
                             scope='e_bn1'))
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=128, name='e_c2'),
                             scope='e_bn2'))
            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=256, name='e_c3'),
                             scope='e_bn3'))
            conv3 = tf.reshape(conv3, [self.batch_size, 256 * 8 * 8])
            fc1 = tf.nn.relu(
                batch_normal(fully_connect(conv3,
                                           output_size=1024,
                                           scope='e_f1'),
                             scope='e_bn4'))
            z_mean = fully_connect(fc1,
                                   output_size=self.latent_dim,
                                   scope='e_f2')
            z_sigma = fully_connect(fc1,
                                    output_size=self.latent_dim,
                                    scope='e_f3')

            return z_mean, z_sigma
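
The various Encode methods return z_mean and z_sigma without sampling; the surrounding VAE training code presumably draws the latent z with the reparameterization trick. A sketch of that step, treating z_sigma as a log-variance (an assumption about the callers, not something shown in these listings):

# z_mean, z_sigma come from Encode(); both have shape [batch_size, latent_dim]
eps = tf.random_normal(shape=tf.shape(z_mean))
z = z_mean + tf.exp(0.5 * z_sigma) * eps   # z ~ N(z_mean, exp(z_sigma))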
Example #22
    def generate(self, z_var, y, weights, biases):

        #add the first layer

        z_var = tf.concat([z_var, y], 1)

        d1 = tf.nn.relu(batch_normal(fully_connect(z_var , weights['wd'], biases['bd']) , scope='gen_bn1'))

        #add the second layer

        d1 = tf.concat([d1, y], 1)

        d2 = tf.nn.relu(batch_normal(fully_connect(d1 , weights['wc1'], biases['bc1']) , scope='gen_bn2'))

        d2 = tf.reshape(d2 , [self.batch_size , 7 , 7 , 128])
        y = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])

        d2 = conv_cond_concat(d2, y)

        d3 = tf.nn.relu(batch_normal(de_conv(d2, weights['wc2'], biases['bc2'], out_shape=[self.batch_size, 14 , 14 , 64]) , scope='gen_bn3'))

        d3 = conv_cond_concat(d3, y)

        output = de_conv(d3, weights['wc3'], biases['bc3'], out_shape=[self.batch_size, 28, 28, 1])

        return tf.nn.sigmoid(output)
Example #23
    def Embed_landmark(self, lm, reuse=False):

        with tf.variable_scope('embed') as scope:
            if reuse:
                scope.reuse_variables()
            fc1 = tf.nn.relu(
                batch_normal(fully_connect(lm, output_size=128, scope='em_f1'),
                             scope='em_bn1',
                             reuse=reuse,
                             isTrain=self.isTrain))
            fc2 = tf.nn.relu(
                batch_normal(fully_connect(fc1, output_size=64, scope='em_f2'),
                             scope='em_bn2',
                             reuse=reuse,
                             isTrain=self.isTrain))
            fc3 = tf.nn.relu(
                batch_normal(fully_connect(fc2, output_size=32, scope='em_f3'),
                             scope='em_bn3',
                             reuse=reuse,
                             isTrain=self.isTrain))
            fc4 = tf.nn.relu(
                batch_normal(fully_connect(fc3, output_size=64, scope='em_f4'),
                             scope='em_bn4',
                             reuse=reuse,
                             isTrain=self.isTrain))
            fc5 = tf.nn.relu(
                batch_normal(fully_connect(fc4, output_size=128,
                                           scope='em_f5'),
                             scope='em_bn5',
                             reuse=reuse,
                             isTrain=self.isTrain))
            fc6 = tf.nn.tanh((fully_connect(fc5,
                                            output_size=68 * 2,
                                            scope='em_f6')))
            return fc3, fc6
Example #24
    def discriminate(self, x_var, reuse=False):

        with tf.variable_scope("discriminator") as scope:

            if reuse:
                scope.reuse_variables()

            conv1 = tf.nn.relu(conv2d(x_var, output_dim=32, name='dis_conv1'))
            conv2 = tf.nn.relu(
                batch_normal(conv2d(conv1, output_dim=128, name='dis_conv2'),
                             scope='dis_bn1',
                             reuse=reuse))
            conv3 = tf.nn.relu(
                batch_normal(conv2d(conv2, output_dim=256, name='dis_conv3'),
                             scope='dis_bn2',
                             reuse=reuse))
            conv4 = conv2d(conv3, output_dim=256, name='dis_conv4')
            middle_conv = conv4
            conv4 = tf.nn.relu(
                batch_normal(conv4, scope='dis_bn3', reuse=reuse))
            conv4 = tf.reshape(conv4, [self.batch_size, -1])
            fl = lrelu(
                batch_normal(fully_connect(conv4,
                                           output_size=512,
                                           scope='dis_fully1'),
                             scope='dis_bn4',
                             reuse=reuse))
            output = fully_connect(fl, output_size=1, scope='dis_fully2')

            return middle_conv, output
def inference(image_holder, is_training):
    # conv -> ReLU -> max pool -> batch norm
    weight1 = variable_with_weight_loss(shape=[5, 5, 3, 64],
                                        stddev=5e-2,
                                        wl=0.)
    bias1 = tf.Variable(tf.constant(0., shape=[64]))
    kernel1 = tf.nn.conv2d(image_holder, weight1, [1, 1, 1, 1], padding='SAME')
    conv1 = tf.nn.relu(tf.nn.bias_add(kernel1, bias1))
    pool1 = tf.nn.max_pool(conv1,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME')
    norm1 = ops.batch_normal(pool1, train=is_training, scope='BN_1')

    # conv -> ReLU -> batch norm -> max pool
    weight2 = variable_with_weight_loss(shape=[5, 5, 64, 64],
                                        stddev=5e-2,
                                        wl=0.)
    bias2 = tf.Variable(tf.constant(0., shape=[64]))
    kernel2 = tf.nn.conv2d(norm1, weight2, [1, 1, 1, 1], padding='SAME')
    conv2 = tf.nn.relu(tf.nn.bias_add(kernel2, bias2))
    norm2 = ops.batch_normal(conv2, train=is_training, scope='BN_2')
    pool2 = tf.nn.max_pool(norm2,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME')

    # fully connected: flatten -> 384
    p2s = pool2.get_shape()
    reshape = tf.reshape(pool2, [-1, p2s[1] * p2s[2] * p2s[3]])
    dim = reshape.get_shape()[1].value
    weight3 = variable_with_weight_loss(shape=[dim, 384],
                                        stddev=0.04,
                                        wl=0.004)
    bias3 = tf.Variable(tf.constant(0.1, shape=[384]))
    local3 = tf.nn.relu(tf.matmul(reshape, weight3) + bias3)

    # fully connected: 384 -> 192
    weight4 = variable_with_weight_loss(shape=[384, 192],
                                        stddev=0.04,
                                        wl=0.004)
    bias4 = tf.Variable(tf.constant(0.1, shape=[192]))
    """
    tf.nn.bias_add 是 tf.add 的一个特例
    二者均支持 broadcasting(广播机制),也即两个操作数最后一个维度保持一致。
    除了支持最后一个维度保持一致的两个操作数相加外,tf.add 还支持第二个操作数是一维的情况
    """
    local4 = tf.nn.relu(tf.nn.bias_add(tf.matmul(local3, weight4), bias4))

    # fully connected: 192 -> 10
    weight5 = variable_with_weight_loss(shape=[192, 10],
                                        stddev=1 / 192.,
                                        wl=0.)
    bias5 = tf.Variable(tf.constant(0., shape=[10]))
    logits = tf.add(tf.matmul(local4, weight5), bias5, name="logits")
    # print(logits)
    return logits
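
variable_with_weight_loss in inference creates a weight tensor and, when wl is non-zero, is expected to register an L2 penalty that the training loop later sums into the total loss. A common definition along those lines, given as a sketch rather than the author's exact code:

def variable_with_weight_loss(shape, stddev, wl):
    # truncated-normal initialised weights with an optional L2 weight-decay term
    var = tf.Variable(tf.truncated_normal(shape, stddev=stddev))
    if wl is not None and wl > 0:
        weight_loss = tf.multiply(tf.nn.l2_loss(var), wl, name='weight_loss')
        tf.add_to_collection('losses', weight_loss)
    return var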
Example #26
    def generate(self, z_var, y, weights, biases):
        g_prob = 1

        # concat z_var and y
        z_var = tf.concat([z_var, y], 1)
        d0 = lrelu(
            batch_normal(fully_connect(z_var, weights['wc0'], biases['bc0']),
                         scope='gen_bn0'))
        z_var = tf.reshape(d0, shape=[d0.shape[0], 1, 1, d0.shape[1]])
        #         z_var = tf.reshape(z_var, shape=[z_var.shape[0], 1, 1, z_var.shape[1]])
        print('z_var', z_var.shape)
        # the first layer
        z_var = tf.nn.dropout(z_var, g_prob)
        d1 = tf.nn.relu(
            batch_normal(de_conv(z_var,
                                 weights['wc1'],
                                 biases['bc1'],
                                 out_shape=[self.batch_size, 4, 4, 512],
                                 s=[1, 2, 2, 1],
                                 padding_='VALID'),
                         scope='gen_bn1'))
        print('d1', d1.shape)
        d1 = tf.nn.dropout(d1, g_prob)
        d2 = tf.nn.relu(
            batch_normal(de_conv(d1,
                                 weights['wc2'],
                                 biases['bc2'],
                                 out_shape=[self.batch_size, 8, 8, 256]),
                         scope='gen_bn2'))

        d2 = tf.nn.dropout(d2, g_prob)
        d3 = tf.nn.relu(
            batch_normal(de_conv(d2,
                                 weights['wc3'],
                                 biases['bc3'],
                                 out_shape=[self.batch_size, 16, 16, 128]),
                         scope='gen_bn3'))

        d3 = tf.nn.dropout(d3, g_prob)
        d4 = tf.nn.relu(
            batch_normal(de_conv(d3,
                                 weights['wc4'],
                                 biases['bc4'],
                                 out_shape=[self.batch_size, 32, 32, 64]),
                         scope='gen_bn4'))

        d5 = tf.tanh(
            de_conv(d4,
                    weights['wc5'],
                    biases['bc5'],
                    out_shape=[self.batch_size, 64, 64, self.channel]))
        print('d5', d5.shape)

        return d5
Example #27
    def generate(self, z_var, conv1, conv2, conv3, reuse=False):

        with tf.variable_scope('generator') as scope:

            if reuse:
                scope.reuse_variables()

            d1 = tf.nn.relu(
                batch_normal(fully_connect(z_var,
                                           output_size=4 * 4 * 256,
                                           scope='gen_fully1'),
                             scope='gen_bn1',
                             reuse=reuse,
                             isTrain=self.isTrain))
            d2 = tf.reshape(d1, [self.batch_size, 4, 4, 256])
            d2 = tf.nn.relu(
                batch_normal(de_conv(d2,
                                     output_shape=[self.batch_size, 8, 8, 256],
                                     name='gen_deconv2'),
                             scope='gen_bn2',
                             reuse=reuse,
                             isTrain=self.isTrain)) + conv3
            print('d2_shape', d2.get_shape())
            d3 = tf.nn.relu(
                batch_normal(de_conv(
                    d2,
                    output_shape=[self.batch_size, 16, 16, 128],
                    name='gen_deconv3'),
                             scope='gen_bn3',
                             reuse=reuse,
                             isTrain=self.isTrain)) + conv2
            print('d3_shape', d3.get_shape())
            d4 = tf.nn.relu(
                batch_normal(de_conv(
                    d3,
                    output_shape=[self.batch_size, 32, 32, 64],
                    name='gen_deconv4'),
                             scope='gen_bn4',
                             reuse=reuse,
                             isTrain=self.isTrain)) + conv1
            print('d4_shape()', d4.get_shape())
            d5 = tf.nn.relu(
                batch_normal(de_conv(
                    d4,
                    output_shape=[self.batch_size, 64, 64, 64],
                    name='gen_deconv5'),
                             scope='gen_bn5',
                             reuse=reuse,
                             isTrain=self.isTrain))
            print('d5_shape', d5.get_shape())
            d6 = conv2d(d5, output_dim=3, d_h=1, d_w=1, name='gen_conv6')
            print('d6_shape', d6.get_shape())
            return tf.nn.tanh(d6)
Example #28
    def encode_y(self, x, weights, biases):

        c1 = tf.nn.relu(batch_normal(conv2d(x, weights['e1'], biases['eb1']), scope='eny_bn1'))

        c2 = tf.nn.relu(batch_normal(conv2d(c1, weights['e2'], biases['eb2']), scope='eny_bn2'))

        c2 = tf.reshape(c2, [self.batch_size, 128 * 7 * 7])

        result_y = tf.nn.sigmoid(fully_connect(c2, weights['e3'], biases['eb3']))

        #y_vec = tf.one_hot(tf.arg_max(result_y, 1), 10)

        return result_y
Example #29
    def generate(self, z_var, reuse=False):
        # the size of z_var is 32 * 128
        with tf.variable_scope('generator') as scope:

            if reuse:
                scope.reuse_variables()

            d1 = tf.nn.relu(
                batch_normal(fully_connect(z_var,
                                           output_size=4 * 4 * 256,
                                           scope='gen_fully1'),
                             scope='gen_bn1',
                             reuse=reuse))
            d2 = tf.reshape(d1, [int(self.batch_size / 2), 4, 4, 256])
            d2 = tf.nn.relu(
                batch_normal(de_conv(
                    d2,
                    output_shape=[int(self.batch_size / 2), 8, 8, 256],
                    name='gen_deconv2'),
                             scope='gen_bn2',
                             reuse=reuse))
            d3 = tf.nn.relu(
                batch_normal(de_conv(
                    d2,
                    output_shape=[int(self.batch_size / 2), 16, 16, 128],
                    name='gen_deconv3'),
                             scope='gen_bn3',
                             reuse=reuse))
            d4 = tf.nn.relu(
                batch_normal(de_conv(
                    d3,
                    output_shape=[int(self.batch_size / 2), 32, 32, 64],
                    name='gen_deconv4'),
                             scope='gen_bn4',
                             reuse=reuse))
            d5 = tf.nn.relu(
                batch_normal(de_conv(
                    d4,
                    output_shape=[int(self.batch_size / 2), 64, 64, 32],
                    name='gen_deconv5'),
                             scope='gen_bn5',
                             reuse=reuse))
            d6 = de_conv(d5,
                         output_shape=[int(self.batch_size / 2), 64, 64, 3],
                         name='gen_deconv6',
                         d_h=1,
                         d_w=1)

            return tf.nn.tanh(d6)
Example #30
    def generate(self, z_var, reuse=False):

        with tf.variable_scope('generator') as scope:

            if reuse:
                scope.reuse_variables()

            d1 = tf.nn.relu(batch_normal(fully_connect(z_var , output_size=8*8*256, scope='gen_fully1'), scope='gen_bn1', reuse=reuse))
            d2 = tf.reshape(d1, [self.batch_size, 8, 8, 256])
            d2 = tf.nn.relu(batch_normal(de_conv(d2 , output_shape=[self.batch_size, 16, 16, 256], name='gen_deconv2'), scope='gen_bn2', reuse=reuse))
            d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, 32, 32, 128], name='gen_deconv3'), scope='gen_bn3', reuse=reuse))
            d4 = tf.nn.relu(batch_normal(de_conv(d3, output_shape=[self.batch_size, 64, 64, 32], name='gen_deconv4'), scope='gen_bn4', reuse=reuse))
            d5 = de_conv(d4, output_shape=[self.batch_size, 64, 64, 3], name='gen_deconv5', d_h=1, d_w=1)

            return tf.nn.tanh(d5)