Example #1
def sample_net(batch_size, z, y, output_size):
    # relies on module-level globals: y_dim, weights2, biases2, channel
    z = tf.concat([z, y], 1)
    yb = tf.reshape(y, shape=[batch_size, 1, 1, y_dim])
    c1, c2 = output_size // 4, output_size // 2  # integer division so the reshape sizes below are ints

    # 10 stands for the number of labels
    d1 = fully_connect(z, weights2['wd'], biases2['bd'])
    d1 = batch_normal(d1, scope="genbn1", reuse=True)
    d1 = tf.nn.relu(d1)
    d1 = tf.concat([d1, y], 1)

    d2 = fully_connect(d1, weights2['wc1'], biases2['bc1'])
    d2 = batch_normal(d2, scope="genbn2", reuse=True)
    d2 = tf.nn.relu(d2)
    d2 = tf.reshape(d2, [batch_size, c1, c1, 64 * 2])
    d2 = conv_cond_concat(d2, yb)

    d3 = de_conv(d2,
                 weights2['wc2'],
                 biases2['bc2'],
                 out_shape=[batch_size, c2, c2, 128])
    d3 = batch_normal(d3, scope="genbn3", reuse=True)
    d3 = tf.nn.relu(d3)
    d3 = conv_cond_concat(d3, yb)

    d4 = de_conv(d3,
                 weights2['wc3'],
                 biases2['bc3'],
                 out_shape=[batch_size, output_size, output_size, channel])

    return tf.nn.sigmoid(d4)
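
Every snippet on this page leans on a conv_cond_concat helper that tiles the label tensor across the spatial dimensions of a feature map before concatenating on the channel axis. A minimal sketch, assuming NHWC tensors with static shapes (this matches the version commonly found in DCGAN-style utility files; the repo's exact definition may differ):

def conv_cond_concat(x, y):
    # concatenate the condition y (shape [N, 1, 1, y_dim]) onto the channel
    # axis of x (shape [N, H, W, C]) by broadcasting y over H and W
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()
    return tf.concat(
        [x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)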
Example #2
    def discriminator(self, image, y=None, reuse=False):
        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()
            if not self.y_dim:
                h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
                h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
                h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
                h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))
                h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h4_lin')
                return tf.nn.sigmoid(h4), h4
            else:
                yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
                x = conv_cond_concat(image, yb)

                h0 = lrelu(conv2d(x, self.c_dim+self.y_dim, name='d_h0_conv'))
                h0 = conv_cond_concat(h0, yb)

                h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim+self.y_dim, name='d_h1_conv')))
                h1 = tf.reshape(h1, [self.batch_size, -1])
                h1 = tf.concat([h1, y], 1)

                h2 = lrelu(self.d_bn2(linear(h1, self.dfc_dim, 'd_h2_lin')))
                h2 = tf.concat([h2, y], 1)

                h3 = linear(h2, 1, 'd_h3_lin')
                return tf.nn.sigmoid(h3), h3
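
The discriminators above use a leaky ReLU helper rather than tf.nn.relu. A minimal sketch, assuming the usual 0.2 leak slope found in these codebases:

def lrelu(x, leak=0.2, name="lrelu"):
    # leaky ReLU: identity for positive inputs, slope `leak` for negative ones
    return tf.maximum(x, leak * x)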
Example #3
    def patch_generator(self, index, z, y=None):
        """
        patch generator
        :param z: 
        :param y: 
        :param scope: 
        :return: 
        """
        with tf.variable_scope("generator_%d" % index) as scope:
            s_h = int(self.output_height / np.sqrt(self.num_patches))  # 16
            s_w = int(self.output_width / np.sqrt(self.num_patches))   # 16
            s_h2, s_w2 = int(s_h / 2), int(s_w / 2)  # 8, 8
            s_h4, s_w4 = int(s_h2 / 2), int(s_w2 / 2)  # 4, 4

            yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim + 2])
            z = tf.concat([z, y], 1)

            h0 = tf.nn.relu(self.g_bns_all[index][0](linear(
                z, self.gfc_dim, "g_h0_lin")))
            h0 = tf.concat([h0, y], 1)

            h1 = tf.nn.relu(self.g_bns_all[index][1](linear(
                h0, self.gf_dim * 4 * s_h4 * s_w4, "g_h1_lin")))
            h1 = tf.reshape(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 4])
            h1 = conv_cond_concat(h1, yb)

            h2 = deconv2d(h1, [self.batch_size, s_h2, s_w2, self.gf_dim * 2],
                          name="g_h2")
            h2 = self.g_bns_all[index][2](h2)
            h2 = tf.nn.relu(h2)
            h2 = conv_cond_concat(h2, yb)

            return tf.nn.sigmoid(
                deconv2d(h2, [self.batch_size, s_h, s_w, self.c_dim],
                         name="g_h3"))
Example #4
    def discriminate(self, x_var, y, weights, biases, reuse=False):

        y1 = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
        x_var = conv_cond_concat(x_var, y1)

        conv1 = lrelu(conv2d(x_var, weights['wc1'], biases['bc1']))

        conv1 = conv_cond_concat(conv1, y1)

        conv2 = lrelu(
            batch_normal(conv2d(conv1, weights['wc2'], biases['bc2']),
                         scope='dis_bn1',
                         reuse=reuse))

        conv2 = tf.reshape(conv2, [self.batch_size, -1])

        conv2 = tf.concat([conv2, y], 1)

        fc1 = lrelu(
            batch_normal(fully_connect(conv2, weights['wc3'], biases['bc3']),
                         scope='dis_bn2',
                         reuse=reuse))

        fc1 = tf.concat([fc1, y], 1)
        # final fully connected layer producing D's score
        output = fully_connect(fc1, weights['wd'], biases['bd'])

        return tf.nn.sigmoid(output)
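
Several examples call a batch_normal(x, scope, reuse) wrapper rather than a batch-norm layer directly. A plausible minimal sketch, assuming it wraps tf.contrib.layers.batch_norm as in common TF1 GAN repos (the epsilon and decay values here are assumptions):

from tensorflow.contrib.layers import batch_norm

def batch_normal(input_, scope="bn", reuse=False):
    # thin wrapper so the same scope can be shared between the training
    # and sampling graphs (the second caller passes reuse=True)
    return batch_norm(input_, epsilon=1e-5, decay=0.9, scale=True,
                      scope=scope, reuse=reuse, updates_collections=None)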
Example #5

    def get_generator_net(self, name):
        batch_size = self.get_batch_size()
        with tf.variable_scope('generator') as scope:

            z = tf.concat([self.noise, self.labels], 1)
            net = ops.linear(
                z,
                output_size=1024,
                scope='gen_fully1',
                weights_initializer=self.get_weights_initializer())
            net = ops.batch_norm(net, self.is_training, scope='gen_bn1')
            net = ops.lrelu(net)

            net = tf.concat([net, self.labels], 1)
            # note: assumes the data height/width are divisible by 4;
            # otherwise the deconv output sizes below will not match self.images.shape
            h, w = self.data.shape[0] // 4, self.data.shape[1] // 4
            net = ops.linear(
                net,
                output_size=h * w * 2 * batch_size,
                scope='gen_fully2',
                weights_initializer=self.get_weights_initializer())
            net = ops.batch_norm(net, self.is_training, scope='gen_bn2')
            net = ops.lrelu(net)

            net = tf.reshape(net, [batch_size, h, w, 2 * batch_size])
            yb = tf.reshape(self.labels, shape=[batch_size, 1, 1, -1])
            net = ops.conv_cond_concat(net, yb)

            h, w = self.data.shape[0] // 2, self.data.shape[1] // 2
            net = ops.deconv2d(
                net, [batch_size, h, w, 2 * batch_size],
                4,
                4,
                2,
                2,
                scope='gen_deconv1',
                weights_initializer=self.get_weights_initializer())
            net = ops.batch_norm(net, self.is_training, scope='gen_bn3')
            net = ops.lrelu(net)

            net = ops.conv_cond_concat(net, yb)
            out = ops.deconv2d(net,
                               self.images.shape,
                               4,
                               4,
                               2,
                               2,
                               scope='gen_deconv2',
                               weights_initializer=xavier_initializer())

            return tf.nn.sigmoid(out, name=name), out
Example #6
    def sampler(self, z, y=None):
        with tf.variable_scope("generator") as scope:
            scope.reuse_variables()
            if not self.y_dim:
                s_h, s_w = self.output_height, self.output_width
                s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
                s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
                s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
                s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)

                # project `z` and reshape
                h0 = tf.reshape(
                    linear(z, self.gf_dim * 8 * s_h16 * s_w16, 'g_h0_lin'),
                    [-1, s_h16, s_w16, self.gf_dim * 8])
                h0 = tf.nn.relu(self.g_bn0(h0, train=False))

                h1 = deconv2d(h0, [self.batch_size, s_h8, s_w8, self.gf_dim * 4], name='g_h1')
                h1 = tf.nn.relu(self.g_bn1(h1, train=False))

                h2 = deconv2d(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2], name='g_h2')
                h2 = tf.nn.relu(self.g_bn2(h2, train=False))

                h3 = deconv2d(h2, [self.batch_size, s_h2, s_w2, self.gf_dim * 1], name='g_h3')
                h3 = tf.nn.relu(self.g_bn3(h3, train=False))

                h4 = deconv2d(h3, [self.batch_size, s_h, s_w, self.c_dim], name='g_h4')

                return tf.nn.tanh(h4)
            else:
                s_h, s_w = self.output_height, self.output_width
                s_h2, s_h4 = int(s_h / 2), int(s_h / 4)
                s_w2, s_w4 = int(s_w / 2), int(s_w / 4)

                # yb = tf.reshape(y, [-1, 1, 1, self.y_dim])
                yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
                z = tf.concat([z, y], 1)

                h0 = tf.nn.relu(self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin'), train=False))
                h0 = tf.concat([h0, y], 1)

                h1 = tf.nn.relu(self.g_bn1(
                    linear(h0, self.gf_dim * 2 * s_h4 * s_w4, 'g_h1_lin'), train=False))
                h1 = tf.reshape(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2])
                h1 = conv_cond_concat(h1, yb)

                h2 = tf.nn.relu(self.g_bn2(
                    deconv2d(h1, [self.batch_size, s_h2, s_w2, self.gf_dim * 2], name='g_h2'), train=False))
                h2 = conv_cond_concat(h2, yb)

                return tf.nn.sigmoid(deconv2d(h2, [self.batch_size, s_h, s_w, self.c_dim], name='g_h3'))
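
The unconditional branch sizes its feature maps with conv_out_size_same, the output size of a stride-s convolution under SAME padding. A minimal sketch matching the usual DCGAN-tensorflow definition (treat it as an assumption about this particular repo):

import math

def conv_out_size_same(size, stride):
    # SAME-padded convolution output size: ceil(size / stride)
    return int(math.ceil(float(size) / float(stride)))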
Example #7
    def generator(self, z, y=None):
        with tf.variable_scope('generator') as scope:
            if not self.y_dim:
                s_h, s_w = self.output_height, self.output_width
                s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
                s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
                s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
                s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)

                # project z and reshape
                self.z_, self.h0_w, self.h0_b = linear(z, self.gf_dim*8*s_h16*s_w16, 'g_h0_lin', with_w=True)
                # reshape vector to matrix
                self.h0 = tf.reshape(self.z_, [-1, s_h16, s_w16, self.gf_dim*8])
                h0 = tf.nn.relu(self.g_bn0(self.h0))

                self.h1, self.h1_w, self.h1_b = \
                    deconv2d(h0, [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='g_h1', with_w=True)
                h1 = tf.nn.relu(self.g_bn1(self.h1))

                h2, self.h2_w, self.h2_b = \
                    deconv2d(h1, [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='g_h2', with_w=True)
                h2 = tf.nn.relu(self.g_bn2(h2))

                h3, self.h3_w, self.h3_b = \
                    deconv2d(h2, [self.batch_size, s_h2, s_w2, self.gf_dim], name='g_h3', with_w=True)
                h3 = tf.nn.relu(self.g_bn3(h3))

                h4, self.h4_w, self.h4_b = \
                    deconv2d(h3, [self.batch_size, s_h, s_w, self.c_dim], name='g_h4', with_w=True)
                return tf.nn.tanh(h4)
            else:
                s_h, s_w = self.output_height, self.output_width
                s_h2, s_h4 = int(s_h/2), int(s_h/4)
                s_w2, s_w4 = int(s_w / 2), int(s_w / 4)

                # tf.expand_dims(tf.expand_dims(y, 1), 2)
                yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
                z = tf.concat([z, y], 1)

                h0 = tf.nn.relu(self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin')))
                h0 = tf.concat([h0, y], 1)

                h1 = tf.nn.relu(self.g_bn1(linear(h0, self.gf_dim*2*s_h4*s_w4, 'g_h1_lin')))
                h1 = tf.reshape(h1, [self.batch_size, s_h4, s_w4, self.gf_dim*2])
                h1 = conv_cond_concat(h1, yb)

                h2 = tf.nn.relu(self.g_bn2(deconv2d(h1, [self.batch_size, s_h2, s_w2, self.gf_dim*2], name='g_h2')))
                h2 = conv_cond_concat(h2, yb)

                return tf.nn.sigmoid(deconv2d(h2, [self.batch_size, s_h, s_w, self.c_dim], name='g_h3'))
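
The with_w=True calls above expect linear (and deconv2d) to return the weight and bias variables alongside the layer output. A minimal sketch of such a linear layer in the usual DCGAN style (the initializer constants are assumptions):

def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0,
           with_w=False):
    shape = input_.get_shape().as_list()
    with tf.variable_scope(scope or "Linear"):
        matrix = tf.get_variable(
            "Matrix", [shape[1], output_size], tf.float32,
            tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable(
            "bias", [output_size], initializer=tf.constant_initializer(bias_start))
        out = tf.matmul(input_, matrix) + bias
        # optionally expose the variables so callers can track or reuse them
        return (out, matrix, bias) if with_w else out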
Example #8
    def gern_net(self, z, y):

        with tf.variable_scope('generator') as scope:

            yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
            z = tf.concat([z, y], 1)
            # c1, c2 = self.output_size / 4, self.output_size / 2
            c1_row, c2_row = int(self.output_size_row / 4), int(
                self.output_size_row / 2)
            c1_col, c2_col = int(self.output_size_col / 4), int(
                self.output_size_col / 2)

            # 10 stands for the number of labels
            d1 = tf.nn.relu(
                batch_normal(fully_connect(z,
                                           output_size=1024,
                                           scope='gen_fully'),
                             scope='gen_bn1'))

            d1 = tf.concat([d1, y], 1)

            d2 = tf.nn.relu(
                batch_normal(fully_connect(d1,
                                           output_size=c1_row * c1_col * 2 *
                                           64,
                                           scope='gen_fully2'),
                             scope='gen_bn2'))

            d2 = tf.reshape(d2, [self.batch_size, c1_row, c1_col, 64 * 2])
            d2 = conv_cond_concat(d2, yb)

            d3 = tf.nn.relu(
                batch_normal(de_conv(
                    d2,
                    output_shape=[self.batch_size, c2_row, c2_col, 128],
                    name='gen_deconv1'),
                             scope='gen_bn3'))

            d3 = conv_cond_concat(d3, yb)

            d4 = de_conv(d3,
                         output_shape=[
                             self.batch_size, self.output_size_row,
                             self.output_size_col, self.channel
                         ],
                         name='gen_deconv2')

            return tf.nn.sigmoid(d4)
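
Examples built on de_conv / deconv2d assume a transposed-convolution helper that takes an explicit output shape. A minimal sketch in the common DCGAN style, assuming 5x5 kernels and stride 2 as defaults (kernel size and initializer are assumptions):

def deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2,
             stddev=0.02, name="deconv2d", with_w=False):
    with tf.variable_scope(name):
        # transposed-conv filters are laid out [k_h, k_w, out_channels, in_channels]
        w = tf.get_variable(
            'w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
            initializer=tf.random_normal_initializer(stddev=stddev))
        deconv = tf.nn.conv2d_transpose(
            input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
        biases = tf.get_variable(
            'biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        deconv = tf.nn.bias_add(deconv, biases)
        return (deconv, w, biases) if with_w else deconv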
Example #9

def dis_net(data_array, y, weights, biases, reuse=False):

    # mnist data's shape is (28, 28, 1)

    y = tf.reshape(y, shape=[batch_size, 1, 1, y_dim])
    # concat the condition onto the feature maps
    data_array = conv_cond_concat(data_array, y)

    conv1 = conv2d(data_array, weights['wc1'], biases['bc1'])

    tf.add_to_collection('weight_1', weights['wc1'])

    conv1 = lrelu(conv1)

    tf.add_to_collection('ac_1', conv1)

    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    conv2 = batch_normal(conv2, scope="dis_bn1", reuse=reuse)
    conv2 = lrelu(conv2)

    tf.add_to_collection('weight_2', weights['wc2'])

    tf.add_to_collection('ac_2', conv2)

    conv2 = tf.reshape(conv2, [batch_size, -1])

    f1 = fully_connect(conv2, weights['wc3'], biases['bc3'])
    f1 = batch_normal(f1, scope="dis_bn2", reuse=reuse)
    f1 = lrelu(f1)

    out = fully_connect(f1, weights['wd'], biases['bd'])

    return tf.nn.sigmoid(out), out
Example #10

def dcwgan_cond(model, image, y, reuse=False):
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()
        yb = tf.reshape(y, [-1, 1, 1, model.y_dim])
        x = conv_cond_concat(image, yb)
        h0 = lrelu(conv2d(x, model.df_dim, name='d_h0_conv'))
        h0 = conv_cond_concat(h0, yb)
        h1 = lrelu(layer_norm(conv2d(h0, model.df_dim*2, name='d_h1_conv'), name='d_ln1'))
        h1 = conv_cond_concat(h1, yb)
        h2 = lrelu(layer_norm(conv2d(h1, model.df_dim*4, name='d_h2_conv'), name='d_ln2'))
        h2 = conv_cond_concat(h2, yb)
        h3 = lrelu(layer_norm(conv2d(h2, model.df_dim*8, name='d_h3_conv'), name='d_ln3'))
        shape = np.prod(h3.get_shape()[1:].as_list())
        reshaped = tf.reshape(h3, [-1, shape])
        cond = concat([reshaped, y], 1)
        h4 = linear(cond, 1, 'd_h4_lin')
        return h4
Example #11

def wgan_slim_cond(model, image, y, reuse=False):
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()
        yb = tf.reshape(y, [-1, 1, 1, model.y_dim])
        image_ = conv_cond_concat(image, yb)
        h0 = lrelu(layer_norm(conv2d(image_, model.df_dim, k_h=4, k_w=4, name='d_h0_conv',padding='VALID')))
        h0 = conv_cond_concat(h0, yb)
        h1 = lrelu(layer_norm(conv2d(h0, model.df_dim*4, k_h=4, k_w=4, name='d_h1_conv', padding='VALID')))
        h1 = conv_cond_concat(h1, yb)
        h2 = lrelu(layer_norm(conv2d(h1, model.df_dim*8, k_h=4, k_w=4, name='d_h2_conv', padding='VALID')))
        h2 = conv_cond_concat(h2, yb)

        shape = np.prod(h2.get_shape()[1:].as_list())
        h3 = tf.reshape(h2, [-1, shape])
        h3 = concat([h3, y], 1)

        r_out = linear(h3, 1, 'd_ro_lin')
        return r_out
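
The WGAN-GP critics above replace batch normalization with layer normalization, since batch norm couples samples within a batch and interferes with the per-sample gradient penalty. A minimal sketch of the layer_norm wrapper these snippets appear to assume, built on tf.contrib.layers.layer_norm:

def layer_norm(x, name="layer_norm"):
    # per-sample normalization over all non-batch axes; safe under WGAN-GP
    return tf.contrib.layers.layer_norm(x, scope=name)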
Example #12
    def dis_net(self, images, y, reuse=False):

        with tf.variable_scope("discriminator") as scope:

            if reuse:
                scope.reuse_variables()

            # mnist data's shape is (28 , 28 , 1)
            yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
            # concat
            concat_data = conv_cond_concat(images, yb)

            conv1, w1 = conv2d(concat_data, output_dim=10, name='dis_conv1')
            tf.add_to_collection('weight_1', w1)

            conv1 = lrelu(conv1)
            conv1 = conv_cond_concat(conv1, yb)
            tf.add_to_collection('ac_1', conv1)

            conv2, w2 = conv2d(conv1, output_dim=64, name='dis_conv2')
            tf.add_to_collection('weight_2', w2)

            conv2 = lrelu(batch_normal(conv2, scope='dis_bn1'))
            tf.add_to_collection('ac_2', conv2)

            conv2 = tf.reshape(conv2, [self.batch_size, -1])
            conv2 = tf.concat([conv2, y], 1)

            f1 = lrelu(
                batch_normal(fully_connect(conv2,
                                           output_size=1024,
                                           scope='dis_fully1'),
                             scope='dis_bn2',
                             reuse=reuse))
            f1 = tf.concat([f1, y], 1)

            out = fully_connect(f1,
                                output_size=1,
                                scope='dis_fully2',
                                initializer=xavier_initializer())

            return tf.nn.sigmoid(out), out
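
The tf.add_to_collection calls stash the first-layer kernels and activations for later inspection. A hypothetical usage sketch, retrieving them after the graph is built:

# retrieve the tensors stashed above (collection names match the ones used there)
weight_1 = tf.get_collection('weight_1')[0]  # first conv kernel
ac_1 = tf.get_collection('ac_1')[0]          # first-layer activations
# e.g. sess.run both alongside an input batch to visualize filters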
Example #13
    def dis_net(self, images, y, reuse=False):    # no BN on D's input layer
        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            # mnist data's shape is (28, 28, 1)
            # ? 1 1 10
            yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
            # concat -> ? 28 28 11
            concat_data = conv_cond_concat(images, yb)
            # ? 14 14 10
            # w1: 3 3 11 10 convolution kernel
            conv1, w1 = conv2d(concat_data, output_dim=10, name='dis_conv1')
            tf.add_to_collection('weight_1', w1)

            conv1 = lrelu(conv1)
            # ? 14 14 20
            conv1 = conv_cond_concat(conv1, yb)  # concat the condition again
            tf.add_to_collection('ac_1', conv1)

            # ? 7 7 64
            # w2: 3 3 20 64
            conv2, w2 = conv2d(conv1, output_dim=64, name='dis_conv2')
            tf.add_to_collection('weight_2', w2)

            conv2 = lrelu(batch_normal(conv2, scope='dis_bn1'))
            tf.add_to_collection('ac_2', conv2)  # add conv2 to the 'ac_2' collection

            # ? 3136 -> 7*7*64
            conv2 = tf.reshape(conv2, [self.batch_size, -1])
            # ? 3146
            conv2 = tf.concat([conv2, y], 1)  # append the condition again

            # ? 1024
            f1 = lrelu(batch_normal(fully_connect(conv2, output_size=1024, scope='dis_fully1'), scope='dis_bn2', reuse=reuse))
            # ? 1034
            f1 = tf.concat([f1, y], 1)

            # one more fully connected layer -> ? 1
            out = fully_connect(f1, output_size=1, scope='dis_fully2', initializer=xavier_initializer())

            return tf.nn.sigmoid(out), out
Example #14
    def generate(self, z_var, y, weights, biases):

        # add the first layer

        z_var = tf.concat([z_var, y], 1)

        d1 = tf.nn.relu(
            batch_normal(fully_connect(z_var, weights['wd'], biases['bd']),
                         scope='gen_bn1'))

        # add the second layer

        d1 = tf.concat([d1, y], 1)

        d2 = tf.nn.relu(
            batch_normal(fully_connect(d1, weights['wc1'], biases['bc1']),
                         scope='gen_bn2'))

        d2 = tf.reshape(d2, [self.batch_size, 7, 7, 128])
        y = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])

        d2 = conv_cond_concat(d2, y)

        d3 = tf.nn.relu(
            batch_normal(de_conv(d2,
                                 weights['wc2'],
                                 biases['bc2'],
                                 out_shape=[self.batch_size, 14, 14, 64]),
                         scope='gen_bn3'))

        d3 = conv_cond_concat(d3, y)

        output = de_conv(d3,
                         weights['wc3'],
                         biases['bc3'],
                         out_shape=[self.batch_size, 28, 28, 1])

        return tf.nn.sigmoid(output)
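
The generate method above expects caller-supplied weight and bias dictionaries. A hypothetical set of shapes consistent with the reshapes in the code, assuming z_dim = 100, y_dim = 10, a 1024-unit first layer, and 5x5 deconv kernels (all of these constants are assumptions):

weights = {
    'wd':  tf.get_variable('gen_wd',  [100 + 10, 1024]),
    'wc1': tf.get_variable('gen_wc1', [1024 + 10, 7 * 7 * 128]),
    'wc2': tf.get_variable('gen_wc2', [5, 5, 64, 128 + 10]),  # deconv: [k, k, out, in]
    'wc3': tf.get_variable('gen_wc3', [5, 5, 1, 64 + 10]),
}
biases = {
    'bd':  tf.get_variable('gen_bd',  [1024]),
    'bc1': tf.get_variable('gen_bc1', [7 * 7 * 128]),
    'bc2': tf.get_variable('gen_bc2', [64]),
    'bc3': tf.get_variable('gen_bc3', [1]),
}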
Example #15
def dis_net(data_array, y, weights, biases, reuse=False):
    # mnist data's shape is (28, 28, 1)
    yb = tf.reshape(y, shape=[batch_size, 1, 1, y_dim])
    data_array = conv_cond_concat(data_array, yb)

    print("-------------")
    print(data_array.get_shape())
    if channel == 1:
        conv1 = conv2d(data_array, weights['wc1'], biases['bc1'])
    else:
        conv1 = conv3d(data_array, weights['wc1_1'], biases['bc1'])
    conv1 = lrelu(conv1)
    conv1 = conv_cond_concat(conv1, yb)

    if channel == 1:
        conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    else:
        conv2 = conv3d(conv1, weights['wc2_1'], biases['bc2'])
    conv2 = batch_normal(conv2, scope="dis_bn1", reuse=reuse)
    conv2 = lrelu(conv2)
    conv2 = tf.reshape(conv2, [batch_size, -1])
    conv2 = tf.concat([conv2, y], 1)

    # stash tensors in collections for visualization
    tf.add_to_collection('weight_1', weights['wc1'])
    tf.add_to_collection('ac_1', conv1)
    tf.add_to_collection('weight_2', weights['wc2'])
    tf.add_to_collection('ac_2', conv2)

    f1 = fully_connect(conv2, weights['wc3'], biases['bc3'])
    f1 = batch_normal(f1, scope="dis_bn2", reuse=reuse)
    f1 = lrelu(f1)
    f1 = tf.concat([f1, y], 1)

    out = fully_connect(f1, weights['wd'], biases['bd'])

    return tf.nn.sigmoid(out), out
Example #16
    def generator(self, z_set, y_set):
        """
        Fully generator
        :param z_set: 
        :param y_set: 
        :return: 
        """
        z = tf.concat([v for v in z_set], axis=1)
        y = y_set[0]
        with tf.variable_scope("generator_f") as scope:
            s_h, s_w = self.output_height, self.output_width # 32, 32
            s_h2, s_w2 = int(s_h / 2), int(s_w / 2)  # 16, 16
            s_h4, s_w4 = int(s_h2 / 2), int(s_w2 / 2)  # 8, 8
            s_h8, s_w8 = int(s_h4 / 2), int(s_w4 / 2)  # 4, 4

            yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim + 2])
            z = tf.concat([z, y], 1)

            h0 = tf.nn.relu(self.g_bn_f0(linear(z, self.gfc_dim, "g_h0_lin")))
            h0 = tf.concat([h0, y], 1)

            h1 = tf.nn.relu(self.g_bn_f1(linear(h0, self.gf_dim * 4 * s_h8 * s_w8, "g_h1_lin")))
            h1 = tf.reshape(h1, [self.batch_size, s_h8, s_w8, self.gf_dim * 4])
            h1 = conv_cond_concat(h1, yb)

            h2 = deconv2d(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2], name="g_h2")
            h2 = self.g_bn_f2(h2)
            h2 = tf.nn.relu(h2)
            h2 = conv_cond_concat(h2, yb)

            h3 = deconv2d(h2, [self.batch_size, s_h2, s_w2, self.gf_dim], name="g_h3")
            h3 = self.g_bn_f3(h3)
            h3 = tf.nn.relu(h3)
            h3 = conv_cond_concat(h3, yb)

            return tf.nn.sigmoid(deconv2d(h3, [self.batch_size, s_h, s_w, self.c_dim], name="g_h4"))
Example #17
    def discriminate(self, x_var, y, weights, biases, reuse=False):

        # the first layer; no BN; leaky ReLU
        conv1 = lrelu(conv2d(x_var, weights['wc1'], biases['bc1']))
        # concat x_var and y
        y1 = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
        conv1 = conv_cond_concat(conv1, y1)
        print('x_var', x_var.shape)
        print('conv1', conv1.shape)
        # the second layer
        conv1 = tf.nn.dropout(conv1, 0.8)
        conv2 = lrelu(
            batch_normal(conv2d(conv1, weights['wc2'], biases['bc2']),
                         scope='dis_bn2',
                         reuse=reuse))
        print('conv2', conv2.shape)
        conv2 = tf.nn.dropout(conv2, 0.8)
        # the third layer
        conv3 = lrelu(
            batch_normal(conv2d(conv2, weights['wc3'], biases['bc3']),
                         scope='dis_bn3',
                         reuse=reuse))
        conv3 = tf.nn.dropout(conv3, 0.8)
        # the fourth layer
        conv4 = lrelu(
            batch_normal(conv2d(conv3,
                                weights['wc4'],
                                biases['bc4'],
                                padding_='VALID'),
                         scope='dis_bn4',
                         reuse=reuse))
        conv4 = tf.nn.dropout(conv4, 0.8)
        print('conv4', conv4.shape)
        # the fifth layer; strides == 1 (the default is 2)
        con = conv2d(conv4, weights['wc5'], biases['bc5'], strides=1)
        conv5 = tf.nn.sigmoid(con)
        print('conv5', conv5.shape)
        conv5 = tf.squeeze(conv5, [1, 2])
        print('discriminator is done')

        return conv5
Example #18
    def generator(self, z, y=None):
        with tf.variable_scope("generator"):
            if self.y_dim is None:
                s_h, s_w = self.output_height, self.output_width
                s_h2, s_w2 = (conv_out_size_same(s_h, 2),
                              conv_out_size_same(s_w, 2))
                s_h4, s_w4 = (conv_out_size_same(s_h2, 2),
                              conv_out_size_same(s_w2, 2))
                s_h8, s_w8 = (conv_out_size_same(s_h4, 2),
                              conv_out_size_same(s_w4, 2))
                s_h16, s_w16 = (conv_out_size_same(s_h8, 2),
                                conv_out_size_same(s_w8, 2))

                # project `z` and reshape
                self.z_, self.h0_w, self.h0_b = linear(
                    z, self.gf_dim*8*s_h16*s_w16, 'g_h0_lin', with_w=True)

                self.h0 = tf.reshape(
                    self.z_, [-1, s_h16, s_w16, self.gf_dim * 8])
                h0 = tf.nn.relu(self.g_bn0(self.h0))

                self.h1, self.h1_w, self.h1_b = deconv2d(
                    h0, [self.batch_size, s_h8, s_w8, self.gf_dim*4],
                    name='g_h1', with_w=True)
                h1 = tf.nn.relu(self.g_bn1(self.h1))

                h2, self.h2_w, self.h2_b = deconv2d(
                    h1, [self.batch_size, s_h4, s_w4, self.gf_dim*2],
                    name='g_h2', with_w=True)
                h2 = tf.nn.relu(self.g_bn2(h2))

                h3, self.h3_w, self.h3_b = deconv2d(
                    h2, [self.batch_size, s_h2, s_w2, self.gf_dim*1],
                    name='g_h3', with_w=True)
                h3 = tf.nn.relu(self.g_bn3(h3))

                h4, self.h4_w, self.h4_b = deconv2d(
                    h3, [self.batch_size, s_h, s_w, self.c_dim],
                    name='g_h4', with_w=True)

                return tf.nn.tanh(h4)

            else:
                s_h, s_w = self.output_height, self.output_width
                s_h2, s_h4 = s_h // 2, s_h // 4
                s_w2, s_w4 = s_w // 2, s_w // 4

                # yb = tf.expand_dims(tf.expand_dims(y, 1),2)
                yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
                z = concat([z, y], 1)

                h0 = tf.nn.relu(
                    self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin')))
                h0 = concat([h0, y], 1)

                h1 = tf.nn.relu(self.g_bn1(
                    linear(h0, self.gf_dim * 2 * s_h4 * s_w4, 'g_h1_lin')))
                h1 = tf.reshape(
                    h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2])

                h1 = conv_cond_concat(h1, yb)

                h2 = tf.nn.relu(
                    self.g_bn2(
                        deconv2d(
                            h1,
                            [self.batch_size, s_h2, s_w2, self.gf_dim * 2],
                            name='g_h2'
                        )
                    )
                )
                h2 = conv_cond_concat(h2, yb)

                return tf.nn.sigmoid(
                    deconv2d(
                        h2,
                        [self.batch_size, s_h, s_w, self.c_dim],
                        name='g_h3'
                    )
                )
Example #19
def wgan_cond(model, image, y, reuse=False):
    # no batch norm for WGAN-GP; layer norm is used instead
    with tf.compat.v1.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()

        yb = tf.reshape(y, [-1, 1, 1, model.y_dim])
        image_ = conv_cond_concat(image, yb)
        h0 = lrelu(
            layer_norm(
                conv2d(image_,
                       model.df_dim,
                       k_h=4,
                       k_w=4,
                       name='d_h0_conv',
                       padding='VALID')))
        h0 = conv_cond_concat(h0, yb)
        h1 = lrelu(
            layer_norm(
                conv2d(h0,
                       model.df_dim * 4,
                       k_h=4,
                       k_w=4,
                       name='d_h1_conv',
                       padding='VALID')))
        h1 = conv_cond_concat(h1, yb)
        h2 = lrelu(
            layer_norm(
                conv2d(h1,
                       model.df_dim * 8,
                       k_h=4,
                       k_w=4,
                       name='d_h2_conv',
                       padding='VALID')))
        h2 = conv_cond_concat(h2, yb)
        h3 = lrelu(
            layer_norm(
                conv2d(h2,
                       model.df_dim * 16,
                       k_h=4,
                       k_w=4,
                       name='d_h3_conv',
                       padding='VALID')))
        h3 = conv_cond_concat(h3, yb)
        h4 = lrelu(
            layer_norm(
                conv2d(h3,
                       model.df_dim * 32,
                       k_h=4,
                       k_w=4,
                       name='d_h4_conv',
                       padding='VALID')))
        h4 = conv_cond_concat(h4, yb)
        h5 = lrelu(
            layer_norm(
                conv2d(h4,
                       model.df_dim * 32,
                       k_h=4,
                       k_w=4,
                       name='d_h5_conv',
                       padding='VALID')))

        shape = np.prod(h5.get_shape()[1:].as_list())
        h5 = tf.reshape(h5, [-1, shape])
        h5 = concat([h5, y], 1)

        r_out = linear(h5, 1, 'd_ro_lin')
        return r_out
Example #20
    def sampler(self, z, y=None):
        """
        Sample instance from noise
        :param z: list, contains self.num_patches's noise vector
        :param y: list, contains self.num_patches's condition vector
        :return: 
        """
        s_h = int(self.output_height / np.sqrt(self.num_patches))  # 16
        s_w = int(self.output_width / np.sqrt(self.num_patches))   # 16
        s_h2, s_w2 = int(s_h / 2), int(s_w / 2)  # 8, 8
        s_h4, s_w4 = int(s_h2 / 2), int(s_w2 / 2)  # 4, 4

        h = []
        for index in range(self.num_patches):
            with tf.variable_scope("generator%d" % index) as scope:
                scope.reuse_variables()
                yb = tf.reshape(y[index],
                                [self.batch_size, 1, 1, self.y_dim + 2])
                z_ = tf.concat([z[index], y[index]], 1)

                h0 = tf.nn.relu(self.g_bns_all[index][0](linear(
                    z_, self.gfc_dim, "g_h0_lin")))
                h0 = tf.concat([h0, y[index]], 1)

                h1 = tf.nn.relu(self.g_bns_all[index][1](linear(
                    h0, self.gf_dim * 4 * s_h4 * s_w4, "g_h1_lin")))
                h1 = tf.reshape(h1,
                                [self.batch_size, s_h4, s_w4, self.gf_dim * 4])
                h1 = conv_cond_concat(h1, yb)

                h2 = deconv2d(h1,
                              [self.batch_size, s_h2, s_w2, self.gf_dim * 2],
                              name="g_h2")
                h2 = self.g_bns_all[index][2](h2)
                h2 = tf.nn.relu(h2)
                h2 = conv_cond_concat(h2, yb)

                h3 = tf.nn.sigmoid(
                    deconv2d(h2, [self.batch_size, s_h, s_w, self.c_dim],
                             name="g_h3"))
                h.append(h3)

        hh_0 = tf.pad(h[0],
                      paddings=[[0, 0], [0, self.output_height // 2],
                                [0, self.output_width // 2], [0, 0]],
                      mode="CONSTANT")
        hh_1 = tf.pad(h[1],
                      paddings=[[0, 0], [self.output_height // 2, 0],
                                [0, self.output_width // 2], [0, 0]],
                      mode="CONSTANT")
        hh_2 = tf.pad(h[2],
                      paddings=[[0, 0], [0, self.output_height // 2],
                                [self.output_width // 2, 0], [0, 0]],
                      mode="CONSTANT")
        hh_3 = tf.pad(h[3],
                      paddings=[[0, 0], [self.output_height // 2, 0],
                                [self.output_width // 2, 0], [0, 0]],
                      mode="CONSTANT")

        hh = tf.add(tf.add(tf.add(hh_0, hh_1), hh_2), hh_3)
        return hh
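
To see why the four pads assemble a full image: each patch is zero-padded so it occupies a different quadrant, and the sum of the four padded tensors is the tiled result. A standalone sketch with the sizes assumed in the comments above (16x16 patches, 32x32 output):

import tensorflow as tf

patch = tf.ones([1, 16, 16, 1])
top_left     = tf.pad(patch, [[0, 0], [0, 16], [0, 16], [0, 0]])  # rows 0-15,  cols 0-15
bottom_left  = tf.pad(patch, [[0, 0], [16, 0], [0, 16], [0, 0]])  # rows 16-31, cols 0-15
top_right    = tf.pad(patch, [[0, 0], [0, 16], [16, 0], [0, 0]])  # rows 0-15,  cols 16-31
bottom_right = tf.pad(patch, [[0, 0], [16, 0], [16, 0], [0, 0]])  # rows 16-31, cols 16-31
full = top_left + bottom_left + top_right + bottom_right          # [1, 32, 32, 1]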
Example #21
    def sampler(self, z_set, y_set=None):
        """
        Sample instance from noise
        :param z_set: list, contains self.num_patches's noise vector
        :param y_set: list, contains self.num_patches's condition vector
        :return: 
        """
        s_h = int(self.output_height / np.sqrt(self.num_patches))  # 16
        s_w = int(self.output_width / np.sqrt(self.num_patches))   # 16
        s_h2, s_w2 = int(s_h / 2), int(s_w / 2)  # 8, 8
        s_h4, s_w4 = int(s_h2 / 2), int(s_w2 / 2)  # 4, 4
        s_h8, s_w8 = int(s_h4 / 2), int(s_w4 / 2)  # 2, 2

        g_patchs = []
        for index in range(self.num_patches):
            with tf.variable_scope("generator_%d" % index) as scope:
                scope.reuse_variables()
                yb = tf.reshape(y_set[index], [self.batch_size, 1, 1, self.y_dim + 2])
                z = tf.concat([z_set[index], y_set[index]], 1)

                h0 = tf.nn.relu(self.g_bns_all[index][0](linear(z, self.gfc_dim, "g_h0_lin")))
                h0 = tf.concat([h0, y_set[index]], 1)

                h1 = tf.nn.relu(self.g_bns_all[index][1](linear(h0, self.gf_dim * 4 * s_h8 * s_w8, "g_h1_lin")))
                h1 = tf.reshape(h1, [self.batch_size, s_h8, s_w8, self.gf_dim * 4])
                h1 = conv_cond_concat(h1, yb)

                h2 = deconv2d(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2], name="g_h2")
                h2 = self.g_bns_all[index][2](h2)
                h2 = tf.nn.relu(h2)
                h2 = conv_cond_concat(h2, yb)

                h3 = deconv2d(h2, [self.batch_size, s_h2, s_w2, self.gf_dim], name="g_h3")
                h3 = self.g_bns_all[index][3](h3)
                h3 = tf.nn.relu(h3)
                h3 = conv_cond_concat(h3, yb)

                h4 = tf.nn.sigmoid(deconv2d(h3, [self.batch_size, s_h, s_w, self.c_dim], name="g_h4"))
                g_patchs.append(h4)

        hh_0 = tf.pad(g_patchs[0], paddings=[[0, 0], [0, self.output_height // 2], [0, self.output_width // 2], [0, 0]], mode="CONSTANT")
        hh_1 = tf.pad(g_patchs[1], paddings=[[0, 0], [self.output_height // 2, 0], [0, self.output_width // 2], [0, 0]], mode="CONSTANT")
        hh_2 = tf.pad(g_patchs[2], paddings=[[0, 0], [0, self.output_height // 2], [self.output_width // 2, 0], [0, 0]], mode="CONSTANT")
        hh_3 = tf.pad(g_patchs[3], paddings=[[0, 0], [self.output_height // 2, 0], [self.output_width // 2, 0], [0, 0]], mode="CONSTANT")

        hh = tf.add(tf.add(tf.add(hh_0, hh_1), hh_2), hh_3)

        z = tf.concat([v for v in z_set], axis=1)
        y = y_set[0]
        with tf.variable_scope("generator_f") as scope:
            scope.reuse_variables()
            s_h, s_w = self.output_height, self.output_width  # 32, 32
            s_h2, s_w2 = int(s_h / 2), int(s_w / 2)  # 16, 16
            s_h4, s_w4 = int(s_h2 / 2), int(s_w2 / 2)  # 8, 8
            s_h8, s_w8 = int(s_h4 / 2), int(s_w4 / 2)  # 4, 4

            yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim + 2])
            z = tf.concat([z, y], 1)

            h0 = tf.nn.relu(self.g_bn_f0(linear(z, self.gfc_dim, "g_h0_lin")))
            h0 = tf.concat([h0, y], 1)

            h1 = tf.nn.relu(self.g_bn_f1(linear(h0, self.gf_dim * 4 * s_h8 * s_w8, "g_h1_lin")))
            h1 = tf.reshape(h1, [self.batch_size, s_h8, s_w8, self.gf_dim * 4])
            h1 = conv_cond_concat(h1, yb)

            h2 = deconv2d(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2], name="g_h2")
            h2 = self.g_bn_f2(h2)
            h2 = tf.nn.relu(h2)
            h2 = conv_cond_concat(h2, yb)

            h3 = deconv2d(h2, [self.batch_size, s_h2, s_w2, self.gf_dim], name="g_h3")
            h3 = self.g_bn_f3(h3)
            h3 = tf.nn.relu(h3)
            h3 = conv_cond_concat(h3, yb)

            gf = tf.nn.sigmoid(deconv2d(h3, [self.batch_size, s_h, s_w, self.c_dim], name="g_h4"))

        return tf.add(gf, hh)