Example #1
        def build_generator(self, z):
            ### First block shape
            dim = int(self.image_size / 16)  # e.g. 96/16 = 6
            depth = 1024
            ### Fully-connected layer: project the noise z (bs, noise_dim) up to (bs, dim*dim*depth)
            with tf.variable_scope("generator"):
                W1 = tf.get_variable("W1",[self.noise_dim,dim*dim*depth],tf.float32,
                                    tf.truncated_normal_initializer(stddev=0.02))
                B1 = tf.get_variable("B1",[dim*dim*depth],tf.float32,
                                    tf.truncated_normal_initializer(stddev=0.02))
                DNN_output = tf.matmul(z,W1) + B1
                DNN_output = tf.nn.relu(DNN_output)
                DNN_output = tf.reshape(DNN_output,[-1,dim,dim,depth]) ###(bs,dim,dim,depth)
                print(DNN_output.shape)

                ### Deconv layers
                bs, d_w, d_h, depth = DNN_output.get_shape().as_list()
                deconv1 = ut.deconv2d(DNN_output,[bs,d_w*2,d_h*2,int(depth/2)],name="deconv1")
                print(deconv1.shape)
                bs, d_w, d_h, depth = deconv1.get_shape().as_list()
                deconv2 = ut.deconv2d(deconv1,[bs,d_w*2,d_h*2,int(depth/2)],name="deconv2")
                print(deconv2.shape)
                bs, d_w, d_h, depth = deconv2.get_shape().as_list()
                deconv3 = ut.deconv2d(deconv2,[bs,d_w*2,d_h*2,int(depth/2)],name="deconv3")
                print(deconv3.shape)
                bs, d_w, d_h, depth = deconv3.get_shape().as_list()
                gen_image = ut.deconv2d(deconv3,[bs,d_w*2,d_h*2,self.image_depth],
                                        f_h=3,f_w=3,name="deconv4",relu=False)
                gen_image = tf.nn.tanh(gen_image)
                print(gen_image.shape)
            return gen_image
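Both examples on this page call a deconv2d helper from a local utility module that is not shown. The sketch below is a minimal, hypothetical reconstruction of such a helper in TensorFlow 1.x, assuming stride-2 transposed convolutions with SAME padding and the f_h/f_w/relu keyword arguments seen in the calls above; the actual ut.deconv2d may differ.

import tensorflow as tf

def deconv2d(x, output_shape, f_h=5, f_w=5, stride=2, name="deconv2d", relu=True):
    # Hypothetical stand-in for ut.deconv2d: a stride-2 transposed convolution
    # with an optional ReLU, matching how it is called in the examples.
    in_channels = x.get_shape().as_list()[-1]
    out_channels = output_shape[-1]
    with tf.variable_scope(name):
        # conv2d_transpose filters are laid out [height, width, out_channels, in_channels]
        w = tf.get_variable("w", [f_h, f_w, out_channels, in_channels], tf.float32,
                            tf.truncated_normal_initializer(stddev=0.02))
        b = tf.get_variable("b", [out_channels], tf.float32,
                            tf.constant_initializer(0.0))
        out = tf.nn.conv2d_transpose(x, w, output_shape=output_shape,
                                     strides=[1, stride, stride, 1])
        out = tf.nn.bias_add(out, b)
        return tf.nn.relu(out) if relu else out

With stride 2 and SAME padding, each call doubles the spatial dimensions, which is why the four deconvolutions take the (bs, 6, 6, 1024) block up to a (bs, 96, 96, image_depth) image.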
Example #2
def add_residual_pre(prev_layer,
                     z_concat=None,
                     text_filters=None,
                     k_h=5,
                     k_w=5,
                     hidden_text_filters=None,
                     hidden_filters=None,
                     name_func=None):

    filters = prev_layer.get_shape()[3].value
    if hidden_filters is None:
        hidden_filters = filters * 4
    if text_filters is None:
        text_filters = int(filters / 2)
    if hidden_text_filters is None:
        hidden_text_filters = int(filters / 8)
    s = prev_layer.get_shape()[1].value

    bn0 = util.batch_norm(name=name_func())
    bn1 = util.batch_norm(name=name_func())

    low_dim = util.conv2d(util.lrelu(bn0(prev_layer)),
                          hidden_filters,
                          k_h=k_h,
                          k_w=k_w,
                          name=name_func())

    # batch_size is assumed to be defined in the enclosing scope
    residual = util.deconv2d(util.lrelu(bn1(low_dim), name=name_func()),
                             [batch_size, s, s, filters],
                             k_h=k_h,
                             k_w=k_w,
                             name=name_func())

    next_layer = prev_layer + residual
    return next_layer
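add_residual_pre builds a pre-activation residual block (batch norm and leaky ReLU applied before each convolution) and relies on a batch_size variable and a name_func callable from the enclosing scope. A brief usage sketch under those assumptions, with the util module from the example importable; the layer_name generator and feature_map below are hypothetical:

import itertools
import tensorflow as tf

batch_size = 64  # assumed global; add_residual_pre reads it from the enclosing scope

_counter = itertools.count()

def layer_name():
    # hypothetical unique-name generator passed in as name_func
    return "g_layer_%d" % next(_counter)

# a [batch_size, s, s, filters] feature map standing in for an earlier layer's output
feature_map = tf.random_normal([batch_size, 16, 16, 32])
next_layer = add_residual_pre(feature_map, name_func=layer_name)  # -> (64, 16, 16, 32)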
def generator(z_input, y_label,
              batch_size=10,
              dim_con=64,
              dim_fc=1024,
              reuse=False):
    """
    Args:
        z_input: input noise tensor, float - [batch_size, DIM_Z=100]
        y_label: input label tensor, float - [batch_size, DIM_Y=10]
        batch_size:
        dim_con:
        dim_fc:
        reuse:
    Returns:
        x': the generated image tensor, float - [batch_size, DIM_IMAGE=784]
    """
    with tf.variable_scope("generator") as scope:
        if reuse:
            scope.reuse_variables()

        # create z as the joint representation of the input noise and the label
        z = tf.concat([z_input, y_label], 1)

        tf.summary.histogram('act_g0', z)

        # first fully-connected layer
        g1 = tf.nn.relu(tf.contrib.layers.batch_norm(linear(
            x_input=z,
            dim_in=DIM_Z + DIM_Y,
            dim_out=dim_fc,
            name='g1'), epsilon=1e-5, scope='g1_bn'))

        # join the output of the previous layer with the labels vector
        g1 = tf.concat([g1, y_label], 1)

        tf.summary.histogram('act_g1', g1)

        # second fully-connected layer
        g2 = tf.nn.relu(tf.contrib.layers.batch_norm(linear(
            x_input=g1,
            dim_in=g1.get_shape().as_list()[-1],
            dim_out=dim_con * 2 * (IMAGE_SIZE // 4) * (IMAGE_SIZE // 4),  # integer division for Python 3
            name='g2'), epsilon=1e-5, scope='g2_bn'))

        # create a joint 4-D feature representation of the output of the previous layer and the label
        # to serve as a 7x7 input image for the next de-convolution layer
        y_ = tf.reshape(y_label, [batch_size, 1, 1, DIM_Y])
        g2 = tf.reshape(g2, [batch_size, IMAGE_SIZE // 4, IMAGE_SIZE // 4, dim_con * 2])
        g2 = concat(g2, y_)

        tf.summary.histogram('act_g2', g2)

        # first layer of deconvolution produces a larger 14x14 image
        g3 = deconv2d(g2, [batch_size, IMAGE_SIZE // 2, IMAGE_SIZE // 2, dim_con * 2], 'g3')

        # apply batch normalization to normalize this layer's activations,
        # then ReLU to stabilize its output
        g3 = tf.nn.relu(tf.contrib.layers.batch_norm(g3, epsilon=1e-5, scope='g3_bn'))

        # join the output of the previous layer with the labels vector
        g3 = concat(g3, y_)

        tf.summary.histogram('act_g3', g3)

        # second layer of deconvolution produces the final sized 28x28 image
        g4 = deconv2d(g3, [batch_size, IMAGE_SIZE, IMAGE_SIZE, 1], 'x')

        # no batch normalization in the final layer; a sigmoid activation maps the
        # output into [0, 1] to produce the final image tensor - [28, 28, 1]
        g4 = tf.nn.sigmoid(g4)

        tf.summary.histogram('act_g4', g4)

        return g4
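The generator above also depends on linear and concat helpers plus the DIM_Z, DIM_Y, and IMAGE_SIZE constants, none of which are shown. Below is a minimal sketch of what they could look like, assuming MNIST-style sizes; concat here is the usual conditional-GAN trick of tiling the label across the spatial dimensions and appending it as extra channels, but the original helpers may differ.

import tensorflow as tf

DIM_Z, DIM_Y, IMAGE_SIZE = 100, 10, 28  # assumed MNIST-style constants

def linear(x_input, dim_in, dim_out, name="linear"):
    # Hypothetical fully-connected helper matching the calls above.
    with tf.variable_scope(name):
        w = tf.get_variable("w", [dim_in, dim_out], tf.float32,
                            tf.truncated_normal_initializer(stddev=0.02))
        b = tf.get_variable("b", [dim_out], tf.float32,
                            tf.constant_initializer(0.0))
        return tf.matmul(x_input, w) + b

def concat(x, y):
    # Tile the [batch, 1, 1, DIM_Y] label tensor across the spatial dimensions
    # of x and append it as extra feature channels.
    x_shape = x.get_shape().as_list()
    y_shape = y.get_shape().as_list()
    tiled = y * tf.ones([x_shape[0], x_shape[1], x_shape[2], y_shape[3]])
    return tf.concat([x, tiled], 3)

With these definitions in scope (plus a matching deconv2d), calling generator on a [10, 100] noise batch and a [10, 10] label batch returns a [10, 28, 28, 1] image tensor.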