Exemplo n.º 1
0
def create_generator(Xin,
                     is_training,
                     Cout=1,
                     reuse=False,
                     networktype='ganG'):
    '''Build a GAN generator network.

    input : batchsize * latentD
    output: batchsize * 28 * 28 * Cout, sigmoid-activated.

    Args:
        Xin: latent input tensor, shape [batch, latentD].
        is_training: bool tensor/flag forwarded to batch-norm layers.
        Cout: number of output channels (default 1, i.e. grayscale).
        reuse: reuse variables of an existing scope with the same name.
        networktype: variable-scope name for all generator variables.
    '''
    with tf.variable_scope(networktype, reuse=reuse):
        # Project latent vector up to a 7x7x256 feature map.
        net = dense(Xin,
                    is_training,
                    Cout=7 * 7 * 256,
                    act='reLu',
                    norm='batchnorm',
                    name='dense1')
        net = tf.reshape(net, shape=[-1, 7, 7, 256])  # 7x7
        # Two stride-2 deconvolutions double spatial size: 7 -> 14 -> 28.
        net = deconv(net,
                     is_training,
                     kernel_w=4,
                     stride=2,
                     epf=2,
                     Cout=128,
                     act='reLu',
                     norm='batchnorm',
                     name='deconv1')  # 14x14
        net = deconv(net,
                     is_training,
                     kernel_w=4,
                     stride=2,
                     epf=2,
                     Cout=Cout,
                     act=None,
                     norm=None,
                     name='deconv2')  # 28x28
        # Squash outputs into [0, 1] pixel range.
        net = tf.nn.sigmoid(net)
    return net
Exemplo n.º 2
0
def create_decoder(Xin,
                   is_training,
                   latentD,
                   Cout=1,
                   reuse=False,
                   networktype='cdaeD'):
    '''Build a convolutional decoder: latent code -> batchsize * 28 * 28 * Cout.

    Args:
        Xin: latent input tensor, shape [batch, latentD].
        is_training: bool tensor/flag forwarded to batch-norm layers.
        latentD: latent dimensionality. NOTE(review): not referenced in the
            body — kept for caller compatibility; confirm whether it is needed.
        Cout: number of output channels (default 1).
        reuse: reuse variables of an existing scope with the same name.
        networktype: variable-scope name for all decoder variables.
    '''
    with tf.variable_scope(networktype, reuse=reuse):
        # Expand the code to a 7x7x256 feature map.
        net = dense(Xin,
                    is_training,
                    Cout=7 * 7 * 256,
                    act='reLu',
                    norm='batchnorm',
                    name='dense1')
        net = tf.reshape(net, shape=[-1, 7, 7, 256])  # 7x7
        # Upsample spatially via two stride-2 deconvolutions: 7 -> 14 -> 28.
        net = deconv(net,
                     is_training,
                     kernel_w=4,
                     stride=2,
                     Cout=256,
                     epf=2,
                     act='reLu',
                     norm='batchnorm',
                     name='deconv1')  # 14x14
        net = deconv(net,
                     is_training,
                     kernel_w=4,
                     stride=2,
                     Cout=Cout,
                     epf=2,
                     act=None,
                     norm=None,
                     name='deconv2')  # 28x28
        # Map logits to [0, 1] pixel intensities.
        net = tf.nn.sigmoid(net)
    return net
Exemplo n.º 3
0
def dense(X,
          is_training,
          Cout,
          trainable=True,
          act='ReLu',
          norm=None,
          name='dense'):
    '''Fully-connected layer with optional normalization and activation.

    output = batchsize * Cout

    A 4-D input (NHWC feature map) is flattened to 2-D first. The layer has
    no bias term; normalization (if any) is applied before the activation.

    Args:
        X: input tensor, 2-D [batch, features] or 4-D [batch, H, W, C].
        is_training: bool tensor/flag forwarded to batch_norm.
        Cout: number of output units.
        trainable: whether the weight (and norm) variables are trainable.
        act: activation name ('relu', 'lrelu', 'tanh', 'sigmoid',
            case-insensitive) or None for a linear layer.
        norm: normalization name ('batchnorm' or 'instance',
            case-insensitive) or None.
        name: prefix for the variables created by this layer.
    '''
    with tf.device('/gpu:0'):
        if X.get_shape().ndims == 4:
            # Flatten NHWC feature maps into [batch, H*W*C].
            shapeIn = X.get_shape().as_list()
            X = tf.reshape(X, shape=[-1, shapeIn[1] * shapeIn[2] * shapeIn[3]])

        X = tf.identity(X)
        shapeIn = X.get_shape().as_list()

        W = tf.get_variable(
            name='%s_W' % name,
            shape=[shapeIn[1], Cout],
            trainable=trainable,
            initializer=tf.random_normal_initializer(stddev=0.02))

        Y = tf.matmul(X, W)
        if norm is not None:
            if norm.lower() == 'batchnorm':
                Y = batch_norm(Y, is_training, trainable, name='%s_BN' % name)
            elif norm.lower() == 'instance':
                Y = instance_norm(Y, trainable, name='%s_IN' % name)
            else:
                # BUG FIX: the original nested a print() call inside print(),
                # which printed the norm name out of order and then 'None'.
                print('Unknown normalization procedure', norm.lower())
        if act is not None:
            if act.lower() == 'relu':
                Y = tf.nn.relu(Y)
            elif act.lower() == 'lrelu':
                Y = lrelu(Y, leak=0.2)
            elif act.lower() == 'tanh':
                Y = tf.nn.tanh(Y)
            elif act.lower() == 'sigmoid':
                Y = tf.nn.sigmoid(Y)
            else:
                print('Unknown activation function')
    return Y
Exemplo n.º 4
0
def create_gan_G(z,
                 is_training,
                 Cout=1,
                 trainable=True,
                 reuse=False,
                 networktype='ganG'):
    '''Build a GAN generator with an explicit trainability switch.

    input : batchsize * latentDim
    output: batchsize * 28 * 28 * Cout, sigmoid-activated.

    Args:
        z: latent input tensor, shape [batch, latentDim].
        is_training: bool tensor/flag forwarded to batch-norm layers.
        Cout: number of output channels (default 1).
        trainable: whether the generator variables are trainable
            (False freezes them, e.g. while training the discriminator).
        reuse: reuse variables of an existing scope with the same name.
        networktype: variable-scope name for all generator variables.
    '''
    with tf.variable_scope(networktype, reuse=reuse):
        # Latent vector -> 7x7x256 feature map.
        net = dense(z,
                    is_training,
                    Cout=7 * 7 * 256,
                    trainable=trainable,
                    act='reLu',
                    norm='batchnorm',
                    name='dense1')
        net = tf.reshape(net, shape=[-1, 7, 7, 256])  # 7x7
        # Stride-2 deconvolutions upsample 7 -> 14 -> 28.
        net = deconv(net,
                     is_training,
                     kernel_w=4,
                     stride=2,
                     epf=2,
                     Cout=128,
                     trainable=trainable,
                     act='reLu',
                     norm='batchnorm',
                     name='deconv1')  # 14x14
        net = deconv(net,
                     is_training,
                     kernel_w=4,
                     stride=2,
                     epf=2,
                     Cout=Cout,
                     trainable=trainable,
                     act=None,
                     norm=None,
                     name='deconv2')  # 28x28
        # Constrain pixels to [0, 1].
        net = tf.nn.sigmoid(net)
    return net
Exemplo n.º 5
0
def create_discriminator(Xin, is_training, reuse=False, networktype='ganD'):
    '''Build a GAN discriminator.

    The input is first projected by a dense layer, reshaped to 7x7x256, then
    refined by three 3x3 convolutions; the last one (no padding) shrinks the
    map to 5x5 with a single channel, followed by a sigmoid.

    Args:
        Xin: input tensor fed to the dense projection layer.
        is_training: bool tensor/flag forwarded to batch-norm layers.
        reuse: reuse variables of an existing scope with the same name.
        networktype: variable-scope name for all discriminator variables.
    '''
    with tf.variable_scope(networktype, reuse=reuse):
        net = dense(Xin,
                    is_training,
                    Cout=7 * 7 * 256,
                    act='reLu',
                    norm='batchnorm',
                    name='dense1')
        net = tf.reshape(net, shape=[-1, 7, 7, 256])  # 7x7
        # Two padded stride-1 convolutions keep the 7x7 resolution.
        net = conv(net,
                   is_training,
                   kernel_w=3,
                   stride=1,
                   pad=1,
                   Cout=128,
                   act='lrelu',
                   norm='batchnorm',
                   name='conv1')  # 7x7
        net = conv(net,
                   is_training,
                   kernel_w=3,
                   stride=1,
                   pad=1,
                   Cout=256,
                   act='lrelu',
                   norm='batchnorm',
                   name='conv2')  # 7x7
        # Unpadded 3x3 conv shrinks 7 -> 5 and collapses to one channel.
        net = conv(net,
                   is_training,
                   kernel_w=3,
                   stride=1,
                   pad=None,
                   Cout=1,
                   act=None,
                   norm='batchnorm',
                   name='conv3')  # 5x5
        # Per-location real/fake probability.
        net = tf.nn.sigmoid(net)
    return net