Example #1
0
def generator(name, in_data, training=True):
    """Build a named generator network: encode the input, transform the
    features, then decode the result back into an image.

    Args:
        name: variable-scope name selecting/sharing this generator's weights.
        in_data: input tensor fed to the encoder.
        training: whether the sub-networks run in training mode.

    Returns:
        The generated image tensor.
    """
    # Timing note from the original author: ~100 ms per 256x256x3 image
    # on a 2 GB GTX 1050.
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        encoded = encode(in_data, training)
        transformed = transform(encoded, training)
        return decode(transformed, training)
Example #2
0
    def forward(self, x):
        """Encode x, decode a prediction with dropout, and cache the latent
        distribution parameters (means, log_std) on the instance."""
        encoded, log_std, means = self.encode(x)

        # encoded = F.normalize(encoded, dim=1, p=2)  # (left disabled upstream)
        # NOTE(review): `l` is not defined in this snippet — presumably a
        # module-level decoder layer; confirm against the enclosing file.
        out = l.decode(encoded)
        out = F.dropout(out, p=0.5, training=self.training)

        # Keep the latent stats for later use (e.g. a KL-divergence term).
        self.means = means
        self.log_std = log_std

        return out
Example #3
0
File: vader.py — Project: joalmjoalm/VaDER
 def f(z):
     """Decode latent code z using the model configuration captured from
     the enclosing scope (presumably a closure over `self` — confirm)."""
     config = (self.D, self.I, self.cell_type, self.n_hidden,
               self.recurrent, self.output_activation)
     return decode(z, *config)
Example #4
0
    def forward(self, x):
        """Encode x (discarding the latent stats), decode a prediction,
        and regularize it with dropout."""
        code, _, _ = self.encode(x)
        # NOTE(review): `l` is not defined in this snippet — presumably a
        # module-level decoder layer; confirm against the enclosing file.
        pred = l.decode(code)
        return F.dropout(pred, p=0.5, training=self.training)
Example #5
0
def autoencoder(x, dropout, noise_std):
    """Denoising convolutional autoencoder graph for 28x28x1 input.

    Args:
        x: flat input batch, reshapeable to [-1, 28, 28, 1].
        dropout: keep probability for ``tf.nn.dropout`` applied to the code
            (TF 1.x semantics).
        noise_std: stddev of the Gaussian noise added to the input.

    Returns:
        Tuple ``(code, reconstruction)`` — the flattened latent code and the
        flattened reconstruction.

    Note:
        Relies on module-level helpers ``encode``, ``decode``,
        ``variable_summaries`` and the module-level constant ``n_input``.
    """
    # Filter variables: 3x3 convs, channels 1->10->10->10, uniform init
    # bounded by 1/sqrt(fan). (A commented-out truncated_normal variant was
    # removed as dead code.)
    with tf.name_scope('weights'):
        bound = 1.0 / np.sqrt(10)
        weights = {
            'h1': tf.Variable(tf.random_uniform([3, 3, 1, 10], -bound, bound)),
            'h2': tf.Variable(tf.random_uniform([3, 3, 10, 10], -bound, bound)),
            'h3': tf.Variable(tf.random_uniform([3, 3, 10, 10], -bound, bound)),
        }
        for layer_name in weights:
            with tf.name_scope('weights_' + layer_name):
                variable_summaries(weights[layer_name])

    # Bias variables: h1-h5 for 10-channel layers, h6 for the single-channel
    # output. NOTE(review): h4-h6 have no matching weights here — presumably
    # consumed by `decode`; confirm against that helper.
    with tf.name_scope('biases'):
        biases = {
            'h1': tf.Variable(tf.zeros([10])),
            'h2': tf.Variable(tf.zeros([10])),
            'h3': tf.Variable(tf.zeros([10])),
            'h4': tf.Variable(tf.zeros([10])),
            'h5': tf.Variable(tf.zeros([10])),
            'h6': tf.Variable(tf.zeros([1]))
        }
        for layer_name in biases:
            with tf.name_scope('bias_' + layer_name):
                variable_summaries(biases[layer_name])

    # Reshape the flat input into image form and log a few samples.
    with tf.name_scope('input'):
        x = tf.reshape(x, shape=[-1, 28, 28, 1])
        tf.summary.image('input', x, 10)

    # Corrupt the input with Gaussian noise (denoising-autoencoder setup).
    # Noise is added unconditionally; pass noise_std=0 to disable it at
    # evaluation time.
    with tf.name_scope('add_noise'):
        noise = tf.random_normal(shape=tf.shape(x),
                                 mean=0.0,
                                 stddev=noise_std,
                                 dtype=tf.float32)
        x = x + noise

    # Encode the (noised) input.
    with tf.name_scope('encoding'):
        code = encode(x, weights, biases)

    # Dropout on the code (training-time regularization).
    code = tf.nn.dropout(code, dropout)

    # Decode the code back into an image.
    with tf.name_scope('decoding'):
        reconstruction = decode(code, weights, biases)

    # Flatten both outputs; the reconstruction is flattened via the global
    # `n_input`.
    tf.summary.image('output', reconstruction, 10)
    code = tf.reshape(
        code,
        [-1, tf.shape(code)[1] * tf.shape(code)[2] * tf.shape(code)[3]])
    reconstruction = tf.reshape(reconstruction, [-1, n_input])

    # Logging.
    tf.summary.histogram('code', code)
    tf.summary.histogram('reconstruction', reconstruction)
    return (code, reconstruction)