Example #1
File: main.py  Project: RuiShu/bcde
    def make_writer():
        # Make log file
        writer = tb.FileWriter(log_file,
                               args=args,
                               pipe_to_sys=True,
                               overwrite=args.run >= 999)
        # Train log
        writer.add_var('train_iw', '{:4d}', T.iw)
        for v in ['bcde', 'bjde_x', 'bjde_xy', 'bjde_xu', 'bjde_yu', 'loss']:
            writer.add_var('train_{:s}'.format(v), '{:8.3f}', T[v])
        writer.add_var('l2_loss', '{:9.2e}', T.l2)

        # Validation log
        writer.add_var('valid_iw', '{:4d}')
        for v in ['bcde', 'bcde_x', 'bjde_xy', 'bjde_xu', 'bjde_yu', 'loss']:
            writer.add_var('valid_{:s}'.format(v), '{:8.3f}')

        # Test log
        writer.add_var('test_iw', '{:4d}')
        for v in ['bcde', 'bcde_x', 'bjde_xy', 'bjde_xu', 'bjde_yu', 'loss']:
            writer.add_var('test_{:s}'.format(v), '{:8.3f}')

        # Extra info
        writer.add_var('epoch', '{:>8d}')
        writer.initialize()
        return writer
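
In the listing above, add_var is called with a column name, a print format, and optionally the tensor to fetch, and initialize() is called once every column has been registered. For readers without tensorbayes installed, a hypothetical plain-Python stand-in for that pattern might look like the sketch below (the class MiniCSVLogger and its log method are inventions for illustration, not the tensorbayes.FileWriter API):

import csv

class MiniCSVLogger(object):
    """Hypothetical stand-in mimicking the add_var / initialize pattern."""
    def __init__(self, path):
        self.path, self.names, self.fmts = path, [], []

    def add_var(self, name, fmt):
        # Register a column name and its print format
        self.names.append(name)
        self.fmts.append(fmt)

    def initialize(self):
        # Write the CSV header once every column has been registered
        with open(self.path, 'w') as f:
            csv.writer(f).writerow(self.names)

    def log(self, values):
        # Append one row and echo it using the registered formats
        with open(self.path, 'a') as f:
            csv.writer(f).writerow(values)
        print(' | '.join(fmt.format(v) for fmt, v in zip(self.fmts, values)))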
Example #2
import tensorbayes as tb
from tensorbayes.layers import *
from tensorbayes.distributions import log_bernoulli_with_logits, log_normal

if args.nonlin == 'relu':
    activate = tf.nn.relu
elif args.nonlin == 'elu':
    activate = tf.nn.elu
else:
    raise ValueError("Unexpected nonlinearity arg: {:s}".format(args.nonlin))
args.save_dir = args.save_dir.rstrip('/')
log_file = 'results/lvae{:d}.csv'.format(args.run)
# Strip the '.csv' suffix (str.rstrip removes characters, not a suffix)
model_dir = '{:s}/{:s}'.format(args.save_dir, log_file[:-len('.csv')])
# Convenience wrappers around the tensorbayes log-densities
log_bern = lambda x, logits: log_bernoulli_with_logits(x, logits, args.eps)
log_norm = lambda x, mu, var: log_normal(x, mu, var, 0.0)
writer = tb.FileWriter(log_file, args=args, pipe_to_sys=True)


# Convenience layers and graph blocks
def name(index, suffix):
    return 'z{:d}'.format(index) + '_' + suffix


def encode_block(x, h_size, z_size, idx):
    with tf.variable_scope(name(idx, 'encode')):
        h = dense(x, h_size, 'layer1', activation=activate)
        h = dense(h, h_size, 'layer2', activation=activate)
    with tf.variable_scope(name(idx, 'encode/likelihood')):
        z_m = dense(h, z_size, 'mean')
        z_v = dense(h, z_size, 'var', activation=tf.nn.softplus) + args.eps
    return (z_m, z_v)
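
encode_block returns the mean and softplus variance of a diagonal Gaussian. Drawing a latent sample from it is typically done with the reparameterization trick; a minimal sketch under that assumption (sample_gaussian is a hypothetical helper, and the sizes 512 / 64 are placeholders, not values from the original script):

def sample_gaussian(z_m, z_v):
    # Reparameterization trick: z = mean + std * eps, with eps ~ N(0, I)
    eps = tf.random_normal(tf.shape(z_m))
    return z_m + tf.sqrt(z_v) * eps

# e.g. draw the first-level latent from the encoder outputs
z1_m, z1_v = encode_block(x, h_size=512, z_size=64, idx=1)
z1 = sample_gaussian(z1_m, z1_v)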
Example #3
# BEGAN-style objectives: the discriminator is an autoencoder, so its per-sample
# "loss" is the pixel-wise reconstruction error on real and generated images
d_fake_loss = tf.reduce_mean(tf.abs(x_fake - d_fake))
d_loss = d_real_loss - k * d_fake_loss
g_loss = d_fake_loss
# Global convergence measure M from the BEGAN paper
m_global = d_real_loss + tf.abs(gamma * d_real_loss - d_fake_loss)

# Optimizer
# Collect trainable variables by scope so D and G get separate optimizers
d_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'disc*')
g_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'gen*')
print([v.name for v in d_var])
print([v.name for v in g_var])
d_train = tf.train.AdamOptimizer(lr, 0.5).minimize(d_loss, var_list=d_var)
g_train = tf.train.AdamOptimizer(lr, 0.5).minimize(g_loss, var_list=g_var)

# Logger
base_dir = 'results/gamma={:.1f}_run={:d}'.format(args.gamma, args.run)
writer = tb.FileWriter(os.path.join(base_dir, 'log.csv'), args=args, overwrite=args.run >= 999)
writer.add_var('d_real', '{:8.4f}', d_real_loss)
writer.add_var('d_fake', '{:8.4f}', d_fake_loss)
writer.add_var('k', '{:8.4f}', k * 1)    # k * 1 turns the Variable into a fetchable Tensor
writer.add_var('M', '{:8.4f}', m_global)
writer.add_var('lr', '{:8.6f}', lr * 1)  # same trick for the learning rate
writer.add_var('iter', '{:>8d}')
writer.initialize()

sess = tf.Session()
load_model(sess)
f_gen = tb.function(sess, [z], x_fake)
f_rec = tb.function(sess, [x_real], d_real)
celeba = CelebA(args.data)
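
tb.function wraps sess.run into a plain callable over the listed inputs and outputs. Assuming the returned callables take the feed arrays positionally (an assumption about the tensorbayes helper, whose call signature is not shown here, and assuming numpy is available as np), sampling and reconstruction might look like:

# Hypothetical usage; args.z_dim and x_batch stand in for values defined elsewhere
z_batch = np.random.uniform(-1., 1., size=(64, args.z_dim)).astype('float32')
x_gen = f_gen(z_batch)   # decode noise into generated images
x_rec = f_rec(x_batch)   # reconstruct a batch of real images from the CelebA loader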

# Alternatively try grouping d_train/g_train together
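
In BEGAN, the balance variable k is updated each step as k <- clip(k + lambda_k * (gamma * L_real - L_fake), 0, 1). A sketch of one way to wire that update together with the two optimizer steps, assuming k is a non-trainable tf.Variable and taking lambda_k as a new, assumed hyperparameter (neither is shown in this excerpt):

lambda_k = 1e-3  # proportional gain on the balance term (assumed value)
with tf.control_dependencies([d_train, g_train]):
    # Apply the k update only after both optimizer steps in the same sess.run
    k_update = tf.assign(
        k, tf.clip_by_value(k + lambda_k * (gamma * d_real_loss - d_fake_loss), 0., 1.))
train_op = k_update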
Example #4
import sys
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow's C++ logging
import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework import arg_scope
import tensorbayes as tb
from tensorbayes.layers import *
from tensorbayes.distributions import log_bernoulli_with_logits, log_normal
from data import Mnist

log_file = 'results/n_labels={:d}/m2_run={:d}.csv'.format(
    args.n_labels, args.run)
writer = tb.FileWriter(log_file,
                       args=args,
                       pipe_to_sys=True,
                       overwrite=args.run >= 999)
if args.nonlin == 'relu':
    activate = tf.nn.relu
elif args.nonlin == 'elu':
    activate = tf.nn.elu
else:
    raise ValueError("Unexpected nonlinearity arg: {:s}".format(args.nonlin))
# Convenience wrappers around the tensorbayes log-densities
log_bern = lambda x, logits: log_bernoulli_with_logits(x, logits, args.eps)
log_norm = lambda x, mu, var: log_normal(x, mu, var, 0.0)


def name(idx, suffix):
    return 'L{:d}'.format(idx) + '_' + suffix
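
The log_bern / log_norm wrappers are the building blocks of the variational bound. A minimal single-latent sketch of how they combine into a per-batch loss, assuming x is the observed batch, x_logits are decoder logits, and z, z_m, z_v come from an encoder like the one in Example #2 (all of these names are placeholders, not variables from the original script):

# Monte-Carlo estimate of the negative ELBO for one latent layer:
#   -E[ log p(x|z) + log p(z) - log q(z|x) ]
log_px_z = log_bern(x, x_logits)                            # reconstruction term
log_pz   = log_norm(z, tf.zeros_like(z), tf.ones_like(z))   # standard-normal prior
log_qz_x = log_norm(z, z_m, z_v)                            # approximate posterior
loss = -tf.reduce_mean(log_px_z + log_pz - log_qz_x)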