Example no. 1
def ed_graph_2(disc=1):
	# Priors
	if str(sys.argv[4]) == 'laplace':
		W_0 = Laplace(loc=tf.zeros([D, n_hidden]), scale=(std**2/D)*tf.ones([D, n_hidden]))
		W_1 = Laplace(loc=tf.zeros([n_hidden, K]), scale=(std**2/n_hidden)*tf.ones([n_hidden, K]))
		b_0 = Laplace(loc=tf.zeros(n_hidden), scale=(std**2/D)*tf.ones(n_hidden))
		b_1 = Laplace(loc=tf.zeros(K), scale=(std**2/n_hidden)*tf.ones(K))

	if str(sys.argv[4]) == 'normal':
		W_0 = Normal(loc=tf.zeros([D, n_hidden]), scale=std*D**(-.5)*tf.ones([D, n_hidden]))
		W_1 = Normal(loc=tf.zeros([n_hidden, K]), scale=std*n_hidden**(-.5)*tf.ones([n_hidden, K]))
		b_0 = Normal(loc=tf.zeros(n_hidden), scale=std*D**(-.5)*tf.ones(n_hidden))
		b_1 = Normal(loc=tf.zeros(K), scale=std*n_hidden**(-.5)*tf.ones(K))

	if str(sys.argv[4]) == 'T':
		W_0 = StudentT(df=df*tf.ones([D, n_hidden]), loc=tf.zeros([D, n_hidden]), scale=std**2/D*tf.ones([D, n_hidden]))
		W_1 = StudentT(df=df*tf.ones([n_hidden, K]), loc=tf.zeros([n_hidden, K]), scale=std**2/n_hidden*tf.ones([n_hidden, K]))
		b_0 = StudentT(df=df*tf.ones([n_hidden]), loc=tf.zeros(n_hidden), scale=std**2/D*tf.ones(n_hidden))
		b_1 = StudentT(df=df*tf.ones([K]), loc=tf.zeros(K), scale=std**2/n_hidden*tf.ones(K))

	x = tf.placeholder(tf.float32, [None, None])
	y = Categorical(logits=nn(x, W_0, b_0, W_1, b_1))
	# We use a placeholder for the labels in anticipation of the training data.
	y_ph = tf.placeholder(tf.int32, [None])

	# Use a placeholder for the pre-trained posteriors
	w0 = tf.placeholder(tf.float32, [n_samp, D, n_hidden])
	w1 = tf.placeholder(tf.float32, [n_samp, n_hidden, K])
	b0 = tf.placeholder(tf.float32, [n_samp, n_hidden])
	b1 = tf.placeholder(tf.float32, [n_samp, K])

	# Empirical distribution 
	qW_0 = Empirical(params=tf.Variable(w0))
	qW_1 = Empirical(params=tf.Variable(w1))
	qb_0 = Empirical(params=tf.Variable(b0))
	qb_1 = Empirical(params=tf.Variable(b1))
	
	if str(sys.argv[3]) == 'hmc':	
		inference = ed.HMC({W_0: qW_0, b_0: qb_0, W_1: qW_1, b_1: qb_1}, data={y: y_ph})
	if str(sys.argv[3]) == 'sghmc':	
		inference = ed.SGHMC({W_0: qW_0, b_0: qb_0, W_1: qW_1, b_1: qb_1}, data={y: y_ph})

	# Initialise the inference variables
	if str(sys.argv[3]) == 'hmc':
		inference.initialize(step_size=disc*leap_size, n_steps=step_no, n_print=100)
	if str(sys.argv[3]) == 'sghmc':
		inference.initialize(step_size=disc*leap_size, friction=disc**2*0.1, n_print=100)
	
	return ((x, y), y_ph, W_0, b_0, W_1, b_1, qW_0, qb_0, qW_1, qb_1, inference,
		w0, w1, b0, b1)
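A minimal sketch of how a graph built this way can be driven, assuming pre-trained posterior sample arrays W0_samp, W1_samp, b0_samp, b1_samp (hypothetical names) whose shapes match the placeholders. Variables whose initial value is a placeholder must be fed at init time; this is the workaround for TensorFlow's 2 GB graph-constant limit noted in the later examples.

# Hypothetical driver code; the sample arrays are assumptions.
((x, y), y_ph, W_0, b_0, W_1, b_1, qW_0, qb_0, qW_1, qb_1,
 inference, w0, w1, b0, b1) = ed_graph_2()

sess = ed.get_session()
# Feed the pre-trained samples so the placeholder-backed Variables initialise.
sess.run(tf.global_variables_initializer(),
         feed_dict={w0: W0_samp, w1: W1_samp, b0: b0_samp, b1: b1_samp})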
sigmaf = sigmaf_upperT + tf.transpose(sigmaf_upperT) - sigmaf_diag
f_scale = tf.cholesky(sigmaf + offset * tf.eye(M, dtype=tf.float64),
                      name='f_scale')

# p(F|U,X,Xu)
f = MultivariateNormalTriL(loc=tf.cast(KffKuuinvU, dtype=tf.float32),
                           scale_tril=tf.cast(f_scale, dtype=tf.float32),
                           name='pf')

# p(Y|F)
t_var_pre = tf.Variable(0.5 * np.ones((G, 1)), dtype=tf.float32)
t_var_full = tf.nn.softplus(t_var_pre)
idx_g = tf.placeholder(tf.int32, p)
t_var = tf.gather(t_var_full, idx_g)
if Terror:
    y = StudentT(df=df, loc=f, scale=t_var)
else:
    y = Normal(loc=f, scale=t_var)

## Define q(x)

qx_mean = tf.Variable(qx_init, dtype=tf.float32, name='qx_mean')
qx_scale = tf.Variable(tf.ones((N, Q)), dtype=tf.float32, name='qx_scale')

idx_ph = tf.placeholder(tf.int32, M)
qx_mini = tf.gather(qx_mean, idx_ph)
qx_scale_mini = tf.gather(qx_scale, idx_ph)

qx = Normal(loc=qx_mini, scale=tf.nn.softplus(qx_scale_mini), name='qx')

QKfu = kernelfx(qx, xu)
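kernelfx is not defined in this fragment; a minimal squared-exponential (RBF) sketch consistent with how it is called here, with hypothetical lengthscale and variance hyperparameters:

def kernelfx(X1, X2, lengthscale=1.0, variance=1.0):
    # Cross-covariance K(X1, X2) under an RBF kernel; X1: [N1, Q], X2: [N2, Q].
    X1s = tf.reduce_sum(tf.square(X1), axis=1, keepdims=True)
    X2s = tf.reduce_sum(tf.square(X2), axis=1, keepdims=True)
    sq_dist = X1s - 2.0 * tf.matmul(X1, X2, transpose_b=True) + tf.transpose(X2s)
    return variance * tf.exp(-0.5 * sq_dist / lengthscale**2)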
def ed_graph_init():
    # Graph for prior distributions
    if str(sys.argv[4]) == 'laplace':
        W_0 = Laplace(loc=tf.zeros([D, n_hidden]),
                      scale=tf.ones([D, n_hidden]))
        W_1 = Laplace(loc=tf.zeros([n_hidden, K]),
                      scale=(std**2 / n_hidden) * tf.ones([n_hidden, K]))
        b_0 = Laplace(loc=tf.zeros(n_hidden), scale=tf.ones(n_hidden))
        b_1 = Laplace(loc=tf.zeros(K), scale=(std**2 / n_hidden) * tf.ones(K))
    if str(sys.argv[4]) == 'normal':
        W_0 = Normal(loc=tf.zeros([D, n_hidden]), scale=tf.ones([D, n_hidden]))
        W_1 = Normal(loc=tf.zeros([n_hidden, K]),
                     scale=std * n_hidden**(-.5) * tf.ones([n_hidden, K]))
        b_0 = Normal(loc=tf.zeros(n_hidden), scale=tf.ones(n_hidden))
        b_1 = Normal(loc=tf.zeros(K), scale=std * n_hidden**(-.5) * tf.ones(K))
    if str(sys.argv[4]) == 'T':
        W_0 = StudentT(df=df * tf.ones([D, n_hidden]),
                       loc=tf.zeros([D, n_hidden]),
                       scale=tf.ones([D, n_hidden]))
        W_1 = StudentT(df=df * tf.ones([n_hidden, K]),
                       loc=tf.zeros([n_hidden, K]),
                       scale=std**2 / n_hidden * tf.ones([n_hidden, K]))
        b_0 = StudentT(df=df * tf.ones([n_hidden]),
                       loc=tf.zeros(n_hidden),
                       scale=tf.ones(n_hidden))
        b_1 = StudentT(df=df * tf.ones([K]),
                       loc=tf.zeros(K),
                       scale=std**2 / n_hidden * tf.ones(K))
    # Inputs
    x = tf.placeholder(tf.float32, [None, D])
    # Regression likelihood
    y = Normal(loc=nn(x, W_0, b_0, W_1, b_1),
               scale=std_out * tf.ones([tf.shape(x)[0]]))
    # We use a placeholder for the labels in anticipation of the training data.
    y_ph = tf.placeholder(tf.float32, [None])

    # Graph for posterior distribution
    if str(sys.argv[4]) == 'normal':
        qW_0 = Empirical(
            params=tf.Variable(tf.random_normal([n_samp, D, n_hidden])))
        qW_1 = Empirical(params=tf.Variable(
            tf.random_normal([n_samp, n_hidden, K],
                             stddev=std * (n_hidden**-.5))))
        qb_0 = Empirical(
            params=tf.Variable(tf.random_normal([n_samp, n_hidden])))
        qb_1 = Empirical(params=tf.Variable(
            tf.random_normal([n_samp, K], stddev=std * (n_hidden**-.5))))
    if str(sys.argv[4]) == 'laplace' or str(sys.argv[4]) == 'T':
        # Use a placeholder otherwise cannot assign a tensor > 2GB
        w0 = tf.placeholder(tf.float32, [n_samp, D, n_hidden])
        w1 = tf.placeholder(tf.float32, [n_samp, n_hidden, K])
        b0 = tf.placeholder(tf.float32, [n_samp, n_hidden])
        b1 = tf.placeholder(tf.float32, [n_samp, K])
        # Empirical distribution
        qW_0 = Empirical(params=tf.Variable(w0))
        qW_1 = Empirical(params=tf.Variable(w1))
        qb_0 = Empirical(params=tf.Variable(b0))
        qb_1 = Empirical(params=tf.Variable(b1))
    # Build inference graph
    if str(sys.argv[3]) == 'hmc':
        inference = ed.HMC({
            W_0: qW_0,
            b_0: qb_0,
            W_1: qW_1,
            b_1: qb_1
        },
                           data={y: y_ph})
    if str(sys.argv[3]) == 'sghmc':
        inference = ed.SGHMC({
            W_0: qW_0,
            b_0: qb_0,
            W_1: qW_1,
            b_1: qb_1
        },
                             data={y: y_ph})

    # Initialise the inference variables
    if str(sys.argv[3]) == 'hmc':
        inference.initialize(step_size=leap_size, n_steps=step_no, n_print=100)
    if str(sys.argv[3]) == 'sghmc':
        inference.initialize(step_size=leap_size, friction=0.4, n_print=100)

    if str(sys.argv[4]) == 'laplace' or str(sys.argv[4]) == 'T':
        return ((x, y), y_ph, W_0, b_0, W_1, b_1, qW_0, qb_0, qW_1, qb_1,
                inference, w0, w1, b0, b1)
    else:
        return ((x, y), y_ph, W_0, b_0, W_1, b_1, qW_0, qb_0, qW_1, qb_1,
                inference)
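Most of these snippets select the sampler and prior family from the command line; the mapping implied by the argv indices is, hypothetically:

# Hypothetical invocation; argv[1] and argv[2] are not referenced in these
# snippets (one later fragment reads the prior from argv[5] instead):
#   python script.py <...> <...> hmc laplace
inference_alg = sys.argv[3]   # 'hmc' or 'sghmc'
prior_family = sys.argv[4]    # 'laplace', 'normal', or 'T'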
Example no. 4
def _test(df, mu, sigma, n):
    x = StudentT(df=df, mu=mu, sigma=sigma)
    val_est = get_dims(x.sample(n))
    val_true = n + get_dims(mu)
    assert val_est == val_true
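A hypothetical call, assuming the early Edward API in which StudentT takes mu/sigma, get_dims comes from edward.util, and sample sizes are lists (val_true concatenates n with get_dims(mu)):

_test(df=3.0, mu=tf.zeros(5), sigma=tf.ones(5), n=[100])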
Example no. 5
    W_0 = Laplace(loc=tf.zeros([D, n_hidden]), scale=tf.ones([D, n_hidden]))
    W_1 = Laplace(loc=tf.zeros([n_hidden, K]),
                  scale=std**2 * (n_hidden**-1) * tf.ones([n_hidden, K]))
    b_0 = Laplace(loc=tf.zeros(n_hidden), scale=tf.ones(n_hidden))
    b_1 = Laplace(loc=tf.zeros(K), scale=std**2 * (n_hidden**-1) * tf.ones(K))

if str(sys.argv[3]) == 'normal':
    W_0 = Normal(loc=tf.zeros([D, n_hidden]), scale=tf.ones([D, n_hidden]))
    W_1 = Normal(loc=tf.zeros([n_hidden, K]),
                 scale=std * (n_hidden**-.5) * tf.ones([n_hidden, K]))
    b_0 = Normal(loc=tf.zeros(n_hidden), scale=tf.ones(n_hidden))
    b_1 = Normal(loc=tf.zeros(K), scale=std * (n_hidden**-.5) * tf.ones(K))

if str(sys.argv[3]) == 'T':
    W_0 = StudentT(df=df * tf.ones([D, n_hidden]),
                   loc=tf.zeros([D, n_hidden]),
                   scale=tf.ones([D, n_hidden]))
    W_1 = StudentT(df=df * tf.ones([n_hidden, K]),
                   loc=tf.zeros([n_hidden, K]),
                   scale=abs(df / (df - 2)) * n_hidden**(-.5) *
                   tf.ones([n_hidden, K]))
    b_0 = StudentT(df=df * tf.ones([n_hidden]),
                   loc=tf.zeros(n_hidden),
                   scale=tf.ones(n_hidden))
    b_1 = StudentT(df=df * tf.ones([K]),
                   loc=tf.zeros(K),
                   scale=abs(df / (df - 2)) * n_hidden**(-.5) * tf.ones(K))

x = tf.placeholder(tf.float32, [None, None])
y = Normal(loc=nn(x, W_0, b_0, W_1, b_1),
           scale=std_out * tf.ones([tf.shape(x)[0]]))
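The nn helper used throughout these examples is never shown; a minimal two-layer sketch consistent with the weight shapes above (the tanh activation is an assumption):

def nn(x, W_0, b_0, W_1, b_1):
    # One hidden tanh layer, linear output. The regression examples (K = 1)
    # presumably flatten the output to shape [batch].
    h = tf.tanh(tf.matmul(x, W_0) + b_0)
    return tf.matmul(h, W_1) + b_1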
Example no. 6
def ed_graph_2(disc=1):
    # Priors
    if str(sys.argv[4]) == 'laplace':
        W_0 = Laplace(loc=tf.zeros([D, n_hidden]),
                      scale=(std**2 / D) * tf.ones([D, n_hidden]))
        W_1 = Laplace(loc=tf.zeros([n_hidden, n_hidden]),
                      scale=(std**2 / n_hidden) *
                      tf.ones([n_hidden, n_hidden]))
        W_2 = Laplace(loc=tf.zeros([n_hidden, K]),
                      scale=(std**2 / n_hidden) * tf.ones([n_hidden, K]))
        b_0 = Laplace(loc=tf.zeros(n_hidden),
                      scale=(std**2 / D) * tf.ones(n_hidden))
        b_1 = Laplace(loc=tf.zeros(n_hidden),
                      scale=(std**2 / n_hidden) * tf.ones(n_hidden))
        b_2 = Laplace(loc=tf.zeros(K), scale=(std**2 / n_hidden) * tf.ones(K))

    if str(sys.argv[4]) == 'normal':
        W_0 = Normal(loc=tf.zeros([D, n_hidden]),
                     scale=std * D**-.5 * tf.ones([D, n_hidden]))
        W_1 = Normal(loc=tf.zeros([n_hidden, n_hidden]),
                     scale=std * n_hidden**-.5 * tf.ones([n_hidden, n_hidden]))
        W_2 = Normal(loc=tf.zeros([n_hidden, K]),
                     scale=std * n_hidden**-.5 * tf.ones([n_hidden, K]))
        b_0 = Normal(loc=tf.zeros(n_hidden),
                     scale=std * D**-.5 * tf.ones(n_hidden))
        b_1 = Normal(loc=tf.zeros(n_hidden),
                     scale=10 * n_hidden**(-.5) * tf.ones(n_hidden))
        b_2 = Normal(loc=tf.zeros(K), scale=10 * n_hidden**(-.5) * tf.ones(K))

    if str(sys.argv[4]) == 'T':
        W_0 = StudentT(df=df * tf.ones([D, n_hidden]),
                       loc=tf.zeros([D, n_hidden]),
                       scale=(std**2 / D) * tf.ones([D, n_hidden]))
        W_1 = StudentT(df=df * tf.ones([n_hidden, n_hidden]),
                       loc=tf.zeros([n_hidden, n_hidden]),
                       scale=(std**2 / n_hidden) *
                       tf.ones([n_hidden, n_hidden]))
        W_2 = StudentT(df=df * tf.ones([n_hidden, K]),
                       loc=tf.zeros([n_hidden, K]),
                       scale=(std**2 / n_hidden) * tf.ones([n_hidden, K]))
        b_0 = StudentT(df=df * tf.ones([n_hidden]),
                       loc=tf.zeros(n_hidden),
                       scale=(std**2 / D) * tf.ones(n_hidden))
        b_1 = StudentT(df=df * tf.ones([n_hidden]),
                       loc=tf.zeros(n_hidden),
                       scale=(std**2 / n_hidden) * tf.ones(n_hidden))
        b_2 = StudentT(df=df * tf.ones([K]),
                       loc=tf.zeros(K),
                       scale=(std**2 / n_hidden) * tf.ones(K))

    x = tf.placeholder(tf.float32, [None, None])
    y = Categorical(logits=nn(x, W_0, b_0, W_1, b_1, W_2, b_2))
    # We use a placeholder for the labels in anticipation of the training data.
    y_ph = tf.placeholder(tf.int32, [N])

    # Use a placeholder for the pre-trained posteriors
    p0 = tf.placeholder(tf.float32, [n_samp, D, n_hidden])
    p1 = tf.placeholder(tf.float32, [n_samp, n_hidden, n_hidden])
    p2 = tf.placeholder(tf.float32, [n_samp, n_hidden, K])
    pp0 = tf.placeholder(tf.float32, [n_samp, n_hidden])
    pp1 = tf.placeholder(tf.float32, [n_samp, n_hidden])
    pp2 = tf.placeholder(tf.float32, [n_samp, K])

    w0 = tf.Variable(p0)
    w1 = tf.Variable(p1)
    w2 = tf.Variable(p2)
    b0 = tf.Variable(pp0)
    b1 = tf.Variable(pp1)
    b2 = tf.Variable(pp2)
    # Empirical distribution
    qW_0 = Empirical(params=w0)
    qW_1 = Empirical(params=w1)
    qW_2 = Empirical(params=w2)
    qb_0 = Empirical(params=b0)
    qb_1 = Empirical(params=b1)
    qb_2 = Empirical(params=b2)

    if str(sys.argv[3]) == 'hmc':
        inference = ed.HMC(
            {
                W_0: qW_0,
                b_0: qb_0,
                W_1: qW_1,
                b_1: qb_1,
                W_2: qW_2,
                b_2: qb_2
            },
            data={y: y_ph})
    if str(sys.argv[3]) == 'sghmc':
        inference = ed.SGHMC(
            {
                W_0: qW_0,
                b_0: qb_0,
                W_1: qW_1,
                b_1: qb_1,
                W_2: qW_2,
                b_2: qb_2
            },
            data={y: y_ph})

    # Initialise the inference variables
    if str(sys.argv[3]) == 'hmc':
        inference.initialize(step_size=leap_size,
                             n_steps=step_no,
                             n_print=100,
                             scale={y: float(mnist.train.num_examples) / N})
    if str(sys.argv[3]) == 'sghmc':
        inference.initialize(step_size=leap_size,
                             friction=0.4,
                             n_print=100,
                             scale={y: float(mnist.train.num_examples) / N})

    return ((x, y), y_ph, W_0, b_0, W_1, b_1, W_2, b_2, qW_0, qb_0, qW_1, qb_1,
            qW_2, qb_2, inference, p0, p1, p2, pp0, pp1, pp2, w0, w1, w2, b0,
            b1, b2)
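The scale={y: ...} argument reweights a size-N minibatch likelihood up to the full training set; a minimal sketch of the matching update loop, assuming the graph has been unpacked and initialised as in the earlier sketch:

for _ in range(inference.n_iter):
    X_batch, Y_batch = mnist.train.next_batch(N)   # N = minibatch size
    info_dict = inference.update(feed_dict={x: X_batch, y_ph: Y_batch})
    inference.print_progress(info_dict)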

if str(sys.argv[5]) == 'laplace':
	W_0 = Laplace(loc=tf.zeros([D, n_hidden]), scale=std**2/D*tf.ones([D, n_hidden]))
	W_1 = Laplace(loc=tf.zeros([n_hidden, K]), scale=std**2/n_hidden*tf.ones([n_hidden, K]))
	b_0 = Laplace(loc=tf.zeros(n_hidden), scale=std**2/D*tf.ones(n_hidden))
	b_1 = Laplace(loc=tf.zeros(K), scale=std**2/n_hidden*tf.ones(K))

if str(sys.argv[5]) == 'normal':
	W_0 = Normal(loc=tf.zeros([D, n_hidden]), scale=std*D**(-.5)*tf.ones([D, n_hidden]))
	W_1 = Normal(loc=tf.zeros([n_hidden, K]), scale=std*n_hidden**(-.5)*tf.ones([n_hidden, K]))
	b_0 = Normal(loc=tf.zeros(n_hidden), scale=std*D**(-.5)*tf.ones(n_hidden))
	b_1 = Normal(loc=tf.zeros(K), scale=std*n_hidden**(-.5)*tf.ones(K))

if str(sys.argv[5]) == 'T':
	W_0 = StudentT(df=df*tf.ones([D, n_hidden]), loc=tf.zeros([D, n_hidden]), scale=std**2/D*tf.ones([D, n_hidden]))
	W_1 = StudentT(df=df*tf.ones([n_hidden, K]), loc=tf.zeros([n_hidden, K]), scale=std**2/n_hidden*tf.ones([n_hidden, K]))
	b_0 = StudentT(df=df*tf.ones([n_hidden]), loc=tf.zeros(n_hidden), scale=std**2/D*tf.ones(n_hidden))
	b_1 = StudentT(df=df*tf.ones([K]), loc=tf.zeros(K), scale=std**2/n_hidden*tf.ones(K))

x = tf.placeholder(tf.float32, [None, None])
y = Categorical(logits=nn(x, W_0, b_0, W_1, b_1))
# We use a placeholder for the labels in anticipation of the training data.
y_ph = tf.placeholder(tf.int32, [None])

# Build predictive graph
x_pred = tf.placeholder(tf.float32, [None, None])
ww0 = tf.placeholder(tf.float32, [None, None])
ww1 = tf.placeholder(tf.float32, [None, None])
bb0 = tf.placeholder(tf.float32, [None])
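The snippet is cut off after bb0; a sketch of how such a predictive graph is typically completed, with a hypothetical bb1 placeholder mirroring bb0 and the same nn helper (softmax turns the logits into class probabilities for one posterior sample):

bb1 = tf.placeholder(tf.float32, [None])  # hypothetical; mirrors bb0
y_pred = tf.nn.softmax(nn(x_pred, ww0, bb0, ww1, bb1))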
Example no. 8
def _test(df, mu, sigma, n):
  x = StudentT(df=df, mu=mu, sigma=sigma)
  val_est = get_dims(x.sample(n))
  val_true = n + get_dims(mu)
  assert val_est == val_true
def ed_graph_init():
    # Priors
    if str(sys.argv[4]) == 'laplace':
        W_0 = Laplace(loc=tf.zeros([F, F, 1, C]), scale=tf.ones([F, F, 1, C]))
        W_1 = Laplace(loc=tf.zeros([F, F, C, C]), scale=tf.ones([F, F, C, C]))
        W_2 = Laplace(loc=tf.zeros([7 * 7 * C, n_hidden]),
                      scale=std**2 / (7 * 7 * C) *
                      tf.ones([7 * 7 * C, n_hidden]))
        W_3 = Laplace(loc=tf.zeros([n_hidden, K]),
                      scale=std**2 / n_hidden * tf.ones([n_hidden, K]))
        b_0 = Laplace(loc=tf.zeros(C), scale=tf.ones(C))
        b_1 = Laplace(loc=tf.zeros(C), scale=tf.ones(C))
        b_2 = Laplace(loc=tf.zeros(n_hidden),
                      scale=std**2 / (7 * 7 * C) * tf.ones(n_hidden))
        b_3 = Laplace(loc=tf.zeros(K), scale=std**2 / n_hidden * tf.ones(K))

    if str(sys.argv[4]) == 'normal':
        W_0 = Normal(loc=tf.zeros([F, F, 1, C]), scale=tf.ones([F, F, 1, C]))
        W_1 = Normal(loc=tf.zeros([F, F, C, C]), scale=tf.ones([F, F, C, C]))
        W_2 = Normal(loc=tf.zeros([7 * 7 * C, n_hidden]),
                     scale=std * (7 * 7 * C)**-.5 *
                     tf.ones([7 * 7 * C, n_hidden]))
        W_3 = Normal(loc=tf.zeros([n_hidden, K]),
                     scale=std * n_hidden**-.5 * tf.ones([n_hidden, K]))
        b_0 = Normal(loc=tf.zeros(C), scale=tf.ones(C))
        b_1 = Normal(loc=tf.zeros(C), scale=tf.ones(C))
        b_2 = Normal(loc=tf.zeros(n_hidden),
                     scale=std * (7 * 7 * C)**-.5 * tf.ones(n_hidden))
        b_3 = Normal(loc=tf.zeros(K), scale=std * n_hidden**-.5 * tf.ones(K))

    if str(sys.argv[4]) == 'T':
        W_0 = StudentT(df=df * tf.ones([F, F, 1, C]),
                       loc=tf.zeros([F, F, 1, C]),
                       scale=tf.ones([F, F, 1, C]))
        W_1 = StudentT(df=df * tf.ones([F, F, C, C]),
                       loc=tf.zeros([F, F, C, C]),
                       scale=tf.ones([F, F, C, C]))
        W_2 = StudentT(df=df * tf.ones([7 * 7 * C, n_hidden]),
                       loc=tf.zeros([7 * 7 * C, n_hidden]),
                       scale=std**2 / (7 * 7 * C) *
                       tf.ones([7 * 7 * C, n_hidden]))
        W_3 = StudentT(df=df * tf.ones([n_hidden, K]),
                       loc=tf.zeros([n_hidden, K]),
                       scale=std**2 / n_hidden * tf.ones([n_hidden, K]))
        b_0 = StudentT(df=df * tf.ones(C), loc=tf.zeros(C), scale=tf.ones(C))
        b_1 = StudentT(df=df * tf.ones(C), loc=tf.zeros(C), scale=tf.ones(C))
        b_2 = StudentT(df=df * tf.ones(n_hidden),
                       loc=tf.zeros(n_hidden),
                       scale=std**2 / (7 * 7 * C) * tf.ones(n_hidden))
        b_3 = StudentT(df=df * tf.ones(K),
                       loc=tf.zeros(K),
                       scale=std**2 / n_hidden * tf.ones(K))

    x = tf.placeholder(tf.float32, [None, None])
    # Categorical likelihood
    y = Categorical(logits=nn(x, W_0, b_0, W_1, b_1, W_2, b_2, W_3, b_3))
    # We use a placeholder for the labels in anticipation of the training data.
    y_ph = tf.placeholder(tf.int32, [None])

    # Posteriors
    if str(sys.argv[4]) == 'normal':
        qW_0 = Empirical(
            params=tf.Variable(tf.random_normal([n_samp, F, F, 1, C])))
        qW_1 = Empirical(
            params=tf.Variable(tf.random_normal([n_samp, F, F, C, C])))
        qW_2 = Empirical(params=tf.Variable(
            tf.random_normal([n_samp, 7 * 7 * C, n_hidden],
                             stddev=std * (7 * 7 * C)**-.5)))
        qW_3 = Empirical(params=tf.Variable(
            tf.random_normal([n_samp, n_hidden, K],
                             stddev=std * (n_hidden)**-.5)))
        qb_0 = Empirical(params=tf.Variable(tf.random_normal([n_samp, C])))
        qb_1 = Empirical(params=tf.Variable(tf.random_normal([n_samp, C])))
        qb_2 = Empirical(params=tf.Variable(
            tf.random_normal([n_samp, n_hidden], stddev=std *
                             (7 * 7 * C)**-.5)))
        qb_3 = Empirical(params=tf.Variable(
            tf.random_normal([n_samp, K], stddev=std * (n_hidden)**-.5)))

    if str(sys.argv[4]) == 'laplace' or str(sys.argv[4]) == 'T':
        # Use a placeholder otherwise cannot assign a tensor > 2GB
        p0 = tf.placeholder(tf.float32, [n_samp, F, F, 1, C])
        p1 = tf.placeholder(tf.float32, [n_samp, F, F, C, C])
        p2 = tf.placeholder(tf.float32, [n_samp, 7 * 7 * C, n_hidden])
        p3 = tf.placeholder(tf.float32, [n_samp, n_hidden, K])
        pp0 = tf.placeholder(tf.float32, [n_samp, C])
        pp1 = tf.placeholder(tf.float32, [n_samp, C])
        pp2 = tf.placeholder(tf.float32, [n_samp, n_hidden])
        pp3 = tf.placeholder(tf.float32, [n_samp, K])

        w0 = tf.Variable(p0)
        w1 = tf.Variable(p1)
        w2 = tf.Variable(p2)
        w3 = tf.Variable(p3)
        b0 = tf.Variable(pp0)
        b1 = tf.Variable(pp1)
        b2 = tf.Variable(pp2)
        b3 = tf.Variable(pp3)

        # Empirical distribution
        qW_0 = Empirical(params=w0)
        qW_1 = Empirical(params=w1)
        qW_2 = Empirical(params=w2)
        qW_3 = Empirical(params=w3)
        qb_0 = Empirical(params=b0)
        qb_1 = Empirical(params=b1)
        qb_2 = Empirical(params=b2)
        qb_3 = Empirical(params=b3)

    if str(sys.argv[3]) == 'hmc':
        inference = ed.HMC(
            {
                W_0: qW_0,
                b_0: qb_0,
                W_1: qW_1,
                b_1: qb_1,
                W_2: qW_2,
                b_2: qb_2,
                W_3: qW_3,
                b_3: qb_3
            },
            data={y: y_ph})
    if str(sys.argv[3]) == 'sghmc':
        inference = ed.SGHMC(
            {
                W_0: qW_0,
                b_0: qb_0,
                W_1: qW_1,
                b_1: qb_1,
                W_2: qW_2,
                b_2: qb_2,
                W_3: qW_3,
                b_3: qb_3
            },
            data={y: y_ph})

    # Initialise the inference variables
    if str(sys.argv[3]) == 'hmc':
        inference.initialize(step_size=leap_size,
                             n_steps=step_no,
                             n_print=100,
                             scale={y: float(mnist.train.num_examples) / N})
    if str(sys.argv[3]) == 'sghmc':
        inference.initialize(step_size=leap_size,
                             friction=0.4,
                             n_print=100,
                             scale={y: float(mnist.train.num_examples) / N})

    if str(sys.argv[4]) == 'laplace' or str(sys.argv[4]) == 'T':
        return ((x, y), y_ph, W_0, b_0, W_1, b_1, W_2, b_2, qW_0, qb_0, qW_1,
                qb_1, qW_2, qb_2, qW_3, qb_3, inference, p0, p1, p2, p3, pp0,
                pp1, pp2, pp3, w0, w1, w2, w3, b0, b1, b2, b3)
    else:
        return ((x, y), y_ph, W_0, b_0, W_1, b_1, W_2, b_2, qW_0, qb_0, qW_1,
                qb_1, qW_2, qb_2, qW_3, qb_3, inference)
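This example assumes a convolutional nn; a minimal sketch consistent with the 7 * 7 * C flatten size, assuming 28x28 single-channel inputs (MNIST) and two stride-2 max-pools (the ReLU activations are assumptions):

def nn(x, W_0, b_0, W_1, b_1, W_2, b_2, W_3, b_3):
    h = tf.reshape(x, [-1, 28, 28, 1])                           # NHWC
    h = tf.nn.relu(tf.nn.conv2d(h, W_0, [1, 1, 1, 1], 'SAME') + b_0)
    h = tf.nn.max_pool(h, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')    # 28 -> 14
    h = tf.nn.relu(tf.nn.conv2d(h, W_1, [1, 1, 1, 1], 'SAME') + b_1)
    h = tf.nn.max_pool(h, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')    # 14 -> 7
    h = tf.reshape(h, [-1, 7 * 7 * C])                           # flatten
    h = tf.nn.relu(tf.matmul(h, W_2) + b_2)
    return tf.matmul(h, W_3) + b_3                               # logits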
Example no. 10
def ed_graph_init():
    # Priors
    if str(sys.argv[4]) == 'laplace':
        W_0 = Laplace(loc=tf.zeros([D, n_hidden]),
                      scale=(std**2 / D) * tf.ones([D, n_hidden]))
        W_1 = Laplace(loc=tf.zeros([n_hidden, K]),
                      scale=(std**2 / n_hidden) * tf.ones([n_hidden, K]))
        b_0 = Laplace(loc=tf.zeros(n_hidden),
                      scale=(std**2 / D) * tf.ones(n_hidden))
        b_1 = Laplace(loc=tf.zeros(K), scale=(std**2 / n_hidden) * tf.ones(K))

    if str(sys.argv[4]) == 'normal':
        W_0 = Normal(loc=tf.zeros([D, n_hidden]),
                     scale=std * D**(-.5) * tf.ones([D, n_hidden]))
        W_1 = Normal(loc=tf.zeros([n_hidden, K]),
                     scale=std * n_hidden**(-.5) * tf.ones([n_hidden, K]))
        b_0 = Normal(loc=tf.zeros(n_hidden),
                     scale=std * D**(-.5) * tf.ones(n_hidden))
        b_1 = Normal(loc=tf.zeros(K), scale=std * n_hidden**(-.5) * tf.ones(K))

    if str(sys.argv[4]) == 'T':
        W_0 = StudentT(df=df * tf.ones([D, n_hidden]),
                       loc=tf.zeros([D, n_hidden]),
                       scale=std**2 / D * tf.ones([D, n_hidden]))
        W_1 = StudentT(df=df * tf.ones([n_hidden, K]),
                       loc=tf.zeros([n_hidden, K]),
                       scale=std**2 / n_hidden * tf.ones([n_hidden, K]))
        b_0 = StudentT(df=df * tf.ones([n_hidden]),
                       loc=tf.zeros(n_hidden),
                       scale=std**2 / D * tf.ones(n_hidden))
        b_1 = StudentT(df=df * tf.ones([K]),
                       loc=tf.zeros(K),
                       scale=std**2 / n_hidden * tf.ones(K))

    x = tf.placeholder(tf.float32, [None, None])
    # Categorical likelihood
    y = Categorical(logits=nn(x, W_0, b_0, W_1, b_1))
    # We use a placeholder for the labels in anticipation of the training data.
    y_ph = tf.placeholder(tf.int32, [None])

    # Posteriors
    if str(sys.argv[4]) == 'normal':
        qW_0 = Empirical(params=tf.Variable(
            tf.random_normal([n_samp, D, n_hidden], stddev=std * (D**-.5))))
        qW_1 = Empirical(params=tf.Variable(
            tf.random_normal([n_samp, n_hidden, K],
                             stddev=std * (n_hidden**-.5))))
        qb_0 = Empirical(params=tf.Variable(
            tf.random_normal([n_samp, n_hidden], stddev=std * (D**-.5))))
        qb_1 = Empirical(params=tf.Variable(
            tf.random_normal([n_samp, K], stddev=std * (n_hidden**-.5))))

    if str(sys.argv[4]) == 'laplace' or str(sys.argv[4]) == 'T':
        # Use a placeholder otherwise cannot assign a tensor > 2GB
        w0 = tf.placeholder(tf.float32, [n_samp, D, n_hidden])
        w1 = tf.placeholder(tf.float32, [n_samp, n_hidden, K])
        b0 = tf.placeholder(tf.float32, [n_samp, n_hidden])
        b1 = tf.placeholder(tf.float32, [n_samp, K])

        # Empirical distributions, initialised from the placeholder-fed
        # samples (e.g. Laplace(0, 1) draws)
        qW_0 = Empirical(params=tf.Variable(w0))
        qW_1 = Empirical(params=tf.Variable(w1))
        qb_0 = Empirical(params=tf.Variable(b0))
        qb_1 = Empirical(params=tf.Variable(b1))

    if str(sys.argv[3]) == 'hmc':
        inference = ed.HMC({
            W_0: qW_0,
            b_0: qb_0,
            W_1: qW_1,
            b_1: qb_1
        },
                           data={y: y_ph})
    if str(sys.argv[3]) == 'sghmc':
        inference = ed.SGHMC({
            W_0: qW_0,
            b_0: qb_0,
            W_1: qW_1,
            b_1: qb_1
        },
                             data={y: y_ph})

    # Initialise the inference variables
    if str(sys.argv[3]) == 'hmc':
        inference.initialize(step_size=leap_size,
                             n_steps=step_no,
                             n_print=100,
                             scale={y: float(mnist.train.num_examples) / N})
    if str(sys.argv[3]) == 'sghmc':
        inference.initialize(step_size=leap_size,
                             friction=0.4,
                             n_print=100,
                             scale={y: float(mnist.train.num_examples) / N})

    if str(sys.argv[4]) == 'laplace' or str(sys.argv[4]) == 'T':
        return ((x, y), y_ph, W_0, b_0, W_1, b_1, qW_0, qb_0, qW_1, qb_1,
                inference, w0, w1, b0, b1)
    else:
        return ((x, y), y_ph, W_0, b_0, W_1, b_1, qW_0, qb_0, qW_1, qb_1,
                inference)
def ed_graph_2(disc=1):
	# Priors
	if str(sys.argv[4]) == 'laplace':
		W_0 = Laplace(loc=tf.zeros([D, n_hidden]), scale=tf.ones([D, n_hidden]))
		W_1 = Laplace(loc=tf.zeros([n_hidden, n_hidden]), scale=std**2*(n_hidden**-1)*tf.ones([n_hidden, n_hidden]))
		W_2 = Laplace(loc=tf.zeros([n_hidden, K]), scale=std**2*(n_hidden**-1)*tf.ones([n_hidden, K]))
		b_0 = Laplace(loc=tf.zeros(n_hidden), scale=tf.ones(n_hidden))
		b_1 = Laplace(loc=tf.zeros(n_hidden), scale=std**2*(n_hidden**-1)*tf.ones(n_hidden))
		b_2 = Laplace(loc=tf.zeros(K), scale=std**2*(n_hidden**-1)*tf.ones(K))

	if str(sys.argv[4]) == 'normal':
		W_0 = Normal(loc=tf.zeros([D, n_hidden]), scale=tf.ones([D, n_hidden]))
		W_1 = Normal(loc=tf.zeros([n_hidden, n_hidden]), scale=std*(n_hidden**-.5)*tf.ones([n_hidden, n_hidden]))
		W_2 = Normal(loc=tf.zeros([n_hidden, K]), scale=std*(n_hidden**-.5)*tf.ones([n_hidden, K]))
		b_0 = Normal(loc=tf.zeros(n_hidden), scale=tf.ones(n_hidden))
		b_1 = Normal(loc=tf.zeros(n_hidden), scale=std*(n_hidden**-.5)*tf.ones(n_hidden))
		b_2 = Normal(loc=tf.zeros(K), scale=std*(n_hidden**-.5)*tf.ones(K))

	if str(sys.argv[4]) == 'T':
		W_0 = StudentT(df=df*tf.ones([D, n_hidden]), loc=tf.zeros([D, n_hidden]), scale=tf.ones([D, n_hidden]))
		W_1 = StudentT(df=df*tf.ones([n_hidden, n_hidden]), loc=tf.zeros([n_hidden, n_hidden]), scale=std**2/n_hidden*tf.ones([n_hidden, n_hidden]))
		W_2 = StudentT(df=df*tf.ones([n_hidden, K]), loc=tf.zeros([n_hidden, K]), scale=std**2/n_hidden*tf.ones([n_hidden, K]))
		b_0 = StudentT(df=df*tf.ones([n_hidden]), loc=tf.zeros(n_hidden), scale=tf.ones(n_hidden))
		b_1 = StudentT(df=df*tf.ones([n_hidden]), loc=tf.zeros(n_hidden), scale=std**2/n_hidden*tf.ones(n_hidden))
		b_2 = StudentT(df=df*tf.ones([K]), loc=tf.zeros(K), scale=std**2/n_hidden*tf.ones(K))
	# Inputs
	x = tf.placeholder(tf.float32, [None, None])
	# Regression output
	y = Normal(loc=nn(x, W_0, b_0, W_1, b_1, W_2, b_2), scale=std_out*tf.ones([tf.shape(x)[0]]))
	# We use a placeholder for the labels in anticipation of the training data.
	y_ph = tf.placeholder(tf.float32, [None])

	# Use a placeholder for the pre-trained posteriors
	w0 = tf.placeholder(tf.float32, [n_samp, D, n_hidden])
	w1 = tf.placeholder(tf.float32, [n_samp, n_hidden, n_hidden])
	w2 = tf.placeholder(tf.float32, [n_samp, n_hidden, K])
	b0 = tf.placeholder(tf.float32, [n_samp, n_hidden])
	b1 = tf.placeholder(tf.float32, [n_samp, n_hidden])
	b2 = tf.placeholder(tf.float32, [n_samp, K])

	# Empirical distributions
	qW_0 = Empirical(params=tf.Variable(w0))
	qW_1 = Empirical(params=tf.Variable(w1))
	qW_2 = Empirical(params=tf.Variable(w2))
	qb_0 = Empirical(params=tf.Variable(b0))
	qb_1 = Empirical(params=tf.Variable(b1))
	qb_2 = Empirical(params=tf.Variable(b2))
	
	if str(sys.argv[3]) == 'hmc':	
		inference = ed.HMC({W_0: qW_0, b_0: qb_0, W_1: qW_1, 
			b_1: qb_1, W_2: qW_2, b_2: qb_2}, data={y: y_ph})
	if str(sys.argv[3]) == 'sghmc':	
		inference = ed.SGHMC({W_0: qW_0, b_0: qb_0, W_1: qW_1, 
			b_1: qb_1, W_2: qW_2, b_2: qb_2}, data={y: y_ph})

	# Initialise the inference variables
	if str(sys.argv[3]) == 'hmc':
		inference.initialize(step_size=disc*leap_size, n_steps=step_no, n_print=100)
	if str(sys.argv[3]) == 'sghmc':
		inference.initialize(step_size=disc*leap_size, friction=0.4, n_print=100)
	
	return ((x, y), y_ph, W_0, b_0, W_1, b_1, W_2, b_2, qW_0, qb_0, qW_1, qb_1, 
		qW_2, qb_2, inference, w0, w1, w2, b0, b1, b2)