Ejemplo n.º 1
0
    def joint_log_prob(z):
        """Unnormalized log posterior of the latent code ``z``.

        Combines a standard-normal prior over ``z`` with an isotropic
        Gaussian likelihood evaluated only on the measured (visible) tiles
        of the residual between the generator output and the noisy image.

        Args:
            z: latent-code tensor fed to the generator ``gen``.

        Returns:
            Scalar tensor ``prior.log_prob(z) + like.log_prob(visible residual)``.

        NOTE(review): relies on enclosing-scope names (gen, noisy_mat4d,
        z_dim, iter_no, tile_size, tile_tl_row/tile_tl_col, rows/cols,
        noise_var) — confirm against the surrounding script.
        """
        gen_out = gen(z, reuse=tf.AUTO_REUSE, training=False)
        diff_img = gen_out - tf.constant(noisy_mat4d)
        prior = tfd.MultivariateNormalDiag(loc=np.zeros(z_dim,
                                                        dtype=np.float32),
                                           scale_diag=np.ones(
                                               z_dim, dtype=np.float32))
        dim_window = iter_no * (tile_size**2)
        tile_len = int(dim_window / iter_no)  # pixels per tile = tile_size**2

        if iter_no == 1:
            # Single measured tile whose top-left corner is
            # (tile_tl_row, tile_tl_col).
            diff_img_visible = tf.reshape(
                tf.slice(diff_img, [0, tile_tl_row, tile_tl_col, 0],
                         [1, tile_size, tile_size, 1]),
                [tile_len])
        else:
            # Concatenate the flattened residuals of every measured tile.
            tiles = [
                tf.reshape(
                    tf.slice(diff_img, [0, rows[ii], cols[ii], 0],
                             [1, tile_size, tile_size, 1]),
                    [tile_len]) for ii in range(iter_no)
            ]
            diff_img_visible = tf.concat(tiles, axis=0)

        # Fix: the likelihood and the return were duplicated verbatim in both
        # branches of the original — build them once after the branch.
        like = tfd.MultivariateNormalDiag(
            loc=np.zeros(dim_window, dtype=np.float32),
            scale_diag=np.sqrt(noise_var) *
            np.ones(dim_window, dtype=np.float32))
        return prior.log_prob(z) + like.log_prob(diff_img_visible)
Ejemplo n.º 2
0
 def joint_log_prob(z):
     """Unnormalized log posterior of the latent code ``z``.

     Standard-normal prior over ``z`` plus an isotropic Gaussian likelihood
     over the flattened residual between the generator output and the noisy
     observation; when random sampling is enabled, only the residual entries
     at ``indices`` enter the likelihood.

     NOTE(review): relies on enclosing-scope names (gen, noisy_mat4d,
     dim_like, random_sampling, indices, z_dim, args) — confirm against
     the surrounding script.
     """
     gen_out = gen(z, reuse=tf.AUTO_REUSE, training=False)
     diff_img = tf.reshape(gen_out - tf.constant(noisy_mat4d), [dim_like])

     # Fix: test truthiness directly instead of comparing `== True`.
     if random_sampling:
         # Keep only the randomly sampled measurement locations.
         diff_img = tf.gather_nd(diff_img, indices)

     prior = tfd.MultivariateNormalDiag(
         loc=np.zeros(z_dim, dtype=np.float32),
         scale_diag=np.ones(z_dim, dtype=np.float32))
     like = tfd.MultivariateNormalDiag(
         loc=np.zeros(args.n_meas, dtype=np.float32),
         scale_diag=np.sqrt(args.noise_var) *
         np.ones(args.n_meas, dtype=np.float32))

     return (prior.log_prob(z) + like.log_prob(diff_img))
Ejemplo n.º 3
0
    def joint_log_prob(z):
        """Joint log probability of latent ``z`` and the full noisy image.

        Standard-normal prior on ``z``; isotropic Gaussian likelihood on the
        flattened residual between the generator output and the observation.

        NOTE(review): closes over gen, noisy_mat4d, dim_like, z_dim and
        noise_var from the enclosing scope.
        """
        gen_out = gen(z, reuse=tf.AUTO_REUSE, training=False)
        residual = tf.reshape(gen_out - tf.constant(noisy_mat4d), [dim_like])

        # Prior: N(0, I) over the latent space.
        prior = tfd.MultivariateNormalDiag(
            loc=np.zeros(z_dim, dtype=np.float32),
            scale_diag=np.ones(z_dim, dtype=np.float32))

        # Likelihood: N(0, noise_var * I) over the residual entries.
        noise_scale = np.sqrt(noise_var) * np.ones(dim_like, dtype=np.float32)
        like = tfd.MultivariateNormalDiag(
            loc=np.zeros(dim_like, dtype=np.float32), scale_diag=noise_scale)

        log_prior = prior.log_prob(z)
        log_like = like.log_prob(residual)
        return log_prior + log_like
Ejemplo n.º 4
0
    def joint_log_prob(z):
        """Log posterior of ``z`` for the inpainting setup.

        Standard-normal prior on ``z``; isotropic Gaussian likelihood over
        only the residual pixels selected by the boolean ``mask``.

        NOTE(review): closes over gen, noisy_mat4d, mask, z_dim,
        dim_inpaint and noise_var from the enclosing scope.
        """
        gen_out = gen(z, reuse=tf.AUTO_REUSE, training=False)
        residual = gen_out - tf.constant(noisy_mat4d)
        # Only the unmasked (visible) pixels contribute to the likelihood.
        visible = tf.boolean_mask(residual, mask)

        prior = tfd.MultivariateNormalDiag(
            loc=np.zeros(z_dim, dtype=np.float32),
            scale_diag=np.ones(z_dim, dtype=np.float32))

        noise_scale = (np.sqrt(noise_var) *
                       np.ones(dim_inpaint, dtype=np.float32))
        like = tfd.MultivariateNormalDiag(
            loc=np.zeros(dim_inpaint, dtype=np.float32),
            scale_diag=noise_scale)

        return prior.log_prob(z) + like.log_prob(visible)
Ejemplo n.º 5
0
#noisy_mat4d = np.tile(noisy_mat3d, (batch_size, 1, 1, 1)).astype(np.float32)
#noisy_mat4d = np.load('./oed_experiments/random_sample_zmap/tile2x2/digit{}_var{}/noisy_mat4d.npy'.format(PARAMS.digit, PARAMS.noise_var))

if iter_no != 0:
    # Each row of var_info holds the (row, col) top-left corner of one
    # measured tile — presumably written by an earlier OED step; confirm.
    # Fix: the original appended a no-op `.format(sample_dir)` to a string
    # with no placeholders; removed (the loaded path is unchanged).
    var_info = np.load(sample_dir + '/../var_info.npy')
    if iter_no == 1:
        # Single tile: scalar corner coordinates.
        tile_tl_row = int(var_info[iter_no - 1, 0])
        tile_tl_col = int(var_info[iter_no - 1, 1])
    else:
        # Multiple tiles: integer arrays of corner coordinates.
        rows = var_info[:iter_no, 0].astype(np.int32)
        cols = var_info[:iter_no, 1].astype(np.int32)

with tf.Graph().as_default() as g:
    # Placeholder for a batch of latent codes fed to the generator.
    z1 = tf.placeholder(tf.float32, shape=[batch_size, z_dim])
    gen_out = gen(z1, reuse=tf.AUTO_REUSE, training=False)
    # Residual between the generated image and the (constant) noisy image.
    diff_img = gen_out - tf.constant(noisy_mat4d)

    # Restore all generator variables from the WGAN-GP checkpoint.
    model_path = './checkpoints/mnist_wgan_gp1000/Epoch_(999)_(171of171).ckpt'
    variables_to_restore = slim.get_variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

with tf.Session(graph=g) as sess:
    # Load the pretrained generator weights before evaluating the graph.
    saver.restore(sess, model_path)

    # Accumulators: per-sample loss plus running first/second pixel moments.
    loss = np.zeros((n_eff))
    x_mean = np.zeros((28, 28, 1))
    x2_mean = np.zeros((28, 28, 1))
    for ii in range(n_iter):
        # NOTE(review): this excerpt is truncated — the feed_dict (and the
        # rest of the loop body) continues beyond the visible snippet.
        g_z, diff = sess.run([gen_out, diff_img],
                             feed_dict={
Ejemplo n.º 6
0
    #initial_state = tf.constant(np.zeros((batch_size, z_dim)).astype(np.float32))
    # Start the chain from a random standard-normal latent draw.
    initial_state = tf.constant(
        np.random.normal(size=[batch_size, z_dim]).astype(np.float32))
    # Run the (adaptive) HMC kernel; the trace_fn records the step size and
    # the log acceptance ratio of each step.
    samples, [st_size, log_accept_ratio] = tfp.mcmc.sample_chain(
        num_results=N,
        num_burnin_steps=burn,
        current_state=initial_state,
        kernel=adaptive_hmc,
        trace_fn=lambda _, pkr: [
            pkr.inner_results.accepted_results.step_size, pkr.inner_results.
            log_accept_ratio
        ])
    # Mean acceptance probability: exp(min(log ratio, 0)) caps each term at 1.
    p_accept = tf.reduce_mean(tf.exp(tf.minimum(log_accept_ratio, 0.)))

    # Placeholder to later push post-burn-in samples through the generator.
    zz = tf.placeholder(tf.float32, shape=[N - burn, z_dim])
    gen_out1 = gen(zz, reuse=tf.AUTO_REUSE, training=False)

    # Restore generator weights from the WGAN-GP checkpoint.
    model_path = './checkpoints/mnist_wgan_gp1000/Epoch_(999)_(171of171).ckpt'
    variables_to_restore = slim.get_variables_to_restore()
    #variables_to_restore = [v for v in variables if v.name.split(':')[0]!='dummy']
    saver = tf.train.Saver(variables_to_restore)

with tf.Session(graph=g) as sess:
    # Restore the generator, draw the MCMC chain, and persist it to disk.
    saver.restore(sess, model_path)

    chain = sess.run(samples)
    np.save(save_dir + '/samples.npy', chain)
    #np.save(save_dir+'/noisy_mat4d.npy', noisy_mat4d)
    #print('acceptance ratio = {}'.format(sess.run(p_accept)))
Ejemplo n.º 7
0
        cov=np.eye(dim_like, dim_like) * PARAMS.noise_var,
        size=1)
    np.save(meas_path + '/noise_mat3d.npy', noise_mat3d)
else:
    noise_mat3d = np.load(meas_path + '/noise_mat3d.npy')

# Noisy observation: ground-truth sample plus the measurement noise,
# reshaped to image dimensions and tiled across the batch axis so it can
# be subtracted from the batched generator output.
y_hat3d = test_sample + np.reshape(noise_mat3d,
                                   [PARAMS.img_h, PARAMS.img_w, PARAMS.img_c])
y_hat4d = np.tile(y_hat3d, (PARAMS.batch_size, 1, 1, 1)).astype(np.float32)

with tf.Graph().as_default() as g:
    # Optimization graph: `dummy` is a non-trainable tf.Variable holding the
    # latent code, (re)settable from the placeholder z1 via assign_op1.
    z1 = tf.placeholder(tf.float32, shape=[PARAMS.batch_size, PARAMS.z_dim])
    dummy = tf.Variable(name='dummy', trainable=False, initial_value=z1)
    assign_op1 = dummy.assign(z1)

    # Residual between the generator output for `dummy` and the observation.
    gen_out = gen(dummy, reuse=tf.AUTO_REUSE, training=False)
    diff_img = gen_out - tf.constant(y_hat4d)

    # Objective: 0.5*||residual||^2 + 0.5*noise_var*||z||^2 — data misfit
    # plus a noise_var-weighted L2 penalty on the latent code.
    loss = 0.5 * tf.linalg.norm(diff_img)**2 + (0.5 * PARAMS.noise_var *
                                                tf.linalg.norm(dummy)**2)
    # SciPy-backed optimizer minimizing `loss` over `dummy` only.
    optimizer = tf.contrib.opt.ScipyOptimizerInterface(loss,
                                                       var_list=[dummy],
                                                       options={
                                                           'maxiter': 10000,
                                                           'disp': True
                                                       })

    # Restore every checkpoint variable except the local `dummy` latent,
    # which is not part of the saved model.
    variables = slim.get_variables_to_restore()
    variables_to_restore = [
        v for v in variables if v.name.split(':')[0] != 'dummy'
    ]