Пример #1
0
 def joint_log_prob(z):
     """Unnormalized log posterior over the latent code z (denoising).

     Combines a standard-normal prior on z with an isotropic Gaussian
     likelihood (variance ``noise_var``) on the residual between the
     generator output G(z) and the noisy measurement.
     NOTE(review): relies on module-level ``gen``, ``noisy_mat4d``,
     ``dim_like``, ``z_dim`` and ``noise_var`` — defined elsewhere.
     """
     # Flattened residual between G(z) and the noisy observation.
     residual = tf.reshape(
         gen(z, reuse=tf.AUTO_REUSE, training=False) - tf.constant(noisy_mat4d),
         [dim_like])

     z_prior = tfd.MultivariateNormalDiag(
         loc=np.zeros(z_dim, dtype=np.float32),
         scale_diag=np.ones(z_dim, dtype=np.float32))
     noise_model = tfd.MultivariateNormalDiag(
         loc=np.zeros(dim_like, dtype=np.float32),
         scale_diag=np.sqrt(noise_var) * np.ones(dim_like, dtype=np.float32))

     return z_prior.log_prob(z) + noise_model.log_prob(residual)
Пример #2
0
 def joint_log_prob(z):
     """Unnormalized log posterior over the latent code z (inpainting).

     The Gaussian likelihood is evaluated only on the pixels selected by
     the boolean ``mask``; hidden pixels contribute nothing.
     NOTE(review): relies on module-level ``gen``, ``noisy_mat4d``,
     ``mask``, ``dim_inpaint``, ``z_dim`` and ``noise_var``.
     """
     residual = gen(z, reuse=tf.AUTO_REUSE, training=False) - tf.constant(noisy_mat4d)
     # Keep only the observed (visible) entries of the residual.
     observed = tf.boolean_mask(residual, mask)

     z_prior = tfd.MultivariateNormalDiag(
         loc=np.zeros(z_dim, dtype=np.float32),
         scale_diag=np.ones(z_dim, dtype=np.float32))
     noise_model = tfd.MultivariateNormalDiag(
         loc=np.zeros(dim_inpaint, dtype=np.float32),
         scale_diag=np.sqrt(noise_var) * np.ones(dim_inpaint, dtype=np.float32))

     return z_prior.log_prob(z) + noise_model.log_prob(observed)
Пример #3
0
    def joint_log_prob(z):
        """Unnormalized log posterior over z for sequential tile measurements.

        At iteration ``iter_no`` only ``iter_no`` square tiles of the image
        have been observed; the Gaussian likelihood covers exactly those
        pixels, while z always carries a standard-normal prior.
        NOTE(review): relies on module-level ``gen``, ``noisy_mat4d``,
        ``iter_no``, ``tile_size``, ``tile_tl_row``/``tile_tl_col``,
        ``rows``/``cols``, ``z_dim`` and ``noise_var``.
        """
        residual = gen(z, reuse=tf.AUTO_REUSE, training=False) - tf.constant(noisy_mat4d)
        z_prior = tfd.MultivariateNormalDiag(
            loc=np.zeros(z_dim, dtype=np.float32),
            scale_diag=np.ones(z_dim, dtype=np.float32))
        # Total number of observed scalars: iter_no tiles, 3 channels each.
        dim_window = iter_no*(tile_size**2)*3   #n_channels=3

        if iter_no == 0:
            # Nothing observed yet: the posterior is just the prior.
            return z_prior.log_prob(z)

        per_tile = int(dim_window/iter_no)  # scalars per tile: tile_size^2 * 3
        if iter_no == 1:
            # Single tile anchored at (tile_tl_row, tile_tl_col).
            observed = tf.reshape(
                tf.slice(residual, [0, tile_tl_row, tile_tl_col, 0],
                         [1, tile_size, tile_size, 3]),
                [per_tile])
        else:
            # One flattened slice per previously-observed tile, concatenated.
            tiles = [
                tf.reshape(
                    tf.slice(residual, [0, rows[k], cols[k], 0],
                             [1, tile_size, tile_size, 3]),
                    [per_tile])
                for k in range(iter_no)]
            observed = tf.concat(tiles, axis=0)

        noise_model = tfd.MultivariateNormalDiag(
            loc=np.zeros(dim_window, dtype=np.float32),
            scale_diag=np.sqrt(noise_var)*np.ones(dim_window, dtype=np.float32))
        return z_prior.log_prob(z) + noise_model.log_prob(observed)
Пример #4
0
# Trace plots: the first 25 latent coordinates of the effective samples.
plt.figure(figsize=(15, 6))
for ii in range(25):
    plt.subplot(5,5,ii+1)
    plt.plot(eff_samps[:, ii]);
    plt.ylabel(r'z_{}'.format(ii))
plt.tight_layout()
plt.savefig('./{}/eff_samples'.format(sample_dir))
plt.show()


# Draw a fresh noisy measurement batch; noisy_meas is defined elsewhere.
x_true3d, noisy_mat4d = noisy_meas(batch_size)


with tf.Graph().as_default() as g:
    # Batch of latent codes to be fed from the stored effective samples.
    z1 = tf.placeholder(tf.float32, shape=[batch_size, z_dim])         
    gen_out = gen(z1, reuse=tf.AUTO_REUSE, training=False)
    # Residual between the generator output and the noisy measurement.
    diff_img = gen_out - tf.constant(noisy_mat4d)

    # Restore all generator variables from the trained checkpoint.
    variables_to_restore = slim.get_variables_to_restore()   
    saver = tf.train.Saver(variables_to_restore)

with tf.Session(graph=g) as sess:
    saver.restore(sess, PARAMS.model_path)
    
    loss = np.zeros((n_eff))  # NOTE(review): never written in the visible loop — confirm downstream use (fragment may be truncated)
    x_mean = np.zeros((PARAMS.img_h, PARAMS.img_w, PARAMS.img_c))
    x2_mean = np.zeros((PARAMS.img_h, PARAMS.img_w, PARAMS.img_c))    
    # Accumulate per-batch means of G(z) and G(z)^2 over the sample batches;
    # presumably normalized by n_iter later to estimate posterior moments.
    for ii in range(n_iter):
        g_z, diff = sess.run([gen_out, diff_img], feed_dict={z1:eff_samps[ii*batch_size:(ii+1)*batch_size, :]})
        x_mean = x_mean + np.mean(g_z, axis=0)
        x2_mean = x2_mean + np.mean(g_z**2, axis=0)
Пример #5
0
# NOTE(review): truncated fragment — the two lines below are the tail of a
# kernel construction (the opening call and the assignment, presumably
# `adaptive_hmc = tfp.mcmc.SimpleStepSizeAdaptation(`, are not visible here).
                    tfp.mcmc.HamiltonianMonteCarlo(target_log_prob_fn=unnormalized_posterior, step_size=np.float32(0.1), num_leapfrog_steps=3),
                   num_adaptation_steps=int(0.8*burn))    
    

   
    # Random standard-normal initialization for all chains in the batch.
    initial_state = tf.constant(np.random.normal(size=[batch_size, z_dim]).astype(np.float32))
    # Run the chain, tracing the adapted step size and the log acceptance
    # ratio at every step (N results after `burn` burn-in steps).
    samples, [st_size, log_accept_ratio] = tfp.mcmc.sample_chain(
      num_results=N,
      num_burnin_steps=burn,
      current_state=initial_state,
      kernel=adaptive_hmc,
      trace_fn=lambda _, pkr: [pkr.inner_results.accepted_results.step_size,
                             pkr.inner_results.log_accept_ratio])
    # Mean acceptance probability: min(1, exp(log_accept_ratio)), averaged.
    p_accept = tf.reduce_mean(tf.exp(tf.minimum(log_accept_ratio, 0.)))

     
    # Placeholder for feeding post-burn-in samples through the generator.
    zz = tf.placeholder(tf.float32, shape=[N-burn, z_dim])    
    gen_out1 = gen(zz, reuse=tf.AUTO_REUSE, training=False)
    
    # Restore generator weights from the trained checkpoint.
    variables_to_restore = slim.get_variables_to_restore()   
    saver = tf.train.Saver(variables_to_restore)
    

with tf.Session(graph=g) as sess:
    
    saver.restore(sess, PARAMS.model_path)    
    
    # Materialize the chain and persist it for later analysis.
    samples_ = sess.run(samples)    
    np.save(save_dir+'/samples.npy', samples_)
    print('acceptance ratio = {}'.format(sess.run(p_accept)))
Пример #6
0
# Sanity check: print the shape of one image drawn from the data pool.
print(data_pool.batch()[0,:,:,:].shape)

# Flattened dimensionality of a 64x64 RGB image.
dim_like = 64*64*3
# NOTE(review): the commented lines below generated and saved the noise
# realization once; it is now loaded from disk for reproducibility.
#noise_mat3d = np.random.multivariate_normal(mean=np.zeros((dim_like)), cov=np.eye(dim_like, dim_like)*like_var, size=1)
#np.save('./data/noise3d_var{}_seed{}'.format(like_var, seed_no), np.reshape(noise_mat3d, [64,64,3]))
#noisy_mat3d = data_pool.batch()[0,:,:,:] + np.reshape(noise_mat3d, [64,64,3])
noisy_mat3d = data_pool.batch()[0,:,:,:] + np.load('./data/noise3d_var{}_seed{}.npy'.format(PARAMS.noise_var, seed_no))
# Replicate the single noisy image across the batch dimension (float32).
noisy_mat4d = np.tile(noisy_mat3d, (batch_size, 1, 1, 1)).astype(np.float32)


with tf.Graph().as_default() as g:
    z1 = tf.placeholder(tf.float32, shape=[batch_size, z_dim])    
    # ScipyOptimizerInterface optimizes tf.Variables only, so the latent code
    # is copied from the placeholder into the non-trainable variable `dummy`.
    dummy = tf.Variable(name='dummy', trainable=False, initial_value=z1)   
    assign_op1 = dummy.assign(z1)
    
    gen_out = gen(dummy, reuse=tf.AUTO_REUSE, training=False)
    diff_img = gen_out - tf.constant(noisy_mat4d)
    
    # MAP objective: data-fit term plus Gaussian-prior penalty on the latent.
    # NOTE(review): the prior term is scaled by noise_var rather than the data
    # term by 1/noise_var — same minimizer up to a constant factor; confirm
    # this matches the intended posterior weighting.
    loss = 0.5*tf.linalg.norm(diff_img)**2 + (0.5*PARAMS.noise_var*tf.linalg.norm(dummy)**2)
    
    optimizer = tf.contrib.opt.ScipyOptimizerInterface(loss, var_list=[dummy], options={'maxiter': 10000, 'disp':True})
    
    
    
    
    # Restore everything from the checkpoint except the local `dummy` variable.
    variables = slim.get_variables_to_restore()    
    variables_to_restore = [v for v in variables if v.name.split(':')[0]!='dummy'] 
    
    saver = tf.train.Saver(variables_to_restore)

# Random starting point for the MAP optimization.
z_sample = np.random.normal(size=[batch_size, z_dim])