Example #1
    def sample(self, model, samples, r_atoms, z_atoms, n_samples):

        curr_sample = self.curr_sample
        curr_log_amp = self.curr_log_amp
        curr_prob = self.curr_prob

        acceptance_total = 0
        for _ in range(self.correlation_length):
            # Local energy at the current walkers drives this step's gradient
            e_loc, _ = compute_local_energy(
                r_atoms, curr_sample, z_atoms,
                model)  # r_atoms, r_electrons, z_atoms, model
            e_loc_centered = e_loc - tf.reduce_mean(e_loc)
            curr_grad = extract_grads(model, curr_sample, e_loc_centered,
                                      n_samples)

            # Drift the current walkers along the gradient before proposing
            intermediate_sample = curr_sample + curr_grad

            # next sample
            new_sample = self.distr.resample(intermediate_sample)
            new_log_amp, _, _ = self.model(new_sample)
            new_prob = self.to_prob(new_log_amp)

            # Local energy at the proposed walkers, for their gradient
            e_loc, _ = compute_local_energy(
                r_atoms, new_sample, z_atoms,
                model)  # r_atoms, r_electrons, z_atoms, model
            e_loc_centered = e_loc - tf.reduce_mean(e_loc)
            new_sample_grad = extract_grads(model, new_sample, e_loc_centered,
                                            n_samples)

            # update sample
            alpha = new_prob / curr_prob
            tf.debugging.check_numerics(alpha, 'houston, we have a problem')

            mask = alpha > self.alpha_distr.sample(alpha.shape)
            stacked_mask = tf.tile(tf.reshape(mask, (-1, 1, 1)),
                                   (1, *new_sample.shape[1:]))

            curr_sample = tf.where(stacked_mask, new_sample, curr_sample)
            curr_log_amp = tf.where(mask, new_log_amp, curr_log_amp)
            curr_prob = tf.where(mask, new_prob, curr_prob)

            acceptance_total += tf.reduce_mean(tf.cast(mask, dtype))

        self.curr_sample = curr_sample
        self.curr_log_amp = curr_log_amp
        self.curr_prob = curr_prob

        return curr_sample, curr_log_amp, acceptance_total / self.correlation_length
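The acceptance update above hinges on broadcasting the per-walker Boolean mask up to the full configuration shape so that tf.where swaps whole walkers at once. A minimal self-contained sketch of that masking pattern (shapes and names are illustrative, not taken from the original):

import tensorflow as tf

n_samples, n_electrons = 4, 2
curr = tf.random.normal((n_samples, n_electrons, 3))
new = tf.random.normal((n_samples, n_electrons, 3))

# One acceptance decision per walker
alpha = tf.random.uniform((n_samples,))
mask = alpha > tf.random.uniform(alpha.shape)

# Tile (n_samples,) -> (n_samples, n_electrons, 3) so tf.where selects
# entire configurations, as in the sample() loop above
stacked_mask = tf.tile(tf.reshape(mask, (-1, 1, 1)), (1, *new.shape[1:]))
curr = tf.where(stacked_mask, new, curr)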
Example #2
def branching_factor(R_old, R_new, tau, E_T):
    e_loc_old = compute_local_energy(r_atoms, R_old, z_atoms, model)
    e_loc_new = compute_local_energy(r_atoms, R_new, z_atoms, model)
    # e_old = tf.reduce_mean(e_loc_old)  # for energy
    # e_new = tf.reduce_mean(e_loc_new)
    return tf.exp(-tau * (0.5 * (e_loc_new + e_loc_old) - E_T))
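In diffusion Monte Carlo this factor is used as a branching weight: each walker is replicated or killed with expected multiplicity equal to its weight. A hedged sketch of that birth/death step (branch_walkers and the stochastic-rounding scheme are illustrative assumptions, not part of the original):

import tensorflow as tf

def branch_walkers(R, weights):
    # weights: shape (n_walkers,). Stochastic rounding: walker i survives
    # as int(w_i + u_i) copies with u_i ~ U[0, 1), so the expected copy
    # count equals its weight
    u = tf.random.uniform(tf.shape(weights))
    n_copies = tf.cast(weights + u, tf.int32)
    return tf.repeat(R, n_copies, axis=0)

# weights = branching_factor(R_old, R_new, tau, E_T)  # one weight per walker
# R_new = branch_walkers(R_new, weights)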
Example #3
def green_branching(R_old, R_new, tau):
    # for energy
    e_loc_old = compute_local_energy(r_atoms, R_old, z_atoms, model)
    e_loc_new = compute_local_energy(r_atoms, R_new, z_atoms, model)
    return tf.exp(-0.5 * (e_loc_new + e_loc_old) * tau)
Example #4
system = 'Be'
DIR = ''
config = {}
config['n_pretrain_batches'] = 1

pretrainer = ''

vmc_sampler = MetropolisHasting(hydrogen_log_psi, pretrainer, sample_space,
                                n_samples, n_electrons, correlation_length, 10,
                                n_atoms, r_atoms, [1], n_spin_up)
samples = tf.random.normal((n_samples, n_electrons, 3))
vmc_samples, _, _ = vmc_sampler.sample(samples)
model = hydrogen_log_psi

R_new = samples
e_loc = compute_local_energy(r_atoms, R_new, z_atoms, model)
E_T = tf.reduce_mean(e_loc)
tau = 0.01

block_size = 10

writer = tf.summary.create_file_writer('runs/11')
with writer.as_default():
    for block in range(n_blocks):

        # vmc energy
        vmc_samples, _, _ = vmc_sampler.sample(vmc_samples)
        vmc_e_loc = compute_local_energy(r_atoms, vmc_samples, z_atoms, model)
        vmc_e_mean = tf.reduce_mean(vmc_e_loc)

        r_vmc_hist = tf.linalg.norm(vmc_samples, axis=-1)
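block_size and n_blocks point at the usual blocked estimate of the VMC energy and its statistical error. The accumulation itself is not shown in the snippet, so the following is a sketch under the assumption that one vmc_e_mean is collected per block (the numbers are dummy values for illustration only):

import tensorflow as tf

# One entry per block, e.g. block_means.append(vmc_e_mean) inside the loop
block_means = tf.constant([-14.61, -14.64, -14.62, -14.63])

e_mean = tf.reduce_mean(block_means)
# Standard error over blocks; reliable once block_size exceeds the
# autocorrelation time of the sampler
e_err = tf.math.reduce_std(block_means) / tf.sqrt(
    tf.cast(tf.size(block_means), block_means.dtype))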
Example #5
                                      r_atoms,
                                      [1],
                                      n_spin_up)

    samples = tf.random.normal((n_samples, n_electrons, 3))

    # Unsupervised training

    for _ in range(1):
        initial_samples = model_sampler.initialize_samples()  # intended as the argument to sample() below
        samples, amplitudes, acceptance = model_sampler.sample(samples)
        print('Acceptance percentage: ', acceptance * 100.)

        # Compute the gradients

        e_loc = compute_local_energy(r_atoms, samples, z_atoms, model)  # r_atoms, r_electrons, z_atoms, model
        e_loc_centered = e_loc - tf.reduce_mean(e_loc)
        grads = extract_grads(model, samples, e_loc_centered, n_samples)

        # Update the model
        optimizer.apply_gradients(zip(grads, model.trainable_weights))
        print_neat('Energy: ', tf.reduce_mean(e_loc), 5)

    # Mean absolute gradient across all trainable weights
    n_grads = sum(int(tf.size(grad)) for grad in grads)
    mean_abs_grad = sum(float(tf.reduce_sum(tf.math.abs(grad))) for grad in grads) / n_grads
    print_neat('Mean abs update: ', mean_abs_grad, 5)
    distr = RandomWalker(tf.zeros(3, dtype=dtype),
                         tf.eye(3, dtype=dtype) * 0.5,
                         tf.zeros(3, dtype=dtype),
                         tf.eye(3, dtype=dtype) * 0.2)
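extract_grads itself is not shown in any of these snippets. In this training loop it supplies parameter gradients from centered local energies, for which a common implementation is the REINFORCE-style VMC gradient estimator sketched below (the three-output model signature follows Example #1; the rest is an assumption, not the original function):

import tensorflow as tf

def extract_grads_sketch(model, samples, e_loc_centered, n_samples):
    # grad <E> = 2 <(E_L - <E_L>) grad log|psi|>; the centered local
    # energies are treated as constants, so no gradient flows through them
    e_loc_centered = tf.stop_gradient(e_loc_centered)
    with tf.GradientTape() as tape:
        log_amp, _, _ = model(samples)
        loss = 2. * tf.reduce_sum(e_loc_centered * log_amp) / n_samples
    return tape.gradient(loss, model.trainable_weights)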
Example #6
def green_branching(R_new, R_old, tau, E_T):
    # for energy
    e_loc_old = compute_local_energy(r_atoms, R_old, z_atoms, model)
    e_loc_new = compute_local_energy(r_atoms, R_new, z_atoms, model)
    return tf.exp(-tau * (0.5 * (e_loc_new + e_loc_old) - E_T))
Example #7
correlation_length = 25
system = 'Be'
DIR = ''
config = {}
config['n_pretrain_batches'] = 1

pretrainer = ''

vmc_sampler = MetropolisHasting(hydrogen_log_psi, pretrainer, sample_space,
                                n_samples, n_electrons, correlation_length, 10,
                                n_atoms, r_atoms, [1], n_spin_up)
samples = tf.random.normal((n_samples, n_electrons, 3))
vmc_samples, _, _ = vmc_sampler.sample(samples)

model = hydrogen_log_psi
R_new = samples
e_loc = compute_local_energy(r_atoms, R_new, z_atoms, model)
E_T = tf.reduce_mean(e_loc)
tau = 0.01

block_size = 10

branch = True
writer = tf.summary.create_file_writer('runs/dmc_test3')
with writer.as_default():
    for block in range(n_blocks):
        for iteration in range(block_size):
            for electron in range(n_electrons):
                R_old = R_new
                R_new = new_move(R_old, tau, electron,
                                 model)  # (n_samples, n_electrons, 3)
                log_phi_old, sign_old, _, _, _ = model(R_old)
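The snippet cuts off right after evaluating the old amplitude. A typical continuation accepts or rejects each proposed move with a fixed-node check on the wavefunction sign; the sketch below assumes the five-output model signature from the line above and is not the original code:

import tensorflow as tf

def accept_move(model, R_old, R_new):
    log_phi_old, sign_old, _, _, _ = model(R_old)
    log_phi_new, sign_new, _, _, _ = model(R_new)

    # Metropolis ratio for |phi|^2, formed in log space for stability
    ratio = tf.exp(2. * (log_phi_new - log_phi_old))

    # Fixed-node constraint: a sign flip means the move crossed a node,
    # so it is always rejected
    node_cross = tf.not_equal(sign_old, sign_new)
    accept = tf.logical_and(tf.random.uniform(tf.shape(ratio)) < ratio,
                            tf.logical_not(node_cross))

    # Broadcast the per-walker decision over (n_electrons, 3)
    mask = tf.reshape(accept, (-1, 1, 1))
    return tf.where(mask, R_new, R_old)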