Example #1
def saving_graph(directory='/tmp/test-saving-model'):
    graph = htf.graph_builder(0, output_forces=False)
    pos_norm = tf.norm(graph.positions, axis=1)
    graph.save_tensor(pos_norm, 'v1')
    graph.running_mean(pos_norm, 'v2')
    graph.save(directory)
    return directory
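A model directory like the one returned above only does something once it is attached to a HOOMD run. Below is a minimal driver sketch, assuming the same hoomd / hoomd.htf calls used in Example #21 further down, and assuming that values stored via save_tensor / running_mean can be read back by name with htf.load_variables:

import hoomd
import hoomd.md
import hoomd.htf as htf

model_dir = saving_graph()
hoomd.context.initialize()
with htf.tfcompute(model_dir) as tfcompute:
    # small 2D lattice, same setup as the load_variables test in Example #21
    hoomd.init.create_lattice(unitcell=hoomd.lattice.sq(a=4.0), n=[3, 3])
    hoomd.md.integrate.mode_standard(dt=0.005)
    hoomd.md.integrate.nve(
        group=hoomd.group.all()).randomize_velocities(kT=2, seed=2)
    # NN=0 and output_forces=False, so no neighbor list is passed here
    tfcompute.attach(save_period=1)
    hoomd.run(10)
# read the stored values back by name (assumed to match 'v1'/'v2' above)
vars = htf.load_variables(model_dir, ['v1', 'v2'])
print(vars['v1'], vars['v2'])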
Example #2
def benchmark_nonlist_graph(directory='/tmp/benchmark-nonlist-model'):
    graph = htf.graph_builder(0, output_forces=True)
    ps = tf.norm(graph.positions, axis=1)
    energy = graph.safe_div(1., ps)
    force = graph.compute_forces(energy)
    graph.save(directory, force_tensor=force, out_nodes=[energy])
    return directory
Example #3
def trainable_graph(NN, directory='/tmp/test-trainable-model'):
    graph = htf.graph_builder(NN)
    nlist = graph.nlist[:, :, :3]
    # get r
    r = tf.norm(nlist, axis=2)
    # compute 1 / r while safely treating r = 0.
    # pairwise energy. Double count -> divide by 2
    epsilon = tf.Variable(1.0, name='lj-epsilon')
    sigma = tf.Variable(1.0, name='lj-sigma')
    tf.summary.scalar('lj-epsilon', epsilon)
    inv_r6 = graph.safe_div(sigma**6, r**6)
    p_energy = epsilon / 2.0 * (inv_r6 * inv_r6 - inv_r6)
    # sum over pairwise energy
    energy = tf.reduce_sum(p_energy, axis=1)
    check = tf.check_numerics(p_energy, 'Your tensor is invalid')
    forces = graph.compute_forces(energy)
    tf.summary.histogram('forces', forces)
    optimizer = tf.train.AdamOptimizer(1e-4)
    gvs = optimizer.compute_gradients(energy)
    print(gvs)
    # capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var)
    # for grad, var in gvs]
    train_op = optimizer.apply_gradients(gvs)
    # non-trainable bookkeeping ops
    # reduce first so the value is independent of batch size
    avg_energy = graph.running_mean(tf.reduce_sum(energy), 'avg-energy')
    # check = tf.add_check_numerics_ops()
    graph.save(force_tensor=forces,
               model_directory=directory,
               out_nodes=[train_op, check, avg_energy])
    return directory
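The commented-out capped_gvs lines above sketch gradient clipping. If that variant is enabled, gradients that come back as None (variables that do not feed the energy) should be dropped first, because tf.clip_by_value cannot take None. A minimal sketch reusing the optimizer and gvs defined above:

    # clip each gradient to [-1, 1]; skip variables with no gradient
    capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var)
                  for grad, var in gvs if grad is not None]
    train_op = optimizer.apply_gradients(capped_gvs)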
Example #4
def lj_force_matching(NN=15, directory='/tmp/test-lj-force-matching'):
    graph = htf.graph_builder(NN, output_forces=False)
    # make trainable variables
    epsilon = tf.Variable(0.9, name='lj-epsilon', trainable=True)
    sigma = tf.Variable(1.1, name='lj-sigma', trainable=True)
    # get LJ potential using our variables
    # uses built in nlist_rinv which provides
    # r^-1 with each neighbor
    inv_r6 = sigma**6 * graph.nlist_rinv**6
    # use 2 * epsilon because nlist is double-counted
    p_energy = 2.0 * epsilon * (inv_r6**2 - inv_r6)
    # sum over pairs to get total energy
    energy = tf.reduce_sum(p_energy, axis=1, name='energy')
    # compute forces
    computed_forces = graph.compute_forces(energy)
    # compare hoomd-blue forces (graph.forces) with our
    # computed forces
    minimizer, loss = htf.force_matching(graph.forces[:, :3],
                                         computed_forces[:, :3],
                                         learning_rate=1e-2)
    # save loss so we can visualize later
    graph.save_tensor(loss, 'loss')
    # Make sure to have minimizer in out_nodes so that
    # the force matching occurs!
    graph.save(model_directory=directory, out_nodes=[minimizer])
    return directory
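After a simulation has run with this model attached, the fitted parameters can be inspected offline with the same load_variables pattern used in Example #21. This sketch assumes the variables are retrievable by the names given above:

import hoomd.htf as htf

# default directory used by lj_force_matching above
model_dir = '/tmp/test-lj-force-matching'
vars = htf.load_variables(model_dir, ['lj-epsilon', 'lj-sigma', 'loss'])
print('epsilon =', vars['lj-epsilon'],
      'sigma =', vars['lj-sigma'],
      'loss =', vars['loss'])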
Example #5
def feeddict_graph(directory='/tmp/test-feeddict-model'):
    graph = htf.graph_builder(9 - 1, output_forces=False)
    forces = graph.forces[:, :3]
    force_com = tf.reduce_mean(forces, axis=0)
    thing = tf.placeholder(dtype=tf.float32, name='test-tensor')
    out = force_com * thing
    graph.save(directory, out_nodes=[out])
    return directory
Example #6
def wrap_graph(directory='/tmp/test-wrap-model'):
    graph = htf.graph_builder(0, output_forces=False)
    p1 = graph.positions[0, :3]
    p2 = graph.positions[-1, :3]
    r = p1 - p2
    rwrap = graph.wrap_vector(r)
    # TODO: Smoke test. Think of a better test.
    graph.save(directory, out_nodes=[rwrap])
    return directory
Example #7
def benchmark_gradient_potential():
    graph = htf.graph_builder(1024, 64)
    nlist = graph.nlist[:, :, :3]
    # get r
    r = tf.norm(nlist, axis=2)
    # compute 1 / r while safely treating r = 0.
    energy = tf.reduce_sum(graph.safe_div(1., r), axis=1)
    forces = graph.compute_forces(energy)
    graph.save(force_tensor=forces,
               model_directory='/tmp/benchmark-gradient-potential-model')
Example #8
def lj_mol(NN, MN, directory='/tmp/test-lj-mol'):
    graph = htf.graph_builder(NN)
    graph.build_mol_rep(MN)
    # assume particle (w) is 0
    r = graph.safe_norm(graph.mol_nlist, axis=3)
    rinv = graph.safe_div(1.0, r)
    mol_p_energy = 4.0 / 2.0 * (rinv**12 - rinv**6)
    total_e = tf.reduce_sum(mol_p_energy)
    forces = graph.compute_forces(total_e)
    graph.save(force_tensor=forces, model_directory=directory, out_nodes=[])
    return directory
Example #9
def noforce_graph(directory='/tmp/test-noforce-model'):
    graph = htf.graph_builder(9 - 1, output_forces=False)
    nlist = graph.nlist[:, :, :3]
    neighs_rs = tf.norm(nlist, axis=2)
    energy = graph.safe_div(numerator=tf.ones_like(neighs_rs,
                                                   dtype=neighs_rs.dtype),
                            denominator=neighs_rs,
                            name='energy')
    pos_norm = tf.norm(graph.positions, axis=1)
    graph.save(directory, out_nodes=[energy, pos_norm])
    return directory
Example #10
def make_eds_graph(N, NN, directory, cv_op, set_point):
    '''Currently only computes running mean'''

    graph = htf.graph_builder(N-1, output_forces=True)
    A = tf.constant(0.3, name='A', dtype=tf.float32)
    kt = 1.0  # should match the kt from the nvt ensemble in hoomd
    beta = 1/kt
    bias_energy = tf.Variable(tf.ones(N, dtype=tf.float32),
                              dtype=tf.float32, name='energy')
    steps = tf.Variable(1.0, name='steps')
    mean = tf.Variable(0.1, name='mean')
    gradient = tf.Variable(0.1, name='gradient')
    # gradient for the gradient descent update
    alpha = tf.Variable(0.001, name='alpha')  # coupling constant
    set_point = tf.Variable(set_point, name='set_point')
    # reference value (set point) for the bias
    colvar = tf.Variable(0.0, name='colvar_inst')
    # collective variable for outputting the instantaneous cv
    colvar_sq = tf.Variable(0.0, name='cv0')
    # collective variable for outputting the instantaneous cv^2
    aver_r, cv_sq = cv_op(graph, N)
    # cv_op returns the mean cv and the mean cv^2
    update_colvar_op = colvar.assign(aver_r)
    # store the new cv in the colvar variable
    update_cv_op = colvar_sq.assign(cv_sq)
    # store the new cv^2 in the colvar_sq variable
    run_cv = graph.running_mean(colvar, name='mean_cv')
    # takes the running mean of the cv
    variance = graph.running_mean(colvar_sq, name='cv2')-(run_cv**2)
    # variance of the collective variable
    gradient_new = -2*beta*(graph.safe_div(run_cv, set_point)-1)*(variance)
    # g_tau (the gradient at time tau), Eq. 5 in the EDS paper
    norm_gradient = (gradient_new**2+gradient**2)**0.5
    # root of the summed squared gradients, i.e. A / learning_rate
    learning_rate = graph.safe_div(A, norm_gradient)  # learning rate
    alpha_val = alpha-learning_rate*gradient_new
    # new coupling constant
    bias_potential = tf.reduce_sum((graph.safe_div((alpha_val*aver_r),
                                                   set_point)))
    # computing the potential energy due to the bias
    update_step_op = steps.assign_add(1.0)
    update_norm_gradient = gradient.assign(gradient_new)
    update_alpha_op = alpha.assign(alpha_val)
    update_mean_op = mean.assign(run_cv)
    bias_energy2 = bias_energy*bias_potential
    # expanding the bias potential over the N particles
    forces = graph.compute_forces(bias_energy2)
    # computing the forces from the bias energy
    graph.save(model_directory=directory, out_nodes=[
            update_step_op, update_colvar_op, update_cv_op, update_mean_op,
            update_norm_gradient, update_alpha_op],
               force_tensor=forces, virial=None)
Example #11
def gradient_potential():
    graph = htf.graph_builder(9 - 1)
    with tf.name_scope('force-calc') as scope:
        nlist = graph.nlist[:, :, :3]
        neighs_rs = tf.norm(nlist, axis=2)
        energy = 0.5 * graph.safe_div(numerator=tf.ones_like(
            neighs_rs, dtype=neighs_rs.dtype),
                                      denominator=neighs_rs,
                                      name='energy')
    forces = graph.compute_forces(energy)
    graph.save(force_tensor=forces,
               model_directory='/tmp/test-gradient-potential-model',
               out_nodes=[energy])
Example #12
def lj_running_mean(NN, directory='/tmp/test-lj-running-mean-model'):
    graph = htf.graph_builder(NN)
    # pairwise energy. Double count -> divide by 2
    inv_r6 = graph.nlist_rinv**6
    p_energy = 4.0 / 2.0 * (inv_r6 * inv_r6 - inv_r6)
    # sum over pairwise energy
    energy = tf.reduce_sum(p_energy, axis=1)
    forces = graph.compute_forces(energy)
    avg_energy = graph.running_mean(tf.reduce_sum(energy, axis=0),
                                    'average-energy')
    graph.save(force_tensor=forces,
               model_directory=directory,
               out_nodes=[avg_energy])
    return directory
Example #13
def lj_graph(NN, directory='/tmp/test-lj-potential-model'):
    graph = htf.graph_builder(NN)
    nlist = graph.nlist[:, :, :3]
    # get r
    r = tf.norm(nlist, axis=2)
    # compute 1 / r while safely treating r = 0.
    # pairwise energy. Double count -> divide by 2
    inv_r6 = graph.safe_div(1., r**6)
    p_energy = 4.0 / 2.0 * (inv_r6 * inv_r6 - inv_r6)
    # sum over pairwise energy
    energy = tf.reduce_sum(p_energy, axis=1)
    forces = graph.compute_forces(energy)
    graph.save(force_tensor=forces, model_directory=directory)
    return directory
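Unlike the NN=0 examples, this model consumes a neighbor list, so the HOOMD side has to supply one when attaching. The driver below is only a sketch under assumptions: the hoomd.md.nlist.cell() call and the tfcompute.attach(nlist, r_cut=...) signature are not demonstrated elsewhere in these examples, so check them against the hoomd.htf documentation.

import hoomd
import hoomd.md
import hoomd.htf as htf

NN = 63
model_dir = lj_graph(NN)
hoomd.context.initialize()
with htf.tfcompute(model_dir) as tfcompute:
    # 8 x 8 square lattice -> 64 particles, so NN = 63 covers all neighbors
    hoomd.init.create_lattice(unitcell=hoomd.lattice.sq(a=4.0), n=[8, 8])
    nlist = hoomd.md.nlist.cell()  # assumed HOOMD cell-list neighbor list
    hoomd.md.integrate.mode_standard(dt=0.005)
    hoomd.md.integrate.nve(
        group=hoomd.group.all()).randomize_velocities(kT=1.2, seed=42)
    # assumed signature: pass the neighbor list and the TF-side cutoff
    tfcompute.attach(nlist, r_cut=2.5)
    hoomd.run(100)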
Example #14
def lj_rdf(NN, directory='/tmp/test-lj-rdf-model'):
    graph = htf.graph_builder(NN)
    # pairwise energy. Double count -> divide by 2
    inv_r6 = graph.nlist_rinv**6
    p_energy = 4.0 / 2.0 * (inv_r6 * inv_r6 - inv_r6)
    # sum over pairwise energy
    energy = tf.reduce_sum(p_energy, axis=1, name='energy')
    forces = graph.compute_forces(energy)
    # compute rdf between type 0 and 0
    rdf = graph.compute_rdf([3, 5], 'rdf', 10, 0, 0)
    avg_rdf = graph.running_mean(rdf, 'avg-rdf')
    # check = tf.add_check_numerics_ops()
    graph.save(force_tensor=forces, model_directory=directory,
               out_nodes=[avg_rdf])
    return directory
Example #15
def mol_features_graph(directory='/tmp/test-mol-features'):
    graph = htf.graph_builder(50, output_forces=False)
    graph.build_mol_rep(6)
    mol_pos = graph.mol_positions
    r = htf.mol_bond_distance(mol_pos, 2, 1)
    a = htf.mol_angle(mol_pos, 1, 2, 3)
    d = htf.mol_dihedral(mol_pos, 1, 2, 3, 4)
    avg_r = tf.reduce_mean(r)
    avg_a = tf.reduce_mean(a)
    avg_d = tf.reduce_mean(d)
    graph.save_tensor(avg_r, 'avg_r')
    graph.save_tensor(avg_a, 'avg_a')
    graph.save_tensor(avg_d, 'avg_d')
    graph.save(model_directory=directory)
    return directory
Example #16
def lj_force_output(NN, directory='/tmp/test-lj-rdf-model'):
    ops = []
    graph = htf.graph_builder(NN, output_forces=False)
    # pairwise energy. Double count -> divide by 2
    inv_r6 = graph.nlist_rinv**6
    p_energy = 4.0 / 2.0 * (inv_r6 * inv_r6 - inv_r6)
    # sum over pairwise energy
    energy = tf.reduce_sum(p_energy, axis=1)
    tf_forces = graph.compute_forces(energy)
    h_forces = graph.forces
    error = tf.losses.mean_squared_error(tf_forces, h_forces)
    v = tf.get_variable('error', shape=[])
    ops.append(v.assign(error))
    graph.save(model_directory=directory, out_nodes=ops)
    return directory
Example #17
def run_traj_graph(directory='/tmp/test-run-traj'):
    graph = htf.graph_builder(128)
    nlist = graph.nlist[:, :, :3]
    r = tf.norm(nlist, axis=2)
    # compute 1 / r while safely treating r = 0.
    # pairwise energy. Double count -> divide by 2
    inv_r6 = graph.safe_div(1., r**6)
    p_energy = 4.0 / 2.0 * (inv_r6 * inv_r6 - inv_r6)
    # sum over pairwise energy
    energy = tf.reduce_sum(p_energy, axis=1)
    forces = graph.compute_forces(energy)
    avg_energy = graph.running_mean(tf.reduce_sum(energy, axis=0),
                                    'average-energy')
    graph.save(force_tensor=forces,
               model_directory=directory,
               out_nodes=[avg_energy])
    return directory
Example #18
def print_graph(NN, directory='/tmp/test-print-model'):
    graph = htf.graph_builder(NN)
    nlist = graph.nlist[:, :, :3]
    # get r
    r = tf.norm(nlist, axis=2)
    # compute 1 / r while safely treating r = 0.
    # pairwise energy. Double count -> divide by 2
    inv_r6 = graph.safe_div(1., r**6)
    p_energy = 4.0 / 2.0 * (inv_r6 * inv_r6 - inv_r6)
    # sum over pairwise energy
    energy = tf.reduce_sum(p_energy, axis=1)
    forces = graph.compute_forces(energy)
    prints = tf.Print(energy, [energy], summarize=1000)
    graph.save(force_tensor=forces,
               model_directory=directory,
               out_nodes=[prints])
    return directory
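tf.Print is deprecated in later TF 1.x releases in favor of tf.print, which returns an op rather than a pass-through tensor. Since out_nodes already accepts ops (train_op in Example #3, the assign in Example #21), the last two statements of print_graph could instead be written roughly as:

    # tf.print returns an op; listing it in out_nodes runs it each step
    prints = tf.print('energy:', energy, summarize=1000)
    graph.save(force_tensor=forces,
               model_directory=directory,
               out_nodes=[prints])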
Example #19
def eds_graph(directory='/tmp/test-lj-eds'):
    graph = htf.graph_builder(0)
    # get distance from center
    rvec = graph.wrap_vector(graph.positions[0, :3])
    cv = tf.norm(rvec)
    cv_mean = graph.running_mean(cv, name='cv-mean')
    alpha = htf.eds_bias(cv, 4, 5, cv_scale=1 / 5, name='eds')
    alpha_mean = graph.running_mean(alpha, name='alpha-mean')
    # eds + harmonic bond
    energy = (cv - 5)**2 + cv * alpha
    # energy = cv^2 + (alpha - 10) cv + 25
    #        = (cv - (5 - alpha / 2))^2 + C
    # so the minimum sits at the set point of 4 when alpha = 2
    forces = graph.compute_forces(energy, positions=True)
    graph.save(force_tensor=forces,
               model_directory=directory,
               out_nodes=[cv_mean, alpha_mean])
    return directory
Example #20
def custom_nlist(NN, r_cut, system, directory='/tmp/test-custom-nlist'):
    graph = htf.graph_builder(NN, output_forces=False)
    nlist = graph.nlist[:, :, :3]
    # get r
    r = tf.norm(nlist, axis=2)
    v = tf.get_variable('hoomd-r',
                        initializer=tf.zeros_like(r),
                        validate_shape=False)
    ops = [v.assign(r)]

    # compute nlist
    cnlist = htf.compute_nlist(graph.positions[:, :3], r_cut, NN, system)
    r = tf.norm(cnlist[:, :, :3], axis=2)
    v = tf.get_variable('htf-r',
                        initializer=tf.zeros_like(r),
                        validate_shape=False)
    ops.append(v.assign(r))

    graph.save(model_directory=directory, out_nodes=ops)
    return directory
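Once the saved model has been attached and run (as in the driver sketches above), the two stored distance tensors can be compared to check that the TF-side neighbor list agrees with HOOMD's. This is a sketch, assuming the 'hoomd-r' and 'htf-r' variables can be fetched with htf.load_variables; the two lists may order and pad neighbors differently, so each row is sorted before comparing.

import numpy as np
import hoomd.htf as htf

model_dir = '/tmp/test-custom-nlist'  # default directory used above
vars = htf.load_variables(model_dir, ['hoomd-r', 'htf-r'])
hoomd_r = np.sort(vars['hoomd-r'], axis=-1)
htf_r = np.sort(vars['htf-r'], axis=-1)
assert np.allclose(hoomd_r, htf_r, atol=1e-5)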
Example #21
def test_load_variables(self):
    model_dir = '/tmp/test-load'
    # make model that does assignment
    g = htf.graph_builder(0, False)
    h = tf.ones([10], dtype=tf.float32)
    v = tf.get_variable('test', shape=[], trainable=False)
    as_op = v.assign(tf.reduce_sum(h))
    g.save(model_dir, out_nodes=[as_op])
    # run once
    hoomd.context.initialize()
    with hoomd.htf.tfcompute(model_dir) as tfcompute:
        system = hoomd.init.create_lattice(
            unitcell=hoomd.lattice.sq(a=4.0), n=[3, 3])
        hoomd.md.integrate.mode_standard(dt=0.005)
        hoomd.md.integrate.nve(
            group=hoomd.group.all()).randomize_velocities(kT=2, seed=2)
        tfcompute.attach(save_period=1)
        hoomd.run(1)
    # load
    vars = htf.load_variables(model_dir, ['test'])
    assert np.abs(vars['test'] - 10) < 10e-10
Example #22
def simple_potential(directory='/tmp/test-simple-potential-model'):
    graph = htf.graph_builder(9 - 1)
    with tf.name_scope('force-calc') as scope:
        nlist = graph.nlist[:, :, :3]
        neighs_rs = tf.norm(nlist, axis=2, keepdims=True)
        # no need to apply Newton's third law because nlist is double counted
        fr = tf.multiply(-1.0,
                         tf.multiply(tf.reciprocal(neighs_rs), nlist),
                         name='nan-pairwise-forces')
        with tf.name_scope('remove-nans') as scope:
            zeros = tf.zeros_like(nlist)
            real_fr = tf.where(tf.is_finite(fr),
                               fr,
                               zeros,
                               name='pairwise-forces')
        forces = tf.reduce_sum(real_fr, axis=1, name='forces')
    graph.save(force_tensor=forces, model_directory=directory)
    # check graph info
    with open('/tmp/test-simple-potential-model/graph_info.p', 'rb') as f:
        gi = pickle.load(f)
        assert gi['forces'] != 'forces:0'
        assert tf.get_default_graph().get_tensor_by_name(
            gi['forces']).shape[1] == 4
    return directory
Example #23
# build scattering cross-section
# (NN, param_sys and frame are assumed to be defined earlier in this script)
import numpy as np
import tensorflow as tf
from hoomd.htf import graph_builder

data = [None for _ in range(NN)]
# Neutron Scattering lengths (NIST)
# Hydrogen
data[1] = -3.742
# Carbon
data[6] = 6.646
# Oxygen
data[8] = 5.805
N = len(param_sys.atoms)
cross = np.zeros(len(frame.particles.types), dtype=np.float32)

for a in param_sys.atoms:
    cross[frame.particles.types.index(a.type)] = data[a.element]

graph = graph_builder(NN, output_forces=False)
# get pairwise scattering length bi*bj
bj = tf.gather(cross, tf.cast(graph.nlist[:, :, 3], tf.int32))
bi = tf.gather(cross, tf.cast(graph.positions[:, 3], tf.int32))
bij = tf.einsum('ij,i -> ij', bj, bi)
# get neighbor list
nlist = graph.nlist[:, :, :3]
# get interatomic distances
r = tf.norm(nlist, axis=2)
q = tf.lin_space(0.01, 10., 100)
# q_rij must be QxNxNN - Outer product
# qr[i, j, k] = q[i] * r[j,k]
qr = tf.einsum('i, jk -> ijk', q, r)
intensities = tf.multiply(
    0.5,
    tf.reduce_sum(tf.reduce_sum(bij * tf.sin(qr) *
                                graph_builder.safe_div(1., qr),
                                axis=2),
                  axis=1))
Example #24
def mol_force(directory='/tmp/test-mol-force-model'):
    graph = htf.graph_builder(0, output_forces=False)
    graph.build_mol_rep(3)
    f = tf.norm(graph.mol_forces, axis=0)
    graph.save(directory, out_nodes=[f])
    return directory
Example #25
import tensorflow as tf
from hoomd.htf import graph_builder
import sys
if len(sys.argv) != 2:
    print('Usage: build_scattering.py [model_dir]')
    sys.exit(1)

model_dir = sys.argv[1]
b = -3.739  # assume every particle is hydrogen
N = 64
graph = graph_builder(N, N - 1, output_forces=False)
# get neighbor list
nlist = graph.nlist[:, :, :3]
# get interatomic distances
r = tf.norm(nlist, axis=2)
q = tf.lin_space(0., 10., 100)
# q_rij must be QxNxNN - Outer product
# qr[i, j, k] = q[i] * r[j,k]
qr = tf.einsum('i, jk -> ijk', q, r)
intensities = tf.multiply(
    0.5,
    tf.reduce_sum(tf.reduce_sum(b * b * tf.sin(qr) *
                                graph_builder.safe_div(1., qr),
                                axis=2),
                  axis=1))
# print_node = tf.Print(intensities, [ intensities], summarize=1000)

avg = tf.Variable(tf.zeros(tf.shape(q)), name='intensity')
steps = tf.Variable(1., name='n')
steps_1 = tf.assign_add(steps, 1.)
avg_op = tf.assign_add(avg, (intensities - avg) / steps)
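The snippet ends before the graph is written out. Based on the save pattern used throughout these examples, the missing tail would presumably register the two update ops and save the model, roughly:

# assumed ending, following the graph.save pattern of the other examples
graph.save(model_dir, out_nodes=[avg_op, steps_1])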