Code example #1
def test_clist_nl():
    """Cell list neighbor test
    Compare with ASE implementation
    """
    import numpy as np
    import tensorflow as tf
    from ase.build import bulk
    from ase.neighborlist import neighbor_list
    from pinn.layers import cell_list_nl

    to_test = [bulk('Cu'), bulk('Mg'), bulk('Fe')]
    ind, coord, cell = [], [], []
    for i, a in enumerate(to_test):
        ind.append([[i]] * len(a))
        coord.append(a.positions)
        cell.append(a.cell)

    with tf.Graph().as_default():
        tensors = {
            'ind_1': tf.constant(np.concatenate(ind, axis=0), tf.int32),
            'coord': tf.constant(np.concatenate(coord, axis=0), tf.float32),
            'cell': tf.constant(np.stack(cell, axis=0), tf.float32)
        }
        nl = cell_list_nl(tensors, rc=10)
        with tf.Session() as sess:
            dist_pinn = sess.run(nl['dist'])

    dist_ase = []
    for a in to_test:
        dist_ase.append(neighbor_list('d', a, 10))
    dist_ase = np.concatenate(dist_ase, 0)
    # Pair ordering differs between the two implementations; compare sorted lists
    assert np.all(np.abs(np.sort(dist_ase) - np.sort(dist_pinn)) < 1e-4)
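For orientation, here is a minimal standalone sketch of the same `cell_list_nl` call for a single structure. The tensor layout mirrors the test above; the cutoff value here is an arbitrary choice:

import numpy as np
import tensorflow as tf
from ase.build import bulk
from pinn.layers import cell_list_nl

# One Cu crystal; 'ind_1' maps every atom to structure 0 in the batch.
atoms = bulk('Cu')
with tf.Graph().as_default():
    tensors = {
        'ind_1': tf.constant([[0]] * len(atoms), tf.int32),
        'coord': tf.constant(atoms.positions, tf.float32),
        'cell': tf.constant(np.asarray(atoms.cell)[None], tf.float32),
    }
    nl = cell_list_nl(tensors, rc=5.0)
    with tf.Session() as sess:
        print(sess.run(nl['dist']))  # pair distances within the cutoff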
Code example #2
def lj(tensors, rc=3.0, sigma=1.0, epsilon=1.0):
    """Lennard-Jones Potential

    A simple implementation of the LJ potential, intended for
    testing distance/force calculations rather than serving as
    a network.

    Args:
        tensors: input data (nested tensor from dataset).
        rc: cutoff radius.
        sigma, epsilon: LJ parameters
    """
    tensors.update(cell_list_nl(tensors, rc=rc))
    connect_dist_grad(tensors)
    # shift the pair energy so that it vanishes at the cutoff radius
    e0 = 4 * epsilon * ((sigma / rc)**12 - (sigma / rc)**6)
    c6 = (sigma / tensors['dist'])**6
    c12 = c6**2
    en = 4 * epsilon * (c12 - c6) - e0
    natom = tf.shape(tensors['ind_1'])[0]
    # sum pair energies onto the first atom of each pair; each pair
    # appears twice in the neighbor list, hence the factor of 1/2
    en = tf.unsorted_segment_sum(en, tensors['ind_2'][:, 0], natom)
    return en / 2.0
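For reference, the pair energy implemented above is the cutoff-shifted Lennard-Jones potential; the shift E_0 (e0 in the code) makes the energy vanish at the cutoff:

    E(r) = 4\epsilon \left[ \left(\frac{\sigma}{r}\right)^{12} - \left(\frac{\sigma}{r}\right)^{6} \right] - E_0,
    \qquad
    E_0 = 4\epsilon \left[ \left(\frac{\sigma}{r_c}\right)^{12} - \left(\frac{\sigma}{r_c}\right)^{6} \right]

The returned atomic energy is half the sum of E(r) over an atom's neighbors, since each pair appears twice in the neighbor list.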
Code example #3
def pinet(tensors,
          pp_nodes=[16, 16],
          pi_nodes=[16, 16],
          ii_nodes=[16, 16],
          en_nodes=[16, 16],
          depth=4,
          atom_types=[1, 6, 7, 8],
          act='tanh',
          rc=4.0,
          cutoff_type='f1',
          basis_type='polynomial',
          n_basis=4,
          gamma=3.0,
          preprocess=False):
    """Network function for the PiNet neural network

    Args:
        tensors: input data (nested tensor from dataset).
        atom_types (list): elements for the one-hot embedding.
        pp_nodes (list): number of nodes for pp layer.
        pi_nodes (list): number of nodes for pi layer.
        ii_nodes (list): number of nodes for ii layer.
        en_nodes (list): number of nodes for en layer.
        depth (int): number of interaction blocks.
        rc (float): cutoff radius.
        basis_type (string): type of basis function to use,
            can be "polynomial" or "gaussian".
        gamma (float): controls width of gaussian function for gaussian basis.
        n_basis (int): number of basis functions to use.
        cutoff_type (string): cutoff function to use with the basis.
        act (string): activation function to use.
        preprocess (bool): whether to return the preprocessed tensor.

    Returns:
        prediction or preprocessed tensor dictionary
    """
    if "ind_2" not in tensors:
        tensors.update(cell_list_nl(tensors, rc))
        connect_dist_grad(tensors)
        tensors['embed'] = atomic_onehot(tensors['elems'], atom_types)
    else:
        connect_dist_grad(tensors)
    if preprocess:
        return tensors
    # Basis function
    if basis_type == 'polynomial':
        basis = polynomial_basis(tensors['dist'], cutoff_type, rc, n_basis)
    elif basis_type == 'gaussian':
        basis = gaussian_basis(tensors['dist'], cutoff_type, rc, n_basis,
                               gamma)
    else:
        raise ValueError('Unknown basis_type: {}'.format(basis_type))

    # Name the input tensors so they can be retrieved from the graph later
    nodes = {1: tf.identity(tensors['embed'], name='embed')}
    diff = tf.identity(tensors['diff'], name='diff')
    coord = tf.identity(tensors['coord'], name='coord')
    elems = tf.identity(tensors['elems'], name='elems')
    ind_1 = tf.identity(tensors['ind_1'], name='ind_1')
    ind_2 = tf.identity(tensors['ind_2'], name='ind_2')
    basis = tf.expand_dims(basis, -2)
    natom = tf.shape(tensors['ind_1'])[0]
    # Then construct the model
    output = 0.0
    for i in range(depth):
        if i > 0:
            nodes[1] = fc_layer(nodes[1],
                                pp_nodes,
                                act=act,
                                name='pp-{}'.format(i))
        nodes[2] = pi_layer(ind_2,
                            nodes[1],
                            basis,
                            pi_nodes,
                            act=act,
                            name='pi-{}'.format(i))
        nodes[2] = fc_layer(nodes[2],
                            ii_nodes,
                            use_bias=False,
                            act=act,
                            name='ii-{}'.format(i))
        if nodes[1].shape[-1] != nodes[2].shape[-1]:
            nodes[1] = tf.layers.dense(nodes[1],
                                       nodes[2].shape[-1],
                                       use_bias=False,
                                       activation=None)
        nodes[1] = tf.add(nodes[1],
                          ip_layer(ind_2,
                                   nodes[2],
                                   natom,
                                   name='ip_{}'.format(i)),
                          name='prop_{}'.format(i))
        output = tf.add(output,
                        en_layer(nodes[1],
                                 en_nodes,
                                 act=act,
                                 name='en_{}'.format(i)),
                        name='out_{}'.format(i))

    return output
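A minimal sketch of evaluating pinet on a single structure, reusing the input layout from code example #1. The import path (pinn.networks) and the atom_types choice are assumptions here; in practice the dataset pipeline builds `tensors`:

import numpy as np
import tensorflow as tf
from ase.build import bulk
from pinn.networks import pinet  # assumed import path

atoms = bulk('Cu')
with tf.Graph().as_default():
    tensors = {
        'ind_1': tf.constant([[0]] * len(atoms), tf.int32),
        'coord': tf.constant(atoms.positions, tf.float32),
        'cell': tf.constant(np.asarray(atoms.cell)[None], tf.float32),
        'elems': tf.constant(atoms.numbers, tf.int32),
    }
    # atom_types must cover the elements present (Cu = 29)
    energy = pinet(tensors, atom_types=[29], rc=4.0)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(energy))  # per-atom energy contributions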
Code example #4
def bpnn(tensors,
         sf_spec,
         nn_spec,
         rc=5.0,
         act='tanh',
         cutoff_type='f1',
         fp_range=[],
         fp_scale=False,
         preprocess=False,
         use_jacobian=True):
    """ Network function for Behler-Parrinello Neural Network

    Example of sf_spec::

        [{'type':'G2', 'i': 1, 'j': 8, 'Rs': [1.,2.], 'eta': [0.1,0.2]},
         {'type':'G2', 'i': 8, 'j': 1, 'Rs': [1.,2.], 'eta': [0.1,0.2]},
         {'type':'G4', 'i': 8, 'j': 8, 'lambd':[0.5,1], 'zeta': [1.,2.], 'eta': [0.1,0.2]}]

    The symmetry functions are defined according to the paper:

        Behler, Jörg. “Constructing High-Dimensional Neural Network Potentials: A Tutorial Review.”
        International Journal of Quantum Chemistry 115, no. 16 (August 15, 2015): 1032–1050.
        https://doi.org/10.1002/qua.24890.
        (Note that the naming of the symmetry functions differs from http://dx.doi.org/10.1063/1.3553717)

    For more details, see the definitions of the individual symmetry functions.

    Example of nn_spec::

        {8: [32, 32, 32], 1: [16, 16, 16]}


    Args:
        tensors: input data (nested tensor from dataset).
        sf_spec (list): symmetry function specification,
            a list of dicts (see the example above).
        nn_spec (dict): elementwise network specification,
            each key points to a list specifying the
            number of nodes in the feed-forward subnets.
        rc (float): cutoff radius.
        cutoff_type (string): cutoff function to use. 
        act (str): activation function to use in dense layers.
        fp_scale (bool): scale the fingerprints according to fp_range.
        fp_range (list of [min, max]): the atomic fingerprint range for each SF
            used to pre-condition the fingerprints.
        preprocess (bool): whether to return the preprocessed tensor.
        use_jacobian (bool): whether to reconnect the gradients of the
            fingerprints. Note that the Jacobian is required to obtain
            forces with preprocessing; the option exists mainly for
            verifying the Jacobian implementation.

    Returns:
        prediction or preprocessed tensor dictionary
    """
    if 'ind_2' not in tensors:
        tensors.update(cell_list_nl(tensors, rc))
        connect_dist_grad(tensors)
        tensors['cutoff_func'] = cutoff_func(tensors['dist'], cutoff_type, rc)
        tensors.update(bp_symm_func(tensors, sf_spec, rc, cutoff_type))
        tensors.pop('dist')
        tensors.pop('cutoff_func')
        tensors.pop('ind_3', None)
    else:
        connect_dist_grad(tensors)
    if preprocess:
        return tensors
    fps = make_fps(tensors, sf_spec, nn_spec, use_jacobian, fp_range, fp_scale)
    output = 0.0
    n_atoms = tf.shape(tensors['elems'])[0]
    for k, v in nn_spec.items():
        # indices of the atoms of element k
        ind = tf.where(tf.equal(tensors['elems'], k))
        with tf.variable_scope("BP_DENSE_{}".format(k)):
            nodes = fps[k]
            for n_node in v:
                nodes = tf.layers.dense(nodes, n_node, activation=act)
            atomic_en = tf.layers.dense(nodes,
                                        1,
                                        activation=None,
                                        use_bias=False,
                                        name='E_OUT_{}'.format(k))
        output += tf.unsorted_segment_sum(atomic_en[:, 0], ind[:, 0], n_atoms)
    return output
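Putting the docstring examples together, here is a minimal sketch of calling bpnn on a water molecule. The import path and the vacuum box (added so the cell list has a cell to work with) are assumptions:

import numpy as np
import tensorflow as tf
from ase.build import molecule
from pinn.networks import bpnn  # assumed import path

# sf_spec and nn_spec taken verbatim from the docstring examples above
sf_spec = [
    {'type': 'G2', 'i': 1, 'j': 8, 'Rs': [1., 2.], 'eta': [0.1, 0.2]},
    {'type': 'G2', 'i': 8, 'j': 1, 'Rs': [1., 2.], 'eta': [0.1, 0.2]},
    {'type': 'G4', 'i': 8, 'j': 8, 'lambd': [0.5, 1], 'zeta': [1., 2.], 'eta': [0.1, 0.2]},
]
nn_spec = {8: [32, 32, 32], 1: [16, 16, 16]}

atoms = molecule('H2O', vacuum=5.0)  # boxed so a cell is defined
with tf.Graph().as_default():
    tensors = {
        'ind_1': tf.constant([[0]] * len(atoms), tf.int32),
        'coord': tf.constant(atoms.positions, tf.float32),
        'cell': tf.constant(np.asarray(atoms.cell)[None], tf.float32),
        'elems': tf.constant(atoms.numbers, tf.int32),
    }
    energy = bpnn(tensors, sf_spec, nn_spec, rc=5.0)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(energy))  # per-atom energies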