示例#1
0
    def build(self,
              coord_,
              atype_,
              natoms,
              box_,
              mesh,
              davg=None,
              dstd=None,
              suffix='',
              reuse=None):
        """Build the graph for the se_a descriptor.

        Registers the cutoff/ntypes attributes and the (non-trainable)
        normalization statistics under the 'descrpt_attr' scope, runs the
        ``descrpt_se_a`` custom op, and passes the result through the
        filter (embedding) networks.

        Parameters
        ----------
        coord_
                Atom coordinates; reshaped here to (nframes, natoms[1] * 3).
        atype_
                Atom types; reshaped here to (nframes, natoms[1]).
        natoms
                Atom-count tensor; natoms[1] is the total number of atoms
                held by this processor.
        box_
                Simulation box; reshaped here to (nframes, 9).
        mesh
                Neighbor-list mesh, passed through to the custom op.
        davg, dstd
                Descriptor mean / standard deviation used for
                normalization; default to zeros / ones when not given.
        suffix
                Name suffix for the variable scope.
        reuse
                Whether the scope variables are reused.

        Returns
        -------
        The filtered descriptor output ``self.dout``.
        """

        with tf.variable_scope('descrpt_attr' + suffix, reuse=reuse):
            # default statistics: no shift, unit scale
            if davg is None:
                davg = np.zeros([self.ntypes, self.ndescrpt])
            if dstd is None:
                dstd = np.ones([self.ntypes, self.ndescrpt])
            # graph-embedded constants so saved models carry rcut/ntypes
            t_rcut = tf.constant(np.max([self.rcut_r, self.rcut_a]),
                                 name='rcut',
                                 dtype=global_tf_float_precision)
            t_ntypes = tf.constant(self.ntypes, name='ntypes', dtype=tf.int32)
            # normalization stats stored as non-trainable variables
            self.t_avg = tf.get_variable(
                't_avg',
                davg.shape,
                dtype=global_tf_float_precision,
                trainable=False,
                initializer=tf.constant_initializer(davg))
            self.t_std = tf.get_variable(
                't_std',
                dstd.shape,
                dtype=global_tf_float_precision,
                trainable=False,
                initializer=tf.constant_initializer(dstd))

        coord = tf.reshape(coord_, [-1, natoms[1] * 3])
        box = tf.reshape(box_, [-1, 9])
        atype = tf.reshape(atype_, [-1, natoms[1]])

        # custom op: environment matrix, its derivative, relative
        # coordinates and the neighbor list
        self.descrpt, self.descrpt_deriv, self.rij, self.nlist \
            = op_module.descrpt_se_a (coord,
                                       atype,
                                       natoms,
                                       box,
                                       mesh,
                                       self.t_avg,
                                       self.t_std,
                                       rcut_a = self.rcut_a,
                                       rcut_r = self.rcut_r,
                                       rcut_r_smth = self.rcut_r_smth,
                                       sel_a = self.sel_a,
                                       sel_r = self.sel_r)

        # one row per atom
        self.descrpt_reshape = tf.reshape(self.descrpt, [-1, self.ndescrpt])

        self.dout, self.qmat = self._pass_filter(self.descrpt_reshape,
                                                 natoms,
                                                 suffix=suffix,
                                                 reuse=reuse,
                                                 trainable=self.trainable)

        return self.dout
示例#2
0
 def _pass_filter(self,
                  inputs,
                  atype,
                  natoms,
                  input_dict,
                  reuse=None,
                  suffix='',
                  trainable=True):
     """Apply the embedding-net filter to the whole descriptor matrix.

     All atom types share a single filter network ('filter_type_all'),
     so the descriptor is processed in one pass instead of per type.
     Returns ``(output, None)``: this descriptor produces no
     rotation-matrix part.
     """
     start_index = 0
     # one row per frame: nframes x (natoms * ndescrpt)
     inputs = tf.reshape(inputs, [-1, self.ndescrpt * natoms[0]])
     per_type_out = []
     per_type_qmat = []
     # one row per atom: (nframes * natoms) x ndescrpt
     flat_rows = tf.reshape(inputs, [-1, self.ndescrpt])
     all_types = -1  # -1 marks the type-agnostic (shared) filter
     layer, qmat = self._filter(flat_rows,
                                all_types,
                                name='filter_type_all' + suffix,
                                natoms=natoms,
                                reuse=reuse,
                                trainable=trainable,
                                activation_fn=self.filter_activation_fn)
     layer = tf.reshape(
         layer, [tf.shape(inputs)[0], natoms[0] * self.get_dim_out()])
     per_type_out.append(layer)
     output = tf.concat(per_type_out, axis=1)
     return output, None
示例#3
0
 def build(self, coord_, atype_, natoms, box, mesh, suffix='', reuse=None):
     """Build a combined descriptor from the angular (a) and radial (r)
     sub-descriptors and concatenate their per-atom outputs.
     """
     davg = self.davg
     dstd = self.dstd
     # NOTE(review): davg/dstd are computed below but never passed to
     # the sub-builders -- looks like dead code or a missing argument;
     # confirm against the sub-descriptor build signatures.
     if davg is None:
         davg = [
             np.zeros([self.descrpt_a.ntypes, self.descrpt_a.ndescrpt]),
             np.zeros([self.descrpt_r.ntypes, self.descrpt_r.ndescrpt])
         ]
     if dstd is None:
         dstd = [
             np.ones([self.descrpt_a.ntypes, self.descrpt_a.ndescrpt]),
             np.ones([self.descrpt_r.ntypes, self.descrpt_r.ndescrpt])
         ]
     # dout
     self.dout_a = self.descrpt_a.build(coord_,
                                        atype_,
                                        natoms,
                                        box,
                                        mesh,
                                        suffix=suffix + '_a',
                                        reuse=reuse)
     self.dout_r = self.descrpt_r.build(coord_,
                                        atype_,
                                        natoms,
                                        box,
                                        mesh,
                                        suffix=suffix,
                                        reuse=reuse)
     # one row per atom for each part, then feature-wise concatenation
     self.dout_a = tf.reshape(self.dout_a,
                              [-1, self.descrpt_a.get_dim_out()])
     self.dout_r = tf.reshape(self.dout_r,
                              [-1, self.descrpt_r.get_dim_out()])
     self.dout = tf.concat([self.dout_a, self.dout_r], axis=1)
     self.dout = tf.reshape(self.dout, [-1, natoms[0] * self.get_dim_out()])
     return self.dout
示例#4
0
 def _pass_filter(self,
                  inputs,
                  natoms,
                  reuse=None,
                  suffix='',
                  trainable=True):
     """Run the per-type filter (embedding) networks over the descriptor.

     The flattened descriptor is sliced into one contiguous segment per
     atom type, each segment is fed through its own filter network
     ('filter_type_<i>'), and the per-type outputs (plus their
     rotation-matrix parts) are concatenated back along the atom axis.

     Parameters
     ----------
     inputs
             Flattened descriptor; reshaped here to
             (nframes, natoms[0] * ndescrpt).
     natoms
             Atom-count tensor; natoms[2 + i] is the number of atoms of
             type i.
     reuse, suffix, trainable
             Forwarded to the filter networks.

     Returns
     -------
     (output, output_qmat)
             Concatenated filter outputs and rotation matrices.
     """
     start_index = 0
     inputs = tf.reshape(inputs, [-1, self.ndescrpt * natoms[0]])
     # (fix: removed unused local `shape = inputs.get_shape().as_list()`)
     output = []
     output_qmat = []
     for type_i in range(self.ntypes):
         # contiguous block holding all atoms of this type
         inputs_i = tf.slice(inputs, [0, start_index * self.ndescrpt],
                             [-1, natoms[2 + type_i] * self.ndescrpt])
         inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
         layer, qmat = self._filter(inputs_i,
                                    name='filter_type_' + str(type_i) +
                                    suffix,
                                    natoms=natoms,
                                    reuse=reuse,
                                    seed=self.seed,
                                    trainable=trainable)
         layer = tf.reshape(
             layer,
             [tf.shape(inputs)[0], natoms[2 + type_i] * self.get_dim_out()])
         qmat = tf.reshape(qmat, [
             tf.shape(inputs)[0],
             natoms[2 + type_i] * self.get_dim_rot_mat_1() * 3
         ])
         output.append(layer)
         output_qmat.append(qmat)
         start_index += natoms[2 + type_i]
     output = tf.concat(output, axis=1)
     output_qmat = tf.concat(output_qmat, axis=1)
     return output, output_qmat
示例#5
0
    def comp_ef(self, dcoord, dbox, dtype, tnatoms, name, reuse=None):
        """Build energy, force and virial from the se_r descriptor.

        Runs the ``descrpt_se_r`` custom op, evaluates the per-atom
        energy net, and back-propagates through the descriptor via the
        ``prod_force_se_r`` / ``prod_virial_se_r`` ops.
        """
        # descriptor, its derivative, relative coords and neighbor list
        descrpt, descrpt_deriv, rij, nlist \
            = op_module.descrpt_se_r (dcoord,
                                      dtype,
                                      tnatoms,
                                      dbox,
                                      tf.constant(self.default_mesh),
                                      self.t_avg,
                                      self.t_std,
                                      rcut = self.rcut,
                                      rcut_smth = self.rcut_smth,
                                      sel = self.sel)
        inputs_reshape = tf.reshape(descrpt, [-1, self.ndescrpt])
        atom_ener = self._net(inputs_reshape, name, reuse=reuse)
        atom_ener_reshape = tf.reshape(atom_ener, [-1, self.natoms[0]])
        energy = tf.reduce_sum(atom_ener_reshape, axis=1)
        # dE/d(descriptor), consumed by the force/virial ops
        net_deriv_ = tf.gradients(atom_ener, inputs_reshape)
        net_deriv = net_deriv_[0]
        net_deriv_reshape = tf.reshape(net_deriv,
                                       [-1, self.natoms[0] * self.ndescrpt])

        force = op_module.prod_force_se_r(net_deriv_reshape, descrpt_deriv,
                                          nlist, tnatoms)
        # atom_vir (per-atom virial) is computed but intentionally not returned
        virial, atom_vir = op_module.prod_virial_se_r(net_deriv_reshape,
                                                      descrpt_deriv, rij,
                                                      nlist, tnatoms)
        return energy, force, virial
示例#6
0
 def _pass_filter(self,
                  inputs,
                  natoms,
                  reuse=None,
                  suffix='',
                  trainable=True):
     """Apply one radial filter network per atom type and concatenate
     the per-type outputs along the atom axis."""
     # one row per frame: nframes x (natoms * ndescrpt)
     inputs = tf.reshape(inputs, [-1, self.ndescrpt * natoms[0]])
     offset = 0
     chunks = []
     for tt in range(self.ntypes):
         # contiguous slice holding all atoms of type tt
         seg = tf.slice(inputs, [0, offset * self.ndescrpt],
                        [-1, natoms[2 + tt] * self.ndescrpt])
         seg = tf.reshape(seg, [-1, self.ndescrpt])
         filtered = self._filter_r(tf.cast(seg, self.filter_precision),
                                   tt,
                                   name='filter_type_' + str(tt) + suffix,
                                   natoms=natoms,
                                   reuse=reuse,
                                   seed=self.seed,
                                   trainable=trainable,
                                   activation_fn=self.filter_activation_fn)
         filtered = tf.reshape(
             filtered,
             [tf.shape(inputs)[0], natoms[2 + tt] * self.get_dim_out()])
         chunks.append(filtered)
         offset += natoms[2 + tt]
     return tf.concat(chunks, axis=1)
示例#7
0
 def _pass_filter(self,
                  inputs,
                  atype,
                  natoms,
                  input_dict,
                  reuse=None,
                  suffix='',
                  trainable=True):
     """Run the shared type-embedding filter over all atoms at once.

     Returns ``(output, output_qmat)``, both flattened to one row per
     frame.
     """
     # nf x na x ndescrpt, where ndescrpt == nnei x 4
     frames = tf.reshape(inputs, [-1, natoms[0], self.ndescrpt])
     layer, qmat = self._ebd_filter(tf.cast(frames, self.filter_precision),
                                    atype,
                                    natoms,
                                    input_dict,
                                    name='filter_type_all' + suffix,
                                    reuse=reuse,
                                    seed=self.seed,
                                    trainable=trainable,
                                    activation_fn=self.filter_activation_fn)
     nframes = tf.shape(frames)[0]
     output = tf.reshape(layer, [nframes, natoms[0] * self.get_dim_out()])
     output_qmat = tf.reshape(
         qmat, [nframes, natoms[0] * self.get_dim_rot_mat_1() * 3])
     return output, output_qmat
示例#8
0
 def _type_embed(self,
                 atype,
                 ndim=1,
                 reuse=None,
                 suffix='',
                 trainable=True):
     """Embed atom types through a small MLP.

     The types are normalized to [0, 1) and passed through
     ``self.type_nlayer`` hidden layers followed by one final linear
     layer, producing ``self.type_nchanl`` channels per row.

     Parameters
     ----------
     atype
             Atom types, castable to the filter precision.
     ndim
             Trailing feature dimension of `atype` after reshape.
     reuse
             Whether the layer variables are reused.
     suffix
             Unused here; kept for interface compatibility.
     trainable
             Whether the embedding layers are trainable.

     Returns
     -------
     ebd_type
             Tensor of shape (tf.shape(atype)[0], self.type_nchanl).
     """
     ebd_type = tf.cast(atype, self.filter_precision)
     ebd_type = ebd_type / float(self.ntypes)
     ebd_type = tf.reshape(ebd_type, [-1, ndim])
     for ii in range(self.type_nlayer):
         name = 'type_embed_layer_' + str(ii)
         ebd_type = one_layer(ebd_type,
                              self.type_nchanl,
                              activation_fn=self.filter_activation_fn,
                              precision=self.filter_precision,
                              name=name,
                              reuse=reuse,
                              seed=self.seed + ii,
                              trainable=trainable)
     # Final linear (no-activation) layer.
     # BUG FIX: the seed previously reused the stale loop variable `ii`,
     # which (a) collides with the last hidden layer's seed and
     # (b) raises NameError when type_nlayer == 0. Use the layer index.
     name = 'type_embed_layer_' + str(self.type_nlayer)
     ebd_type = one_layer(ebd_type,
                          self.type_nchanl,
                          activation_fn=None,
                          precision=self.filter_precision,
                          name=name,
                          reuse=reuse,
                          seed=self.seed + self.type_nlayer,
                          trainable=trainable)
     ebd_type = tf.reshape(ebd_type, [tf.shape(atype)[0], self.type_nchanl])
     return ebd_type
示例#9
0
    def _filter_r(self,
                  inputs,
                  type_input,
                  natoms,
                  activation_fn=tf.nn.tanh,
                  stddev=1.0,
                  bavg=0.0,
                  name='linear',
                  reuse=None,
                  trainable=True):
        """Radial (se_r) filter: embed each neighbor entry and average.

        For every neighbor type, the slice of `inputs` holding that
        type's neighbors is fed through an embedding net -- unless the
        (type_input, type_i) pair is excluded, in which case zeros of
        the same shape are used. The embeddings of all neighbors are
        then averaged and rescaled.

        Parameters
        ----------
        inputs
                natom x nnei matrix of radial descriptor entries.
        type_input
                Type of the center atom; -1 means type-agnostic.
        natoms
                Atom-count tensor (kept for interface compatibility).
        activation_fn, stddev, bavg, name, reuse, trainable
                Forwarded to the embedding networks.

        Returns
        -------
        result
                natom x filter_neuron[-1] filtered output.
        """
        # natom x nei
        outputs_size = [1] + self.filter_neuron
        with tf.variable_scope(name, reuse=reuse):
            start_index = 0
            xyz_scatter_total = []
            for type_i in range(self.ntypes):
                # cut-out inputs
                # with natom x nei_type_i
                inputs_i = tf.slice(inputs, [0, start_index],
                                    [-1, self.sel_r[type_i]])
                start_index += self.sel_r[type_i]
                shape_i = inputs_i.get_shape().as_list()
                # with (natom x nei_type_i) x 1
                xyz_scatter = tf.reshape(inputs_i, [-1, 1])
                if (type_input, type_i) not in self.exclude_types:
                    xyz_scatter = embedding_net(
                        xyz_scatter,
                        self.filter_neuron,
                        self.filter_precision,
                        activation_fn=activation_fn,
                        resnet_dt=self.filter_resnet_dt,
                        name_suffix="_" + str(type_i),
                        stddev=stddev,
                        bavg=bavg,
                        seed=self.seed,
                        trainable=trainable,
                        uniform_seed=self.uniform_seed,
                        initial_variables=self.embedding_net_variables,
                    )
                    # advance the seed so successive nets are initialized
                    # differently (skipped when uniform_seed is requested)
                    if (not self.uniform_seed) and (self.seed is not None):
                        self.seed += self.seed_shift
                    # natom x nei_type_i x out_size
                    xyz_scatter = tf.reshape(
                        xyz_scatter, (-1, shape_i[1], outputs_size[-1]))
                else:
                    # excluded pair: contribute zeros of the same shape
                    natom = tf.shape(inputs)[0]
                    xyz_scatter = tf.cast(
                        tf.fill((natom, shape_i[1], outputs_size[-1]), 0.),
                        GLOBAL_TF_FLOAT_PRECISION)
                xyz_scatter_total.append(xyz_scatter)

            # natom x nei x outputs_size
            xyz_scatter = tf.concat(xyz_scatter_total, axis=1)
            # natom x outputs_size
            # mean over neighbors, rescaled by an empirical factor
            res_rescale = 1. / 5.
            result = tf.reduce_mean(xyz_scatter, axis=1) * res_rescale

        return result
示例#10
0
 def _net(self, inputs, name, reuse=False):
     """Linear read-out: dot product of the flattened descriptor with a
     constant-initialized weight vector 'net_w'."""
     with tf.variable_scope(name, reuse=reuse):
         weights = tf.get_variable('net_w', [self.ndescrpt],
                                   GLOBAL_TF_FLOAT_PRECISION,
                                   tf.constant_initializer(self.net_w_i))
     flat = tf.reshape(inputs, [-1, self.ndescrpt])
     col = tf.reshape(weights, [self.ndescrpt, 1])
     return tf.reshape(tf.matmul(flat, col), [-1])
 def _net(self, inputs, name, reuse=False):
     """Linear read-out: dot product of the flattened descriptor with a
     constant-initialized weight vector 'net_w'.

     NOTE(review): near-duplicate of the preceding `_net`, differing only
     in the precision constant (`global_tf_float_precision` vs
     `GLOBAL_TF_FLOAT_PRECISION`) -- they come from different revisions.
     """
     with tf.variable_scope(name, reuse=reuse):
         net_w = tf.get_variable('net_w', [self.ndescrpt],
                                 global_tf_float_precision,
                                 tf.constant_initializer(self.net_w_i))
     dot_v = tf.matmul(tf.reshape(inputs, [-1, self.ndescrpt]),
                       tf.reshape(net_w, [self.ndescrpt, 1]))
     return tf.reshape(dot_v, [-1])
示例#12
0
 def build(self, input_d, rot_mat, natoms, reuse=None, suffix=''):
     """Sum the per-atom 3x3 fitted tensors over atoms and return the
     result flattened to one value per component per frame."""
     inputs = tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]])
     per_atom = self.polar_fitting.build(input_d, rot_mat, natoms, reuse,
                                         suffix)
     # nframes x natoms x 9
     per_atom = tf.reshape(per_atom, [tf.shape(inputs)[0], -1, 9])
     summed = tf.reduce_sum(per_atom, axis=1)
     return tf.reshape(summed, [-1])
示例#13
0
File: se_a.py — Project: njzjz/deepmd-kit
    def _concat_type_embedding(
        self,
        xyz_scatter,
        nframes,
        natoms,
        type_embedding,
    ):
        '''Concatenate `type_embedding` of neighbors and `xyz_scatter`.
        If not self.type_one_side, concatenate `type_embedding` of center atoms as well.

        Parameters
        ----------
        xyz_scatter:
                shape is [nframes*natoms[0]*self.nnei, 1]
        nframes:
                shape is []
        natoms:
                shape is [1+1+self.ntypes]
        type_embedding:
                shape is [self.ntypes, Y] where Y=jdata['type_embedding']['neuron'][-1]

        Returns
        -------
            embedding:
                environment of each atom represented by embedding.
        '''
        te_out_dim = type_embedding.get_shape().as_list()[-1]
        # NOTE(review): lookup over [ntypes, te_out_dim] with nnei indices
        # yields [self.nnei, te_out_dim]; the original comment claimed
        # 1+te_out_dim columns -- confirm.
        nei_embed = tf.nn.embedding_lookup(
            type_embedding,
            tf.cast(self.nei_type,
                    dtype=tf.int32))  # shape is [self.nnei, te_out_dim]
        nei_embed = tf.tile(
            nei_embed,
            (nframes * natoms[0],
             1))  # shape is [nframes*natoms[0]*self.nnei, te_out_dim]
        nei_embed = tf.reshape(nei_embed, [-1, te_out_dim])
        embedding_input = tf.concat(
            [xyz_scatter, nei_embed],
            1)  # shape is [nframes*natoms[0]*self.nnei, 1+te_out_dim]
        if not self.type_one_side:
            # also append the embedding of the center atom's type
            atm_embed = embed_atom_type(
                self.ntypes, natoms,
                type_embedding)  # shape is [natoms[0], te_out_dim]
            atm_embed = tf.tile(
                atm_embed,
                (nframes, self.nnei
                 ))  # shape is [nframes*natoms[0], self.nnei*te_out_dim]
            atm_embed = tf.reshape(
                atm_embed,
                [-1, te_out_dim
                 ])  # shape is [nframes*natoms[0]*self.nnei, te_out_dim]
            embedding_input = tf.concat(
                [embedding_input, atm_embed], 1
            )  # shape is [nframes*natoms[0]*self.nnei, 1+te_out_dim+te_out_dim]
        return embedding_input
示例#14
0
    def build(self,
              coord_: tf.Tensor,
              atype_: tf.Tensor,
              natoms: tf.Tensor,
              box_: tf.Tensor,
              mesh: tf.Tensor,
              input_dict: dict,
              reuse: bool = None,
              suffix: str = ''
              ) -> tf.Tensor:
        """Build the computational graph for the hybrid descriptor.

        Registers the cutoff radius and number of types under
        'descrpt_attr', builds every sub-descriptor in
        ``self.descrpt_list`` with its own index suffix, and
        concatenates their per-atom outputs.

        Parameters
        ----------
        coord_
                The coordinate of atoms
        atype_
                The type of atoms
        natoms
                The number of atoms. This tensor has the length of Ntypes + 2
                natoms[0]: number of local atoms
                natoms[1]: total number of atoms held by this processor
                natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
        box_
                The simulation box
        mesh
                For historical reasons, only the length of the Tensor matters.
                if size of mesh == 6, pbc is assumed.
                if size of mesh == 0, no-pbc is assumed.
        input_dict
                Dictionary for additional inputs
        reuse
                The weights in the networks should be reused when get the variable.
        suffix
                Name suffix to identify this descriptor

        Returns
        -------
        descriptor
                The output descriptor
        """
        with tf.variable_scope('descrpt_attr' + suffix, reuse=reuse):
            t_rcut = tf.constant(self.get_rcut(),
                                 name='rcut',
                                 dtype=GLOBAL_TF_FLOAT_PRECISION)
            t_ntypes = tf.constant(self.get_ntypes(),
                                   name='ntypes',
                                   dtype=tf.int32)
        parts = []
        for idx, sub in enumerate(self.descrpt_list):
            sub_out = sub.build(coord_, atype_, natoms, box_, mesh,
                                input_dict,
                                suffix=suffix + f'_{idx}',
                                reuse=reuse)
            # one row per atom for each sub-descriptor
            parts.append(tf.reshape(sub_out, [-1, sub.get_dim_out()]))
        dout = tf.concat(parts, axis=1)
        return tf.reshape(dout, [-1, natoms[0] * self.get_dim_out()])
示例#15
0
def one_layer(inputs,
              outputs_size,
              activation_fn=tf.nn.tanh,
              precision=global_tf_float_precision,
              stddev=1.0,
              bavg=0.0,
              name='linear',
              reuse=None,
              seed=None,
              use_timestep=False,
              trainable=True,
              useBN=False):
    """Build one fully-connected layer: ``activation(inputs @ w + b)``.

    Parameters
    ----------
    inputs
            2-D input tensor, shape (batch, shape[1]).
    outputs_size
            Number of output units.
    activation_fn
            Activation applied to the affine output; ``None`` returns the
            raw affine output.
    precision
            TF dtype of the created variables.
    stddev, bavg
            Initializer scale for the weights / bias mean.
    name, reuse
            Variable-scope name and reuse flag.
    seed
            Initializer seed.
    use_timestep
            If True (and an activation is set), multiply the activated
            output by a learned per-unit timestep 'idt'.
    trainable
            Whether the variables are trainable.
    useBN
            Batch normalization -- NOT implemented; those branches fall
            through and return None, preserved from the original code.

    Returns
    -------
    The layer output tensor, or None when `useBN` is set.
    """
    with tf.variable_scope(name, reuse=reuse):
        shape = inputs.get_shape().as_list()
        # Xavier-like scaling of the weight initializer
        w = tf.get_variable(
            'matrix', [shape[1], outputs_size],
            precision,
            tf.random_normal_initializer(stddev=stddev /
                                         np.sqrt(shape[1] + outputs_size),
                                         seed=seed),
            trainable=trainable)
        b = tf.get_variable('bias', [outputs_size],
                            precision,
                            tf.random_normal_initializer(stddev=stddev,
                                                         mean=bavg,
                                                         seed=seed),
                            trainable=trainable)
        hidden = tf.matmul(inputs, w) + b
        # idiom fix: `is not None` instead of `!= None`
        if activation_fn is not None and use_timestep:
            idt = tf.get_variable('idt', [outputs_size],
                                  precision,
                                  tf.random_normal_initializer(stddev=0.001,
                                                               mean=0.1,
                                                               seed=seed),
                                  trainable=trainable)
        if activation_fn is not None:
            if useBN:
                # batch norm was never implemented; made the implicit
                # None return explicit (behavior unchanged)
                return None
            if use_timestep:
                return tf.reshape(activation_fn(hidden),
                                  [-1, outputs_size]) * idt
            return tf.reshape(activation_fn(hidden), [-1, outputs_size])
        if useBN:
            return None
        return hidden
示例#16
0
 def _embedding_net(self,
                    inputs,
                    natoms,
                    filter_neuron,
                    activation_fn=tf.nn.tanh,
                    stddev=1.0,
                    bavg=0.0,
                    name='linear',
                    reuse=None,
                    seed=None,
                    trainable=True):
     '''Embed the radial component of the descriptor with one shared net.

     inputs:  nf x na x (nei x 4)
     outputs: nf x na x nei x output_size

     Only the first column (the radial component) of each 4-component
     neighbor entry is fed to the embedding network; the network is
     shared by all neighbor types.
     '''
     # natom x (nei x 4)
     inputs = tf.reshape(inputs, [-1, self.ndescrpt])
     shape = inputs.get_shape().as_list()
     outputs_size = [1] + filter_neuron
     with tf.variable_scope(name, reuse=reuse):
         xyz_scatter_total = []
         # with natom x (nei x 4)
         inputs_i = inputs
         shape_i = inputs_i.get_shape().as_list()
         # with (natom x nei) x 4
         inputs_reshape = tf.reshape(inputs_i, [-1, 4])
         # with (natom x nei) x 1 -- keep only the first column
         xyz_scatter = tf.reshape(tf.slice(inputs_reshape, [0, 0], [-1, 1]),
                                  [-1, 1])
         # with (natom x nei) x out_size
         xyz_scatter = embedding_net(xyz_scatter,
                                     self.filter_neuron,
                                     self.filter_precision,
                                     activation_fn=activation_fn,
                                     resnet_dt=self.filter_resnet_dt,
                                     stddev=stddev,
                                     bavg=bavg,
                                     seed=seed,
                                     trainable=trainable)
         # natom x nei x out_size
         xyz_scatter = tf.reshape(xyz_scatter,
                                  (-1, shape_i[1] // 4, outputs_size[-1]))
         xyz_scatter_total.append(xyz_scatter)
     # natom x nei x outputs_size
     xyz_scatter = tf.concat(xyz_scatter_total, axis=1)
     # nf x natom x nei x outputs_size
     # NOTE(review): tf.shape(inputs)[0] counts rows AFTER the
     # [-1, ndescrpt] reshape above -- confirm this matches the intended
     # leading dimension of the final output.
     xyz_scatter = tf.reshape(
         xyz_scatter,
         [tf.shape(inputs)[0], natoms[0], self.nnei, outputs_size[-1]])
     return xyz_scatter
示例#17
0
File: se_r.py — Project: njzjz/deepmd-kit
 def _pass_filter(self,
                  inputs,
                  natoms,
                  reuse=None,
                  suffix='',
                  trainable=True):
     """Run the radial filter networks over the descriptor.

     Two layouts are supported:
     * per-type filtering (also used for type_one_side combined with
       exclude_types): the descriptor is sliced per atom type and each
       slice goes through its own -- or the shared -- filter network;
     * pure type_one_side without exclusions: one shared filter
       processes all atoms in a single pass.
     """
     start_index = 0
     inputs = tf.reshape(inputs, [-1, self.ndescrpt * natoms[0]])
     output = []
     if not (self.type_one_side and len(self.exclude_types) == 0):
         for type_i in range(self.ntypes):
             # contiguous block of this type's atoms
             inputs_i = tf.slice(inputs, [0, start_index * self.ndescrpt],
                                 [-1, natoms[2 + type_i] * self.ndescrpt])
             inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
             if self.type_one_side:
                 # reuse NN parameters for all types to support type_one_side along with exclude_types
                 reuse = tf.AUTO_REUSE
                 filter_name = 'filter_type_all' + suffix
             else:
                 filter_name = 'filter_type_' + str(type_i) + suffix
             layer = self._filter_r(inputs_i,
                                    type_i,
                                    name=filter_name,
                                    natoms=natoms,
                                    reuse=reuse,
                                    trainable=trainable,
                                    activation_fn=self.filter_activation_fn)
             layer = tf.reshape(layer, [
                 tf.shape(inputs)[0],
                 natoms[2 + type_i] * self.get_dim_out()
             ])
             output.append(layer)
             start_index += natoms[2 + type_i]
     else:
         # single shared filter over every atom; type index -1 marks the
         # type-agnostic path
         inputs_i = inputs
         inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
         type_i = -1
         layer = self._filter_r(inputs_i,
                                type_i,
                                name='filter_type_all' + suffix,
                                natoms=natoms,
                                reuse=reuse,
                                trainable=trainable,
                                activation_fn=self.filter_activation_fn)
         layer = tf.reshape(
             layer, [tf.shape(inputs)[0], natoms[0] * self.get_dim_out()])
         output.append(layer)
     output = tf.concat(output, axis=1)
     return output
示例#18
0
 def comp_ef(self, dcoord, dbox, dtype, tnatoms, name, reuse=None):
     """Build energy, force and virial from the descriptor (with the
     external electric field passed through `input_dict`)."""
     descriptor = self.descrpt.build(dcoord,
                                     dtype,
                                     tnatoms,
                                     dbox,
                                     self.default_mesh,
                                     {"efield": self.efield},
                                     suffix=name,
                                     reuse=reuse)
     flat = tf.reshape(descriptor, [-1, self.descrpt.get_dim_out()])
     atom_ener = self._net(flat, name, reuse=reuse)
     per_frame = tf.reshape(atom_ener, [-1, self.natoms[0]])
     energy = tf.reduce_sum(per_frame, axis=1)
     # per-atom virial is produced but not returned
     force, virial, av = self.descrpt.prod_force_virial(per_frame, tnatoms)
     return energy, force, virial
示例#19
0
    def build(self,
              coord_,
              atype_,
              natoms,
              box,
              mesh,
              input_dict,
              suffix='',
              reuse=None):
        """Build the polar model graph: descriptor -> fitting -> polar.

        Registers the model attributes (type map, selected types, model
        type) under 'model_attr', builds the descriptor and the polar
        fitting network, and returns the named polarizability output.
        """

        with tf.variable_scope('model_attr' + suffix, reuse=reuse):
            t_tmap = tf.constant(' '.join(self.type_map),
                                 name='tmap',
                                 dtype=tf.string)
            t_st = tf.constant(self.get_sel_type(),
                               name='sel_type',
                               dtype=tf.int32)
            t_mt = tf.constant(self.model_type,
                               name='model_type',
                               dtype=tf.string)

        # NOTE(review): these reshaped tensors are never used below --
        # the descriptor is built from the raw coord_/atype_ inputs.
        coord = tf.reshape(coord_, [-1, natoms[1] * 3])
        atype = tf.reshape(atype_, [-1, natoms[1]])

        dout \
            = self.descrpt.build(coord_,
                                 atype_,
                                 natoms,
                                 box,
                                 mesh,
                                 davg = self.davg,
                                 dstd = self.dstd,
                                 suffix = suffix,
                                 reuse = reuse)
        # expose descriptor and rotation matrix under stable node names
        dout = tf.identity(dout, name='o_descriptor')
        rot_mat = self.descrpt.get_rot_mat()
        rot_mat = tf.identity(rot_mat, name='o_rot_mat')

        polar = self.fitting.build(dout,
                                   rot_mat,
                                   natoms,
                                   reuse=reuse,
                                   suffix=suffix)
        polar = tf.identity(polar, name='o_polar')

        return {'polar': polar}
示例#20
0
 def build(self,
           coord_,
           atype_,
           natoms,
           box,
           mesh,
           davg,
           dstd,
           suffix='',
           reuse=None):
     """Build the angular (a) and radial (r) sub-descriptors with their
     own statistics and concatenate their per-atom outputs."""
     self.dout_a = self.descrpt_a.build(coord_, atype_, natoms, box, mesh,
                                        davg[0], dstd[0],
                                        suffix=suffix + '_a', reuse=reuse)
     self.dout_r = self.descrpt_r.build(coord_, atype_, natoms, box, mesh,
                                        davg[1], dstd[1],
                                        suffix=suffix + '_r', reuse=reuse)
     # one row per atom for each part, then feature-wise concatenation
     self.dout_a = tf.reshape(self.dout_a,
                              [-1, self.descrpt_a.get_dim_out()])
     self.dout_r = tf.reshape(self.dout_r,
                              [-1, self.descrpt_r.get_dim_out()])
     self.dout = tf.concat([self.dout_a, self.dout_r], axis=1)
     self.dout = tf.reshape(self.dout, [-1, natoms[0] * self.get_dim_out()])
     return self.dout
示例#21
0
    def build(
            self,
            ntypes: int,
            reuse=None,
            suffix='',
    ):
        """Build the computational graph for the type embedding.

        Parameters
        ----------
        ntypes
                Number of atom types.
        reuse
                The weights in the networks should be reused when get the variable.
        suffix
                Name suffix to identify this descriptor

        Returns
        -------
        embedded_types
                The computational graph for embedded types
        """
        # one-hot encode the type indices: ntypes x ntypes
        type_ids = tf.convert_to_tensor(list(range(ntypes)), dtype=tf.int32)
        one_hot = tf.cast(
            tf.one_hot(tf.cast(type_ids, dtype=tf.int32), int(ntypes)),
            self.filter_precision)
        one_hot = tf.reshape(one_hot, [-1, ntypes])
        with tf.variable_scope('type_embed_net' + suffix, reuse=reuse):
            embedded = embedding_net(
                one_hot,
                self.neuron,
                activation_fn=self.filter_activation_fn,
                precision=self.filter_precision,
                resnet_dt=self.filter_resnet_dt,
                seed=self.seed,
                trainable=self.trainable,
                initial_variables=self.type_embedding_net_variables,
                uniform_seed=self.uniform_seed)
        # ntypes x neuron[-1]
        embedded = tf.reshape(embedded, [-1, self.neuron[-1]])
        # expose under a stable node name for saved graphs
        self.ebd_type = tf.identity(embedded, name='t_typeebd')
        return self.ebd_type
示例#22
0
 def _pass_filter(self,
                  inputs,
                  natoms,
                  reuse=None,
                  suffix='',
                  trainable=True):
     """
     Pass the flattened descriptor matrix through the filter (embedding)
     networks and concatenate the per-type results.

     Parameters
     ----------
     inputs
             Descriptor tensor, reshaped below to
             [nframes, natoms[0] * ndescrpt].
     natoms
             Atom-count tensor; natoms[0] is the number of local atoms and
             natoms[2 + i] the count of atoms of type i.
     reuse
             Whether to reuse the filter network variables.
     suffix
             Name suffix appended to the filter variable scopes.
     trainable
             Whether the filter network weights are trainable.

     Returns
     -------
     output
             Filtered descriptor, [nframes, natoms[0] * get_dim_out()].
     output_qmat
             Rotation-matrix output,
             [nframes, natoms[0] * get_dim_rot_mat_1() * 3].
     """
     start_index = 0
     inputs = tf.reshape(inputs, [-1, self.ndescrpt * natoms[0]])
     output = []
     output_qmat = []
     if not self.type_one_side:
         # One filter network per atom type: slice each type's columns out
         # of the (type-sorted) descriptor and filter them independently.
         for type_i in range(self.ntypes):
             inputs_i = tf.slice(inputs, [0, start_index * self.ndescrpt],
                                 [-1, natoms[2 + type_i] * self.ndescrpt])
             inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
             layer, qmat = self._filter(
                 tf.cast(inputs_i, self.filter_precision),
                 type_i,
                 name='filter_type_' + str(type_i) + suffix,
                 natoms=natoms,
                 reuse=reuse,
                 seed=self.seed,
                 trainable=trainable,
                 activation_fn=self.filter_activation_fn)
             # back to frame-major layout for concatenation along axis 1
             layer = tf.reshape(layer, [
                 tf.shape(inputs)[0],
                 natoms[2 + type_i] * self.get_dim_out()
             ])
             qmat = tf.reshape(qmat, [
                 tf.shape(inputs)[0],
                 natoms[2 + type_i] * self.get_dim_rot_mat_1() * 3
             ])
             output.append(layer)
             output_qmat.append(qmat)
             # advance past this type's atoms for the next slice
             start_index += natoms[2 + type_i]
     else:
         # Single shared filter network applied to all atoms at once.
         inputs_i = inputs
         inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
         type_i = -1
         layer, qmat = self._filter(tf.cast(inputs_i,
                                            self.filter_precision),
                                    type_i,
                                    name='filter_type_all' + suffix,
                                    natoms=natoms,
                                    reuse=reuse,
                                    seed=self.seed,
                                    trainable=trainable,
                                    activation_fn=self.filter_activation_fn)
         layer = tf.reshape(
             layer, [tf.shape(inputs)[0], natoms[0] * self.get_dim_out()])
         qmat = tf.reshape(qmat, [
             tf.shape(inputs)[0], natoms[0] * self.get_dim_rot_mat_1() * 3
         ])
         output.append(layer)
         output_qmat.append(qmat)
     output = tf.concat(output, axis=1)
     output_qmat = tf.concat(output_qmat, axis=1)
     return output, output_qmat
    def comp_ef(self, dcoord, dbox, dtype, tnatoms, name, reuse=None):
        """
        Build the graph computing energy, force, and virial from raw inputs.

        Parameters
        ----------
        dcoord
                Atom coordinates fed to the descriptor op.
        dbox
                Simulation box tensor.
        dtype
                Atom types tensor.
        tnatoms
                Atom-count tensor passed to the custom ops.
        name
                Name (scope) for the fitting network built by self._net.
        reuse
                Whether to reuse the fitting network variables.

        Returns
        -------
        energy, force, virial
                Total energy per frame, forces, and total virial.
        """
        t_default_mesh = tf.constant(self.default_mesh)
        # NOTE(review): rot_mat is returned by the op but unused here.
        descrpt, descrpt_deriv, rij, nlist, axis, rot_mat \
            = op_module.descrpt (dcoord,
                                 dtype,
                                 tnatoms,
                                 dbox,
                                 t_default_mesh,
                                 self.t_avg,
                                 self.t_std,
                                 rcut_a = self.rcut_a,
                                 rcut_r = self.rcut_r,
                                 sel_a = self.sel_a,
                                 sel_r = self.sel_r,
                                 axis_rule = self.axis_rule)
        # keep intermediates on self so other methods can reuse them
        self.axis = axis
        self.nlist = nlist
        self.descrpt = descrpt
        inputs_reshape = tf.reshape(descrpt, [-1, self.ndescrpt])
        atom_ener = self._net(inputs_reshape, name, reuse=reuse)
        atom_ener_reshape = tf.reshape(atom_ener, [-1, self.natoms[0]])
        # total energy = sum of atomic energies over local atoms
        energy = tf.reduce_sum(atom_ener_reshape, axis=1)
        # d(atom_ener)/d(descriptor), used by the force/virial ops below
        net_deriv_ = tf.gradients(atom_ener, inputs_reshape)
        net_deriv = net_deriv_[0]
        net_deriv_reshape = tf.reshape(net_deriv,
                                       [-1, self.natoms[0] * self.ndescrpt])

        force = op_module.prod_force(net_deriv_reshape,
                                     descrpt_deriv,
                                     nlist,
                                     axis,
                                     tnatoms,
                                     n_a_sel=self.nnei_a,
                                     n_r_sel=self.nnei_r)
        # atom_vir is computed but only the total virial is returned
        virial, atom_vir = op_module.prod_virial(net_deriv_reshape,
                                                 descrpt_deriv,
                                                 rij,
                                                 nlist,
                                                 axis,
                                                 tnatoms,
                                                 n_a_sel=self.nnei_a,
                                                 n_r_sel=self.nnei_r)
        return energy, force, virial
示例#24
0
    def build(self,
              input_d,
              rot_mat,
              natoms,
              reuse=None,
              suffix='') -> tf.Tensor:
        """
        Build the computational graph for the global polarizability.

        Parameters
        ----------
        input_d
                The input descriptor
        rot_mat
                The rotation matrix from the descriptor.
        natoms
                The number of atoms. This tensor has the length of Ntypes + 2
                natoms[0]: number of local atoms
                natoms[1]: total number of atoms held by this processor
                natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
        reuse
                The weights in the networks should be reused when get the variable.
        suffix
                Name suffix to identify this descriptor

        Returns
        -------
        polar
                The system polarizability
        """
        # Frame-major view of the descriptor; only used to recover nframes.
        frames = tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]])
        nframes = tf.shape(frames)[0]
        # Atomic polarizabilities from the wrapped per-atom fitting net.
        atomic = self.polar_fitting.build(input_d, rot_mat, natoms, reuse,
                                          suffix)
        # nframes x natoms x 9 -> sum over atoms -> nframes x 9
        atomic = tf.reshape(atomic, [nframes, -1, 9])
        total = tf.reduce_sum(atomic, axis=1)
        tf.summary.histogram('fitting_net_output', total)
        return tf.reshape(total, [-1])
示例#25
0
    def prod_force_virial(
            self, atom_ener: tf.Tensor,
            natoms: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
        """
        Compute force and virial from the atomic energy.

        Parameters
        ----------
        atom_ener
                The atomic energy
        natoms
                The number of atoms. This tensor has the length of Ntypes + 2
                natoms[0]: number of local atoms
                natoms[1]: total number of atoms held by this processor
                natoms[i]: 2 <= i < Ntypes+2, number of type i atoms

        Returns
        -------
        force
                The force on atoms
        virial
                The total virial
        atom_virial
                The atomic virial
        """
        # Gradient of the atomic energy w.r.t. the descriptor.
        grad = tf.gradients(atom_ener, self.descrpt)[0]
        tf.summary.histogram('net_derivative', grad)
        grad_flat = tf.reshape(grad, [-1, natoms[0] * self.ndescrpt])
        force = op_module.prod_force(grad_flat,
                                     self.descrpt_deriv,
                                     self.nlist,
                                     self.axis,
                                     natoms,
                                     n_a_sel=self.nnei_a,
                                     n_r_sel=self.nnei_r)
        virial, atom_virial = op_module.prod_virial(grad_flat,
                                                    self.descrpt_deriv,
                                                    self.rij,
                                                    self.nlist,
                                                    self.axis,
                                                    natoms,
                                                    n_a_sel=self.nnei_a,
                                                    n_r_sel=self.nnei_r)
        tf.summary.histogram('force', force)
        tf.summary.histogram('virial', virial)
        tf.summary.histogram('atom_virial', atom_virial)
        return force, virial, atom_virial
示例#26
0
 def build_efv(self, dcoord, dbox, dtype, tnatoms, name, op, reuse=None):
     """
     Build a graph computing energy, force, virial, and atomic quantities
     using a descriptor selected by `op`.

     Parameters
     ----------
     dcoord, dbox, dtype, tnatoms
             Coordinates, box, atom types, and atom-count tensors fed to
             the descriptor build.
     name
             Suffix used for the descriptor's variable scopes.
     op
             The environment-matrix op; selects which descriptor class
             is instantiated below.
     reuse
             Whether to reuse the descriptor's variables.

     Returns
     -------
     energy, force, virial, atom_ener, atom_vir
     """
     # normalize the electric field per atom (3 components each)
     efield = tf.reshape(self.efield, [-1, 3])
     efield = self._normalize_3d(efield)
     efield = tf.reshape(efield, [-1, tnatoms[0] * 3])
     # NOTE(review): hard-coded rcut/rcut_smth/seed values; presumably
     # test fixtures — confirm before reusing in production paths.
     if op != op_module.prod_env_mat_a:
         descrpt = DescrptSeAEfLower(
             op, **{
                 'sel': self.sel_a,
                 'rcut': 6,
                 'rcut_smth': 5.5,
                 'seed': 1,
                 'uniform_seed': True
             })
     else:
         descrpt = DescrptSeA(
             **{
                 'sel': self.sel_a,
                 'rcut': 6,
                 'rcut_smth': 0.5,
                 'seed': 1,
                 'uniform_seed': True
             })
     dout = descrpt.build(dcoord,
                          dtype,
                          tnatoms,
                          dbox,
                          tf.constant(self.default_mesh),
                          {'efield': efield},
                          suffix=name,
                          reuse=reuse)
     dout = tf.reshape(dout, [-1, descrpt.get_dim_out()])
     # toy atomic energy: sum of descriptor components per atom
     atom_ener = tf.reduce_sum(dout, axis=1)
     atom_ener_reshape = tf.reshape(atom_ener, [-1, self.natoms[0]])
     energy = tf.reduce_sum(atom_ener_reshape, axis=1)
     force, virial, atom_vir \
         = descrpt.prod_force_virial (atom_ener, tnatoms)
     return energy, force, virial, atom_ener, atom_vir
示例#27
0
File: polar.py  Project: njzjz/deepmd-kit
    def build(self,
              input_d,
              rot_mat,
              natoms,
              reuse=None,
              suffix=''):
        """
        Build the fitting graph producing per-atom polarizability tensors.

        Parameters
        ----------
        input_d
                The input descriptor.
        rot_mat
                The rotation matrix from the descriptor, 9 components per atom.
        natoms
                Atom-count tensor: natoms[0] local atoms, natoms[2+i] atoms
                of type i.
        reuse
                Whether to reuse the fitting network variables.
        suffix
                Name suffix to identify this fitting net.

        Returns
        -------
        polar
                Flattened per-atom 3x3 polarizability tensors, cast to
                GLOBAL_TF_FLOAT_PRECISION.
        """
        start_index = 0
        inputs = tf.cast(tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision)
        rot_mat = tf.reshape(rot_mat, [-1, 9 * natoms[0]])

        outs_list = []
        for type_i in range(self.ntypes):
            # cut-out this type's slice of inputs and rotation matrices
            inputs_i = tf.slice(inputs,
                                [0, start_index * self.dim_descrpt],
                                [-1, natoms[2 + type_i] * self.dim_descrpt])
            inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
            rot_mat_i = tf.slice(rot_mat,
                                 [0, start_index * 9],
                                 [-1, natoms[2 + type_i] * 9])
            rot_mat_i = tf.reshape(rot_mat_i, [-1, 3, 3])
            # advance past this type even when it is not selected
            start_index += natoms[2 + type_i]
            if type_i not in self.sel_type:
                continue
            layer = inputs_i
            for ii in range(len(self.n_neuron)):
                if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
                    # residual (resnet) connection when widths match
                    layer += one_layer(layer, self.n_neuron[ii], name='layer_' + str(ii) + '_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, use_timestep=self.resnet_dt, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision)
                else:
                    layer = one_layer(layer, self.n_neuron[ii], name='layer_' + str(ii) + '_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision)
            # (nframes x natoms) x 9
            final_layer = one_layer(layer, 9, activation_fn=None, name='final_layer_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, precision=self.fitting_precision, final_layer=True)
            # (nframes x natoms) x 3 x 3
            final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2 + type_i], 3, 3])
            # symmetrize: (nframes x natoms) x 3 x 3
            final_layer = final_layer + tf.transpose(final_layer, perm=[0, 2, 1])
            # rotate into the global frame: R^T * T * R
            final_layer = tf.matmul(final_layer, rot_mat_i)
            final_layer = tf.matmul(rot_mat_i, final_layer, transpose_a=True)
            # nframes x natoms x 3 x 3
            final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[2 + type_i], 3, 3])

            # collect per-type results; concatenated along the atom axis below
            outs_list.append(final_layer)
        outs = tf.concat(outs_list, axis=1)

        tf.summary.histogram('fitting_net_output', outs)
        return tf.cast(tf.reshape(outs, [-1]), GLOBAL_TF_FLOAT_PRECISION)
示例#28
0
 def prod_force_virial(self, atom_ener, natoms):
     """Compute force, total virial, and atomic virial from the atomic energy."""
     # Gradient of the atomic energy w.r.t. the reshaped descriptor.
     grad = tf.gradients(atom_ener, self.descrpt_reshape)[0]
     grad_flat = tf.reshape(grad, [-1, natoms[0] * self.ndescrpt])
     force = op_module.prod_force_se_r(grad_flat,
                                       self.descrpt_deriv,
                                       self.nlist,
                                       natoms)
     virial, atom_virial = op_module.prod_virial_se_r(grad_flat,
                                                      self.descrpt_deriv,
                                                      self.rij,
                                                      self.nlist,
                                                      natoms)
     return force, virial, atom_virial
示例#29
0
    def build(self, learning_rate, natoms, model_dict, label_dict, suffix):
        """
        Build the energy + energy-dipole loss graph.

        Parameters
        ----------
        learning_rate
                Current learning rate tensor, used to interpolate prefactors.
        natoms
                Atom-count input; NOTE(review): immediately shadowed below by
                tf.shape(atom_ener)[1] — the parameter value is never used.
        model_dict
                Model outputs; reads 'coord', 'energy', 'atom_ener'.
        label_dict
                Labels; reads 'energy', 'energy_dipole', and the
                corresponding 'find_*' flags.
        suffix
                Name suffix for the loss ops.

        Returns
        -------
        l2_loss, more_loss
                Total weighted L2 loss and a dict of its components.
        """
        coord = model_dict['coord']
        energy = model_dict['energy']
        atom_ener = model_dict['atom_ener']
        nframes = tf.shape(atom_ener)[0]
        natoms = tf.shape(atom_ener)[1]
        # build energy dipole: subtract the per-atom mean energy, then take
        # the energy-weighted sum of coordinates per frame.
        atom_ener0 = atom_ener - tf.reshape(
            tf.tile(
                tf.reshape(energy / global_cvt_2_ener_float(natoms), [-1, 1]),
                [1, natoms]), [nframes, natoms])
        coord = tf.reshape(coord, [nframes, natoms, 3])
        atom_ener0 = tf.reshape(atom_ener0, [nframes, 1, natoms])
        ener_dipole = tf.matmul(atom_ener0, coord)
        ener_dipole = tf.reshape(ener_dipole, [nframes, 3])

        energy_hat = label_dict['energy']
        ener_dipole_hat = label_dict['energy_dipole']
        find_energy = label_dict['find_energy']
        find_ener_dipole = label_dict['find_energy_dipole']

        l2_ener_loss = tf.reduce_mean(tf.square(energy - energy_hat),
                                      name='l2_' + suffix)

        ener_dipole_reshape = tf.reshape(ener_dipole, [-1])
        ener_dipole_hat_reshape = tf.reshape(ener_dipole_hat, [-1])
        # NOTE(review): same name as l2_ener_loss above; TF will uniquify
        # the second op's name ('l2_<suffix>_1') — confirm this is intended.
        l2_ener_dipole_loss = tf.reduce_mean(
            tf.square(ener_dipole_reshape - ener_dipole_hat_reshape),
            name='l2_' + suffix)

        # atom_norm_ener  = 1./ global_cvt_2_ener_float(natoms[0])
        atom_norm_ener = 1. / global_cvt_2_ener_float(natoms)
        # prefactors interpolate linearly from start_* to limit_* as the
        # learning rate decays from its starting value.
        pref_e = global_cvt_2_ener_float(
            find_energy * (self.limit_pref_e +
                           (self.start_pref_e - self.limit_pref_e) *
                           learning_rate / self.starter_learning_rate))
        # NOTE(review): uses global_cvt_2_tf_float while pref_e uses
        # global_cvt_2_ener_float — verify the precision mismatch is deliberate.
        pref_ed = global_cvt_2_tf_float(
            find_ener_dipole * (self.limit_pref_ed +
                                (self.start_pref_ed - self.limit_pref_ed) *
                                learning_rate / self.starter_learning_rate))

        l2_loss = 0
        more_loss = {}
        l2_loss += atom_norm_ener * (pref_e * l2_ener_loss)
        l2_loss += global_cvt_2_ener_float(pref_ed * l2_ener_dipole_loss)
        more_loss['l2_ener_loss'] = l2_ener_loss
        more_loss['l2_ener_dipole_loss'] = l2_ener_dipole_loss

        self.l2_l = l2_loss
        self.l2_more = more_loss
        return l2_loss, more_loss
示例#30
0
File: wfc.py  Project: y1xiaoc/deepmd-kit
    def build(self,
              input_d,
              rot_mat,
              natoms,
              reuse=None,
              suffix=''):
        """
        Build the fitting graph for Wannier function centroids (WFC).

        Parameters
        ----------
        input_d
                The input descriptor.
        rot_mat
                The rotation matrix from the descriptor, 9 components per atom.
        natoms
                Atom-count tensor: natoms[0] local atoms, natoms[2+i] atoms
                of type i.
        reuse
                Whether to reuse the fitting network variables.
        suffix
                Name suffix to identify this fitting net.

        Returns
        -------
        wfc
                Flattened per-atom WFC coordinates, cast to
                GLOBAL_TF_FLOAT_PRECISION.
        """
        start_index = 0
        inputs = tf.cast(tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision)
        rot_mat = tf.reshape(rot_mat, [-1, 9 * natoms[0]])

        outs_list = []
        for type_i in range(self.ntypes):
            # cut-out this type's slice of inputs and rotation matrices
            inputs_i = tf.slice(inputs,
                                [0, start_index * self.dim_descrpt],
                                [-1, natoms[2 + type_i] * self.dim_descrpt])
            inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
            rot_mat_i = tf.slice(rot_mat,
                                 [0, start_index * 9],
                                 [-1, natoms[2 + type_i] * 9])
            rot_mat_i = tf.reshape(rot_mat_i, [-1, 3, 3])
            # advance past this type even when it is not selected
            start_index += natoms[2 + type_i]
            if type_i not in self.sel_type:
                continue
            layer = inputs_i
            for ii in range(len(self.n_neuron)):
                if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
                    # residual (resnet) connection when widths match
                    layer += one_layer(layer, self.n_neuron[ii], name='layer_' + str(ii) + '_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, use_timestep=self.resnet_dt, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision, uniform_seed=self.uniform_seed)
                else:
                    layer = one_layer(layer, self.n_neuron[ii], name='layer_' + str(ii) + '_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision, uniform_seed=self.uniform_seed)
                if (not self.uniform_seed) and (self.seed is not None):
                    self.seed += self.seed_shift
            # (nframes x natoms) x (nwfc x 3)
            final_layer = one_layer(layer, self.wfc_numb * 3, activation_fn=None, name='final_layer_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, precision=self.fitting_precision, uniform_seed=self.uniform_seed)
            if (not self.uniform_seed) and (self.seed is not None):
                self.seed += self.seed_shift
            # (nframes x natoms) x nwfc(wc) x 3(coord_local)
            final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2 + type_i], self.wfc_numb, 3])
            # rotate to the global frame: (nframes x natoms) x nwfc(wc) x 3(coord)
            final_layer = tf.matmul(final_layer, rot_mat_i)
            # nframes x natoms x nwfc(wc) x 3(coord)
            final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[2 + type_i], self.wfc_numb, 3])

            # collect per-type results; concatenated along the atom axis below
            outs_list.append(final_layer)
        outs = tf.concat(outs_list, axis=1)

        tf.summary.histogram('fitting_net_output', outs)
        return tf.cast(tf.reshape(outs, [-1]), GLOBAL_TF_FLOAT_PRECISION)