def test_nopbc_self_built_nlist(self):
    """Check env-mat shapes and values when the op builds its own
    neighbor list (an empty mesh tensor signals no-PBC mode)."""
    # Zero-length int32 mesh -> the op constructs the nlist internally.
    empty_mesh = tf.constant(np.zeros(0, dtype=np.int32))
    t_em, t_em_deriv, t_rij, t_nlist = op_module.prod_env_mat_a(
        self.tcoord, self.ttype, self.tnatoms, self.tbox,
        empty_mesh,
        self.t_avg, self.t_std,
        rcut_a=-1,
        rcut_r=self.rcut,
        rcut_r_smth=self.rcut_smth,
        sel_a=self.sel,
        sel_r=[0, 0])
    self.sess.run(tf.global_variables_initializer())
    feed = {self.tcoord: self.dcoord,
            self.ttype: self.dtype,
            self.tbox: self.dbox,
            self.tnatoms: self.dnatoms}
    d_em, d_em_deriv, d_rij, d_nlist = self.sess.run(
        [t_em, t_em_deriv, t_rij, t_nlist], feed_dict=feed)
    # One flattened row per frame for each of the four outputs.
    self.assertEqual(d_em.shape, (self.nframes, self.nloc * self.ndescrpt))
    self.assertEqual(d_em_deriv.shape,
                     (self.nframes, self.nloc * self.ndescrpt * 3))
    self.assertEqual(d_rij.shape, (self.nframes, self.nloc * self.nnei * 3))
    self.assertEqual(d_nlist.shape, (self.nframes, self.nloc * self.nnei))
    # Every frame must reproduce the reference descriptor.
    for frame in d_em:
        np.testing.assert_almost_equal(frame, self.nopbc_expected_output, 5)
def test_nopbc_self_built_nlist_deriv(self):
    """Numerically validate the env-mat derivative when the neighbor
    list is built by the op itself (empty mesh -> no PBC)."""
    step = 1e-4  # finite-difference step handed to the checker
    # Zero-length int32 mesh -> the op constructs the nlist internally.
    empty_mesh = tf.constant(np.zeros(0, dtype=np.int32))
    t_em, t_em_deriv, t_rij, t_nlist = op_module.prod_env_mat_a(
        self.tcoord, self.ttype, self.tnatoms, self.tbox,
        empty_mesh,
        self.t_avg, self.t_std,
        rcut_a=-1,
        rcut_r=self.rcut,
        rcut_r_smth=self.rcut_smth,
        sel_a=self.sel,
        sel_r=[0, 0])
    self.sess.run(tf.global_variables_initializer())
    self.check_deriv_numerical_deriv(step, t_em, t_em_deriv, t_rij, t_nlist)
def comp_ef(self, dcoord, dbox, dtype, tnatoms, name, reuse=None):
    """Build energy / force / virial tensors from coordinates.

    The descriptor op produces the environment matrix; a network maps
    it to per-atom energies, and the force/virial ops back-propagate
    the network gradient through the descriptor derivative.
    """
    descrpt, descrpt_deriv, rij, nlist = op_module.prod_env_mat_a(
        dcoord, dtype, tnatoms, dbox,
        tf.constant(self.default_mesh),
        self.t_avg, self.t_std,
        rcut_a=self.rcut_a,
        rcut_r=self.rcut_r,
        rcut_r_smth=self.rcut_r_smth,
        sel_a=self.sel_a,
        sel_r=self.sel_r)
    # One descriptor row per atom for the fitting net.
    flat_descrpt = tf.reshape(descrpt, [-1, self.ndescrpt])
    atom_ener = self._net(flat_descrpt, name, reuse=reuse)
    energy = tf.reduce_sum(
        tf.reshape(atom_ener, [-1, self.natoms[0]]), axis=1)
    # d(atom_ener)/d(descriptor), flattened to one row per frame.
    net_deriv = tf.gradients(atom_ener, flat_descrpt)[0]
    net_deriv_flat = tf.reshape(
        net_deriv, [-1, self.natoms[0] * self.ndescrpt])
    force = op_module.prod_force_se_a(
        net_deriv_flat, descrpt_deriv, nlist, tnatoms,
        n_a_sel=self.nnei_a, n_r_sel=self.nnei_r)
    virial, atom_vir = op_module.prod_virial_se_a(
        net_deriv_flat, descrpt_deriv, rij, nlist, tnatoms,
        n_a_sel=self.nnei_a, n_r_sel=self.nnei_r)
    return energy, force, virial
def build(self,
          coord_: tf.Tensor,
          atype_: tf.Tensor,
          natoms: tf.Tensor,
          box_: tf.Tensor,
          mesh: tf.Tensor,
          input_dict: dict,
          reuse: bool = None,
          suffix: str = '') -> tf.Tensor:
    """
    Build the computational graph for the descriptor

    Parameters
    ----------
    coord_
            The coordinate of atoms
    atype_
            The type of atoms
    natoms
            The number of atoms. This tensor has the length of Ntypes + 2
            natoms[0]: number of local atoms
            natoms[1]: total number of atoms held by this processor
            natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
    mesh
            For historical reasons, only the length of the Tensor matters.
            if size of mesh == 6, pbc is assumed.
            if size of mesh == 0, no-pbc is assumed.
    input_dict
            Dictionary for additional inputs
    reuse
            The weights in the networks should be reused when get the variable.
    suffix
            Name suffix to identify this descriptor

    Returns
    -------
    descriptor
            The output descriptor
    """
    davg = self.davg
    dstd = self.dstd
    with tf.variable_scope('descrpt_attr' + suffix, reuse=reuse):
        # Default statistics when none were computed: zero mean, unit std.
        if davg is None:
            davg = np.zeros([self.ntypes, self.ndescrpt])
        if dstd is None:
            dstd = np.ones([self.ntypes, self.ndescrpt])
        # Constants stored in the graph so a frozen model carries its
        # cutoff / type / selection metadata.
        t_rcut = tf.constant(np.max([self.rcut_r, self.rcut_a]),
                             name='rcut',
                             dtype=GLOBAL_TF_FLOAT_PRECISION)
        t_ntypes = tf.constant(self.ntypes,
                               name='ntypes',
                               dtype=tf.int32)
        t_ndescrpt = tf.constant(self.ndescrpt,
                                 name='ndescrpt',
                                 dtype=tf.int32)
        t_sel = tf.constant(self.sel_a,
                            name='sel',
                            dtype=tf.int32)
        # Preserve the pre-adjustment selection if one exists.
        t_original_sel = tf.constant(self.original_sel if self.original_sel is not None else self.sel_a,
                                     name='original_sel',
                                     dtype=tf.int32)
        # Non-trainable variables holding the normalization statistics.
        self.t_avg = tf.get_variable('t_avg',
                                     davg.shape,
                                     dtype=GLOBAL_TF_FLOAT_PRECISION,
                                     trainable=False,
                                     initializer=tf.constant_initializer(davg))
        self.t_std = tf.get_variable('t_std',
                                     dstd.shape,
                                     dtype=GLOBAL_TF_FLOAT_PRECISION,
                                     trainable=False,
                                     initializer=tf.constant_initializer(dstd))

    # The control dependency forces the sel constants into the graph
    # even though nothing consumes them directly.
    with tf.control_dependencies([t_sel, t_original_sel]):
        coord = tf.reshape(coord_, [-1, natoms[1] * 3])
    box = tf.reshape(box_, [-1, 9])
    atype = tf.reshape(atype_, [-1, natoms[1]])

    # Environment matrix, its derivative, relative coordinates and
    # neighbor list, all produced by the custom op.
    self.descrpt, self.descrpt_deriv, self.rij, self.nlist \
        = op_module.prod_env_mat_a(coord,
                                   atype,
                                   natoms,
                                   box,
                                   mesh,
                                   self.t_avg,
                                   self.t_std,
                                   rcut_a=self.rcut_a,
                                   rcut_r=self.rcut_r,
                                   rcut_r_smth=self.rcut_r_smth,
                                   sel_a=self.sel_a,
                                   sel_r=self.sel_r)
    # only used when tensorboard was set as true
    tf.summary.histogram('descrpt', self.descrpt)
    tf.summary.histogram('rij', self.rij)
    tf.summary.histogram('nlist', self.nlist)

    self.descrpt_reshape = tf.reshape(self.descrpt, [-1, self.ndescrpt])
    self._identity_tensors(suffix=suffix)

    # Embedding networks map the (normalized) descriptor to the output.
    self.dout, self.qmat = self._pass_filter(self.descrpt_reshape,
                                             atype,
                                             natoms,
                                             input_dict,
                                             suffix=suffix,
                                             reuse=reuse,
                                             trainable=self.trainable)

    # only used when tensorboard was set as true
    tf.summary.histogram('embedding_net_output', self.dout)
    return self.dout
def __init__(self,
             rcut: float,
             rcut_smth: float,
             sel: List[str],
             neuron: List[int] = [24, 48, 96],
             axis_neuron: int = 8,
             resnet_dt: bool = False,
             trainable: bool = True,
             seed: int = None,
             type_one_side: bool = True,
             exclude_types: List[List[int]] = [],
             set_davg_zero: bool = False,
             activation_function: str = 'tanh',
             precision: str = 'default',
             uniform_seed: bool = False) -> None:
    """Constructor.

    Parameters
    ----------
    rcut
            Cutoff radius of the descriptor
    rcut_smth
            Where the smoothing of the environment matrix starts;
            must not exceed rcut
    sel
            Number of selected neighbors per atom type
    neuron
            Sizes of the embedding-net hidden layers
    axis_neuron
            Size of the submatrix taken from the embedding output
    resnet_dt
            Whether the embedding net uses resnet timesteps
    trainable
            Whether the embedding-net variables are trainable
    seed
            Random seed for network initialization
    type_one_side
            Whether to build embeddings depending on the neighbor type only
    exclude_types
            Pairs of types whose interaction is excluded (stored symmetrically)
    set_davg_zero
            Whether the descriptor average is forced to zero
    activation_function
            Name of the activation function
    precision
            Name of the float precision used by the networks
    uniform_seed
            Whether sub-networks share a deterministic seed shift
    """
    # Validate cutoffs early: smoothing must start inside the cutoff.
    if rcut < rcut_smth:
        raise RuntimeError(
            "rcut_smth (%f) should be no more than rcut (%f)!" % (rcut_smth, rcut))
    self.sel_a = sel
    self.rcut_r = rcut
    self.rcut_r_smth = rcut_smth
    self.filter_neuron = neuron
    self.n_axis_neuron = axis_neuron
    self.filter_resnet_dt = resnet_dt
    self.seed = seed
    self.uniform_seed = uniform_seed
    # Deterministic per-network seed offset derived from the layer sizes.
    self.seed_shift = embedding_net_rand_seed_shift(self.filter_neuron)
    self.trainable = trainable
    self.compress_activation_fn = get_activation_func(activation_function)
    self.filter_activation_fn = get_activation_func(activation_function)
    self.filter_precision = get_precision(precision)
    # Excluded type pairs are stored symmetrically (both orderings).
    self.exclude_types = set()
    for tt in exclude_types:
        assert (len(tt) == 2)
        self.exclude_types.add((tt[0], tt[1]))
        self.exclude_types.add((tt[1], tt[0]))
    self.set_davg_zero = set_davg_zero
    self.type_one_side = type_one_side

    # descrpt config
    # Angular-only descriptor: radial selection is all zeros.
    self.sel_r = [0 for ii in range(len(self.sel_a))]
    self.ntypes = len(self.sel_a)
    assert (self.ntypes == len(self.sel_r))
    self.rcut_a = -1
    # numb of neighbors and numb of descrptors
    self.nnei_a = np.cumsum(self.sel_a)[-1]
    self.nnei_r = np.cumsum(self.sel_r)[-1]
    self.nnei = self.nnei_a + self.nnei_r
    # 4 components (1/r and the 3 scaled coordinates) per angular neighbor.
    self.ndescrpt_a = self.nnei_a * 4
    self.ndescrpt_r = self.nnei_r * 1
    self.ndescrpt = self.ndescrpt_a + self.ndescrpt_r
    self.useBN = False
    self.dstd = None
    self.davg = None
    self.compress = False
    self.embedding_net_variables = None
    self.mixed_prec = None
    self.place_holders = {}
    nei_type = np.array([])
    for ii in range(self.ntypes):
        nei_type = np.append(nei_type, ii * np.ones(self.sel_a[ii]))  # like a mask
    self.nei_type = tf.constant(nei_type, dtype=tf.int32)

    # Separate sub-graph/session used only to evaluate descriptor
    # statistics (avg/std) with zero-mean, unit-std normalization.
    avg_zero = np.zeros([self.ntypes, self.ndescrpt]).astype(GLOBAL_NP_FLOAT_PRECISION)
    std_ones = np.ones([self.ntypes, self.ndescrpt]).astype(GLOBAL_NP_FLOAT_PRECISION)
    sub_graph = tf.Graph()
    with sub_graph.as_default():
        name_pfx = 'd_sea_'
        for ii in ['coord', 'box']:
            self.place_holders[ii] = tf.placeholder(
                GLOBAL_NP_FLOAT_PRECISION, [None, None], name=name_pfx + 't_' + ii)
        self.place_holders['type'] = tf.placeholder(tf.int32, [None, None],
                                                    name=name_pfx + 't_type')
        self.place_holders['natoms_vec'] = tf.placeholder(
            tf.int32, [self.ntypes + 2], name=name_pfx + 't_natoms')
        self.place_holders['default_mesh'] = tf.placeholder(
            tf.int32, [None], name=name_pfx + 't_mesh')
        self.stat_descrpt, descrpt_deriv, rij, nlist \
            = op_module.prod_env_mat_a(self.place_holders['coord'],
                                       self.place_holders['type'],
                                       self.place_holders['natoms_vec'],
                                       self.place_holders['box'],
                                       self.place_holders['default_mesh'],
                                       tf.constant(avg_zero),
                                       tf.constant(std_ones),
                                       rcut_a=self.rcut_a,
                                       rcut_r=self.rcut_r,
                                       rcut_r_smth=self.rcut_r_smth,
                                       sel_a=self.sel_a,
                                       sel_r=self.sel_r)
    self.sub_sess = tf.Session(graph=sub_graph, config=default_tf_session_config)
    self.original_sel = None
def __init__(self,
             rcut: float,
             rcut_smth: float,
             sel: List[str],
             neuron: List[int] = [24, 48, 96],
             resnet_dt: bool = False,
             trainable: bool = True,
             seed: int = None,
             set_davg_zero: bool = False,
             activation_function: str = 'tanh',
             precision: str = 'default',
             uniform_seed: bool = False) -> None:
    """Constructor.

    Parameters
    ----------
    rcut
            Cutoff radius of the descriptor
    rcut_smth
            Where the smoothing of the environment matrix starts;
            must not exceed rcut
    sel
            Number of selected neighbors per atom type
    neuron
            Sizes of the embedding-net hidden layers
    resnet_dt
            Whether the embedding net uses resnet timesteps
    trainable
            Whether the embedding-net variables are trainable
    seed
            Random seed for network initialization
    set_davg_zero
            Whether the descriptor average is forced to zero
    activation_function
            Name of the activation function
    precision
            Name of the float precision used by the networks
    uniform_seed
            Whether sub-networks share a deterministic seed shift

    Raises
    ------
    RuntimeError
            If rcut_smth exceeds rcut.
    """
    # Validate cutoffs early (same check as the other descriptor
    # constructors): smoothing must start inside the cutoff radius.
    if rcut < rcut_smth:
        raise RuntimeError(
            "rcut_smth (%f) should be no more than rcut (%f)!" % (rcut_smth, rcut))
    self.sel_a = sel
    self.rcut_r = rcut
    self.rcut_r_smth = rcut_smth
    self.filter_neuron = neuron
    self.filter_resnet_dt = resnet_dt
    self.seed = seed
    self.uniform_seed = uniform_seed
    # Deterministic per-network seed offset derived from the layer sizes.
    self.seed_shift = embedding_net_rand_seed_shift(self.filter_neuron)
    self.trainable = trainable
    self.filter_activation_fn = get_activation_func(activation_function)
    self.filter_precision = get_precision(precision)
    self.set_davg_zero = set_davg_zero

    # descrpt config
    # Angular-only descriptor: radial selection is all zeros.
    self.sel_r = [0 for ii in range(len(self.sel_a))]
    self.ntypes = len(self.sel_a)
    assert (self.ntypes == len(self.sel_r))
    self.rcut_a = -1
    # numb of neighbors and numb of descrptors
    self.nnei_a = np.cumsum(self.sel_a)[-1]
    self.nnei_r = np.cumsum(self.sel_r)[-1]
    self.nnei = self.nnei_a + self.nnei_r
    # 4 components (1/r and the 3 scaled coordinates) per angular neighbor.
    self.ndescrpt_a = self.nnei_a * 4
    self.ndescrpt_r = self.nnei_r * 1
    self.ndescrpt = self.ndescrpt_a + self.ndescrpt_r
    self.useBN = False
    self.dstd = None
    self.davg = None
    self.place_holders = {}

    # Separate sub-graph/session used only to evaluate descriptor
    # statistics with zero-mean, unit-std normalization.
    avg_zero = np.zeros([self.ntypes, self.ndescrpt]).astype(GLOBAL_NP_FLOAT_PRECISION)
    std_ones = np.ones([self.ntypes, self.ndescrpt]).astype(GLOBAL_NP_FLOAT_PRECISION)
    sub_graph = tf.Graph()
    with sub_graph.as_default():
        name_pfx = 'd_sea_'
        for ii in ['coord', 'box']:
            self.place_holders[ii] = tf.placeholder(
                GLOBAL_NP_FLOAT_PRECISION, [None, None], name=name_pfx + 't_' + ii)
        self.place_holders['type'] = tf.placeholder(tf.int32, [None, None],
                                                    name=name_pfx + 't_type')
        self.place_holders['natoms_vec'] = tf.placeholder(
            tf.int32, [self.ntypes + 2], name=name_pfx + 't_natoms')
        self.place_holders['default_mesh'] = tf.placeholder(
            tf.int32, [None], name=name_pfx + 't_mesh')
        self.stat_descrpt, descrpt_deriv, rij, nlist \
            = op_module.prod_env_mat_a(self.place_holders['coord'],
                                       self.place_holders['type'],
                                       self.place_holders['natoms_vec'],
                                       self.place_holders['box'],
                                       self.place_holders['default_mesh'],
                                       tf.constant(avg_zero),
                                       tf.constant(std_ones),
                                       rcut_a=self.rcut_a,
                                       rcut_r=self.rcut_r,
                                       rcut_r_smth=self.rcut_r_smth,
                                       sel_a=self.sel_a,
                                       sel_r=self.sel_r)
    self.sub_sess = tf.Session(graph=sub_graph, config=default_tf_session_config)
def comp_ef(self, dcoord, dbox, dtype, tnatoms, name, reuse=None):
    """Build energy / force / virial tensors mixing a fitting network
    with a tabulated pair potential via a soft-min switching function.

    The NN energy and the table energy are blended per atom with
    weights (1 - sw_lambda) and sw_lambda respectively; forces and
    virials combine the NN contribution, the switching-derivative
    contribution, and the table contribution.
    """
    descrpt, descrpt_deriv, rij, nlist \
        = op_module.prod_env_mat_a(dcoord,
                                   dtype,
                                   tnatoms,
                                   dbox,
                                   tf.constant(self.default_mesh),
                                   self.t_avg,
                                   self.t_std,
                                   rcut_a=self.rcut_a,
                                   rcut_r=self.rcut_r,
                                   rcut_r_smth=self.rcut_r_smth,
                                   sel_a=self.sel_a,
                                   sel_r=self.sel_r)
    inputs_reshape = tf.reshape(descrpt, [-1, self.ndescrpt])
    atom_ener = self._net(inputs_reshape, name, reuse=reuse)

    # Per-atom switching weight and its derivative; sw_lambda weights
    # the tabulated energy, (1 - sw_lambda) the network energy.
    sw_lambda, sw_deriv \
        = op_module.soft_min_switch(dtype,
                                    rij,
                                    nlist,
                                    tnatoms,
                                    sel_a=self.sel_a,
                                    sel_r=self.sel_r,
                                    alpha=self.smin_alpha,
                                    rmin=self.sw_rmin,
                                    rmax=self.sw_rmax)
    inv_sw_lambda = 1.0 - sw_lambda
    # Tabulated pair-potential energy, force and virial contributions.
    tab_atom_ener, tab_force, tab_atom_virial \
        = op_module.pair_tab(self.tab_info,
                             self.tab_data,
                             dtype,
                             rij,
                             nlist,
                             tnatoms,
                             sw_lambda,
                             sel_a=self.sel_a,
                             sel_r=self.sel_r)
    # (table - NN) energy gap: feeds the switching-derivative force/virial.
    energy_diff = tab_atom_ener - tf.reshape(atom_ener, [-1, self.natoms[0]])
    # NOTE: both per-atom energies are rebound to their weighted forms here.
    tab_atom_ener = tf.reshape(sw_lambda, [-1]) * tf.reshape(tab_atom_ener, [-1])
    atom_ener = tf.reshape(inv_sw_lambda, [-1]) * atom_ener
    energy_raw = tab_atom_ener + atom_ener
    energy_raw = tf.reshape(energy_raw, [-1, self.natoms[0]])
    energy = tf.reduce_sum(energy_raw, axis=1)

    # Gradient of the (already switch-weighted) NN energy w.r.t. the
    # descriptor; drives the NN part of force and virial.
    net_deriv_ = tf.gradients(atom_ener, inputs_reshape)
    net_deriv = net_deriv_[0]
    net_deriv_reshape = tf.reshape(net_deriv, [-1, self.natoms[0] * self.ndescrpt])

    force = op_module.prod_force_se_a(net_deriv_reshape,
                                      descrpt_deriv,
                                      nlist,
                                      tnatoms,
                                      n_a_sel=self.nnei_a,
                                      n_r_sel=self.nnei_r)
    # Force from the spatial dependence of the switching function.
    sw_force \
        = op_module.soft_min_force(energy_diff,
                                   sw_deriv,
                                   nlist,
                                   tnatoms,
                                   n_a_sel=self.nnei_a,
                                   n_r_sel=self.nnei_r)
    force = force + sw_force + tab_force

    virial, atom_vir = op_module.prod_virial_se_a(net_deriv_reshape,
                                                  descrpt_deriv,
                                                  rij,
                                                  nlist,
                                                  tnatoms,
                                                  n_a_sel=self.nnei_a,
                                                  n_r_sel=self.nnei_r)
    sw_virial, sw_atom_virial \
        = op_module.soft_min_virial(energy_diff,
                                    sw_deriv,
                                    rij,
                                    nlist,
                                    tnatoms,
                                    n_a_sel=self.nnei_a,
                                    n_r_sel=self.nnei_r)
    # atom_virial = atom_virial + sw_atom_virial + tab_atom_virial
    # Total virial: NN + switching + per-atom table virial summed over
    # all (natoms[1]) atoms held by this processor.
    virial = virial + sw_virial \
        + tf.reduce_sum(tf.reshape(tab_atom_virial, [-1, self.natoms[1], 9]), axis=1)
    return energy, force, virial