def _pass_filter(self, inputs, natoms, reuse=None, suffix='', trainable=True):
    start_index = 0
    inputs = tf.reshape(inputs, [-1, self.ndescrpt * natoms[0]])
    shape = inputs.get_shape().as_list()
    output = []
    output_qmat = []
    for type_i in range(self.ntypes):
        inputs_i = tf.slice(inputs, [0, start_index * self.ndescrpt],
                            [-1, natoms[2 + type_i] * self.ndescrpt])
        inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
        layer, qmat = self._filter(inputs_i,
                                   name='filter_type_' + str(type_i) + suffix,
                                   natoms=natoms,
                                   reuse=reuse,
                                   seed=self.seed,
                                   trainable=trainable)
        layer = tf.reshape(
            layer,
            [tf.shape(inputs)[0], natoms[2 + type_i] * self.get_dim_out()])
        qmat = tf.reshape(qmat, [
            tf.shape(inputs)[0],
            natoms[2 + type_i] * self.get_dim_rot_mat_1() * 3
        ])
        output.append(layer)
        output_qmat.append(qmat)
        start_index += natoms[2 + type_i]
    output = tf.concat(output, axis=1)
    output_qmat = tf.concat(output_qmat, axis=1)
    return output, output_qmat
def _pass_filter(self, inputs, natoms, reuse=None, suffix='', trainable=True):
    start_index = 0
    inputs = tf.reshape(inputs, [-1, self.ndescrpt * natoms[0]])
    output = []
    output_qmat = []
    if not self.type_one_side:
        for type_i in range(self.ntypes):
            inputs_i = tf.slice(inputs, [0, start_index * self.ndescrpt],
                                [-1, natoms[2 + type_i] * self.ndescrpt])
            inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
            layer, qmat = self._filter(
                tf.cast(inputs_i, self.filter_precision),
                type_i,
                name='filter_type_' + str(type_i) + suffix,
                natoms=natoms,
                reuse=reuse,
                seed=self.seed,
                trainable=trainable,
                activation_fn=self.filter_activation_fn)
            layer = tf.reshape(layer, [
                tf.shape(inputs)[0],
                natoms[2 + type_i] * self.get_dim_out()
            ])
            qmat = tf.reshape(qmat, [
                tf.shape(inputs)[0],
                natoms[2 + type_i] * self.get_dim_rot_mat_1() * 3
            ])
            output.append(layer)
            output_qmat.append(qmat)
            start_index += natoms[2 + type_i]
    else:
        inputs_i = inputs
        inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
        type_i = -1
        layer, qmat = self._filter(tf.cast(inputs_i, self.filter_precision),
                                   type_i,
                                   name='filter_type_all' + suffix,
                                   natoms=natoms,
                                   reuse=reuse,
                                   seed=self.seed,
                                   trainable=trainable,
                                   activation_fn=self.filter_activation_fn)
        layer = tf.reshape(
            layer, [tf.shape(inputs)[0], natoms[0] * self.get_dim_out()])
        qmat = tf.reshape(qmat, [
            tf.shape(inputs)[0], natoms[0] * self.get_dim_rot_mat_1() * 3
        ])
        output.append(layer)
        output_qmat.append(qmat)
    output = tf.concat(output, axis=1)
    output_qmat = tf.concat(output_qmat, axis=1)
    return output, output_qmat
def _concat_type_embedding(
        self,
        xyz_scatter,
        nframes,
        natoms,
        type_embedding,
):
    '''Concatenate `type_embedding` of neighbors and `xyz_scatter`.
    If not self.type_one_side, concatenate `type_embedding` of center atoms as well.

    Parameters
    ----------
    xyz_scatter:
            shape is [nframes*natoms[0]*self.nnei, 1]
    nframes:
            shape is []
    natoms:
            shape is [1+1+self.ntypes]
    type_embedding:
            shape is [self.ntypes, Y] where Y=jdata['type_embedding']['neuron'][-1]

    Returns
    -------
    embedding:
            environment of each atom represented by embedding.
    '''
    te_out_dim = type_embedding.get_shape().as_list()[-1]
    nei_embed = tf.nn.embedding_lookup(
        type_embedding,
        tf.cast(self.nei_type, dtype=tf.int32))  # shape is [self.nnei, te_out_dim]
    nei_embed = tf.tile(
        nei_embed,
        (nframes * natoms[0], 1))  # shape is [nframes*natoms[0]*self.nnei, te_out_dim]
    nei_embed = tf.reshape(nei_embed, [-1, te_out_dim])
    embedding_input = tf.concat(
        [xyz_scatter, nei_embed],
        1)  # shape is [nframes*natoms[0]*self.nnei, 1+te_out_dim]
    if not self.type_one_side:
        atm_embed = embed_atom_type(
            self.ntypes, natoms,
            type_embedding)  # shape is [natoms[0], te_out_dim]
        atm_embed = tf.tile(
            atm_embed,
            (nframes, self.nnei))  # shape is [nframes*natoms[0], self.nnei*te_out_dim]
        atm_embed = tf.reshape(
            atm_embed,
            [-1, te_out_dim])  # shape is [nframes*natoms[0]*self.nnei, te_out_dim]
        embedding_input = tf.concat(
            [embedding_input, atm_embed],
            1)  # shape is [nframes*natoms[0]*self.nnei, 1+te_out_dim+te_out_dim]
    return embedding_input
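# A hedged shape walk-through of _concat_type_embedding (the numbers are
# illustrative, not from the source): with nframes=2, natoms[0]=3, self.nnei=4
# and te_out_dim=8, xyz_scatter has shape [2*3*4, 1] = [24, 1] and nei_embed
# tiles to [24, 8], so the first concat yields [24, 1+8] = [24, 9]; with
# type_one_side=False, atm_embed also tiles to [24, 8] and the returned
# embedding_input is [24, 1+8+8] = [24, 17].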
def build(self, coord_, atype_, natoms, box, mesh, suffix='', reuse=None):
    davg = self.davg
    dstd = self.dstd
    if davg is None:
        davg = [
            np.zeros([self.descrpt_a.ntypes, self.descrpt_a.ndescrpt]),
            np.zeros([self.descrpt_r.ntypes, self.descrpt_r.ndescrpt])
        ]
    if dstd is None:
        dstd = [
            np.ones([self.descrpt_a.ntypes, self.descrpt_a.ndescrpt]),
            np.ones([self.descrpt_r.ntypes, self.descrpt_r.ndescrpt])
        ]
    # dout
    self.dout_a = self.descrpt_a.build(coord_, atype_, natoms, box, mesh,
                                       suffix=suffix + '_a', reuse=reuse)
    self.dout_r = self.descrpt_r.build(coord_, atype_, natoms, box, mesh,
                                       suffix=suffix, reuse=reuse)
    self.dout_a = tf.reshape(self.dout_a, [-1, self.descrpt_a.get_dim_out()])
    self.dout_r = tf.reshape(self.dout_r, [-1, self.descrpt_r.get_dim_out()])
    self.dout = tf.concat([self.dout_a, self.dout_r], axis=1)
    self.dout = tf.reshape(self.dout, [-1, natoms[0] * self.get_dim_out()])
    return self.dout
def _pass_filter(self, inputs, atype, natoms, input_dict, reuse=None,
                 suffix='', trainable=True):
    start_index = 0
    inputs = tf.reshape(inputs, [-1, self.ndescrpt * natoms[0]])
    output = []
    output_qmat = []
    inputs_i = inputs
    inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
    type_i = -1
    layer, qmat = self._filter(inputs_i,
                               type_i,
                               name='filter_type_all' + suffix,
                               natoms=natoms,
                               reuse=reuse,
                               trainable=trainable,
                               activation_fn=self.filter_activation_fn)
    layer = tf.reshape(
        layer, [tf.shape(inputs)[0], natoms[0] * self.get_dim_out()])
    # qmat = tf.reshape(qmat, [tf.shape(inputs)[0], natoms[0] * self.get_dim_rot_mat_1() * 3])
    output.append(layer)
    # output_qmat.append(qmat)
    output = tf.concat(output, axis=1)
    # output_qmat = tf.concat(output_qmat, axis = 1)
    return output, None
def _pass_filter(self, inputs, natoms, reuse=None, suffix='', trainable=True):
    start_index = 0
    inputs = tf.reshape(inputs, [-1, self.ndescrpt * natoms[0]])
    output = []
    for type_i in range(self.ntypes):
        inputs_i = tf.slice(inputs, [0, start_index * self.ndescrpt],
                            [-1, natoms[2 + type_i] * self.ndescrpt])
        inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
        layer = self._filter_r(tf.cast(inputs_i, self.filter_precision),
                               type_i,
                               name='filter_type_' + str(type_i) + suffix,
                               natoms=natoms,
                               reuse=reuse,
                               seed=self.seed,
                               trainable=trainable,
                               activation_fn=self.filter_activation_fn)
        layer = tf.reshape(
            layer,
            [tf.shape(inputs)[0], natoms[2 + type_i] * self.get_dim_out()])
        output.append(layer)
        start_index += natoms[2 + type_i]
    output = tf.concat(output, axis=1)
    return output
def _filter_r(self,
              inputs,
              type_input,
              natoms,
              activation_fn=tf.nn.tanh,
              stddev=1.0,
              bavg=0.0,
              name='linear',
              reuse=None,
              trainable=True):
    # natom x nei
    outputs_size = [1] + self.filter_neuron
    with tf.variable_scope(name, reuse=reuse):
        start_index = 0
        xyz_scatter_total = []
        for type_i in range(self.ntypes):
            # cut-out inputs
            # with natom x nei_type_i
            inputs_i = tf.slice(inputs, [0, start_index],
                                [-1, self.sel_r[type_i]])
            start_index += self.sel_r[type_i]
            shape_i = inputs_i.get_shape().as_list()
            # with (natom x nei_type_i) x 1
            xyz_scatter = tf.reshape(inputs_i, [-1, 1])
            if (type_input, type_i) not in self.exclude_types:
                xyz_scatter = embedding_net(
                    xyz_scatter,
                    self.filter_neuron,
                    self.filter_precision,
                    activation_fn=activation_fn,
                    resnet_dt=self.filter_resnet_dt,
                    name_suffix="_" + str(type_i),
                    stddev=stddev,
                    bavg=bavg,
                    seed=self.seed,
                    trainable=trainable,
                    uniform_seed=self.uniform_seed,
                    initial_variables=self.embedding_net_variables,
                )
                if (not self.uniform_seed) and (self.seed is not None):
                    self.seed += self.seed_shift
                # natom x nei_type_i x out_size
                xyz_scatter = tf.reshape(xyz_scatter,
                                         (-1, shape_i[1], outputs_size[-1]))
            else:
                natom = tf.shape(inputs)[0]
                xyz_scatter = tf.cast(
                    tf.fill((natom, shape_i[1], outputs_size[-1]), 0.),
                    GLOBAL_TF_FLOAT_PRECISION)
            xyz_scatter_total.append(xyz_scatter)
        # natom x nei x outputs_size
        xyz_scatter = tf.concat(xyz_scatter_total, axis=1)
        # natom x outputs_size
        res_rescale = 1. / 5.
        result = tf.reduce_mean(xyz_scatter, axis=1) * res_rescale
        return result
def _slice_descrpt_deriv(self, deriv):
    coll = []
    start_idx = 0
    for type_i in range(self.ntypes):
        if type_i in self.sel_type:
            di = tf.slice(deriv, [0, start_idx * self.ndescrpt],
                          [-1, self.t_natoms[2 + type_i] * self.ndescrpt])
            coll.append(di)
        start_idx += self.t_natoms[2 + type_i]
    return tf.concat(coll, axis=1)
def build(self,
          coord_: tf.Tensor,
          atype_: tf.Tensor,
          natoms: tf.Tensor,
          box_: tf.Tensor,
          mesh: tf.Tensor,
          input_dict: dict,
          reuse: bool = None,
          suffix: str = '') -> tf.Tensor:
    """
    Build the computational graph for the descriptor

    Parameters
    ----------
    coord_
            The coordinate of atoms
    atype_
            The type of atoms
    natoms
            The number of atoms. This tensor has the length of Ntypes + 2
            natoms[0]: number of local atoms
            natoms[1]: total number of atoms held by this processor
            natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
    mesh
            For historical reasons, only the length of the Tensor matters.
            if size of mesh == 6, pbc is assumed.
            if size of mesh == 0, no-pbc is assumed.
    input_dict
            Dictionary for additional inputs
    reuse
            The weights in the networks should be reused when get the variable.
    suffix
            Name suffix to identify this descriptor

    Returns
    -------
    descriptor
            The output descriptor
    """
    with tf.variable_scope('descrpt_attr' + suffix, reuse=reuse):
        t_rcut = tf.constant(self.get_rcut(),
                             name='rcut',
                             dtype=GLOBAL_TF_FLOAT_PRECISION)
        t_ntypes = tf.constant(self.get_ntypes(),
                               name='ntypes',
                               dtype=tf.int32)
    all_dout = []
    for idx, ii in enumerate(self.descrpt_list):
        dout = ii.build(coord_, atype_, natoms, box_, mesh, input_dict,
                        suffix=suffix + f'_{idx}', reuse=reuse)
        dout = tf.reshape(dout, [-1, ii.get_dim_out()])
        all_dout.append(dout)
    dout = tf.concat(all_dout, axis=1)
    dout = tf.reshape(dout, [-1, natoms[0] * self.get_dim_out()])
    return dout
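# Illustrative natoms layout assumed by the build methods in this file (a
# sketch following the docstring convention, not taken from the source): for a
# frame with 64 O and 128 H atoms and no ghost atoms, Ntypes = 2 and
#   natoms = [192, 192, 64, 128]
# i.e. natoms[0] = nloc, natoms[1] = nall, and natoms[2:] are the per-type
# counts that every tf.slice over the type axis relies on.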
def build(self, input_d, rot_mat, natoms, reuse=None, suffix=''):
    start_index = 0
    inputs = tf.cast(
        tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]),
        self.fitting_precision)
    rot_mat = tf.reshape(rot_mat, [-1, 9 * natoms[0]])
    count = 0
    outs_list = []
    for type_i in range(self.ntypes):
        # cut-out inputs
        inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt],
                            [-1, natoms[2 + type_i] * self.dim_descrpt])
        inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
        rot_mat_i = tf.slice(rot_mat, [0, start_index * 9],
                             [-1, natoms[2 + type_i] * 9])
        rot_mat_i = tf.reshape(rot_mat_i, [-1, 3, 3])
        start_index += natoms[2 + type_i]
        if not type_i in self.sel_type:
            continue
        layer = inputs_i
        for ii in range(0, len(self.n_neuron)):
            if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
                layer += one_layer(
                    layer,
                    self.n_neuron[ii],
                    name='layer_' + str(ii) + '_type_' + str(type_i) + suffix,
                    reuse=reuse,
                    seed=self.seed,
                    use_timestep=self.resnet_dt,
                    activation_fn=self.fitting_activation_fn,
                    precision=self.fitting_precision)
            else:
                layer = one_layer(
                    layer,
                    self.n_neuron[ii],
                    name='layer_' + str(ii) + '_type_' + str(type_i) + suffix,
                    reuse=reuse,
                    seed=self.seed,
                    activation_fn=self.fitting_activation_fn,
                    precision=self.fitting_precision)
        # (nframes x natoms) x 9
        final_layer = one_layer(layer,
                                9,
                                activation_fn=None,
                                name='final_layer_type_' + str(type_i) + suffix,
                                reuse=reuse,
                                seed=self.seed,
                                precision=self.fitting_precision,
                                final_layer=True)
        # (nframes x natoms) x 3 x 3
        final_layer = tf.reshape(
            final_layer, [tf.shape(inputs)[0] * natoms[2 + type_i], 3, 3])
        # (nframes x natoms) x 3 x 3
        final_layer = final_layer + tf.transpose(final_layer, perm=[0, 2, 1])
        # (nframes x natoms) x 3 x 3(coord)
        final_layer = tf.matmul(final_layer, rot_mat_i)
        # (nframes x natoms) x 3(coord) x 3(coord)
        final_layer = tf.matmul(rot_mat_i, final_layer, transpose_a=True)
        # nframes x natoms x 3 x 3
        final_layer = tf.reshape(
            final_layer, [tf.shape(inputs)[0], natoms[2 + type_i], 3, 3])
        # concat the results
        outs_list.append(final_layer)
        count += 1
    outs = tf.concat(outs_list, axis=1)
    tf.summary.histogram('fitting_net_output', outs)
    return tf.cast(tf.reshape(outs, [-1]), GLOBAL_TF_FLOAT_PRECISION)
def build(self, input_d, rot_mat, natoms, reuse=None, suffix=''):
    start_index = 0
    inputs = tf.cast(
        tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]),
        self.fitting_precision)
    rot_mat = tf.reshape(rot_mat, [-1, 9 * natoms[0]])
    count = 0
    for type_i in range(self.ntypes):
        # cut-out inputs
        inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt],
                            [-1, natoms[2 + type_i] * self.dim_descrpt])
        inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
        rot_mat_i = tf.slice(rot_mat, [0, start_index * 9],
                             [-1, natoms[2 + type_i] * 9])
        rot_mat_i = tf.reshape(rot_mat_i, [-1, 3, 3])
        start_index += natoms[2 + type_i]
        if not type_i in self.sel_type:
            continue
        layer = inputs_i
        for ii in range(0, len(self.n_neuron)):
            if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
                layer += one_layer(
                    layer,
                    self.n_neuron[ii],
                    name='layer_' + str(ii) + '_type_' + str(type_i) + suffix,
                    reuse=reuse,
                    seed=self.seed,
                    use_timestep=self.resnet_dt,
                    activation_fn=self.fitting_activation_fn,
                    precision=self.fitting_precision,
                    uniform_seed=self.uniform_seed)
            else:
                layer = one_layer(
                    layer,
                    self.n_neuron[ii],
                    name='layer_' + str(ii) + '_type_' + str(type_i) + suffix,
                    reuse=reuse,
                    seed=self.seed,
                    activation_fn=self.fitting_activation_fn,
                    precision=self.fitting_precision,
                    uniform_seed=self.uniform_seed)
            if (not self.uniform_seed) and (self.seed is not None):
                self.seed += self.seed_shift
        # (nframes x natoms) x (nwfc x 3)
        final_layer = one_layer(layer,
                                self.wfc_numb * 3,
                                activation_fn=None,
                                name='final_layer_type_' + str(type_i) + suffix,
                                reuse=reuse,
                                seed=self.seed,
                                precision=self.fitting_precision,
                                uniform_seed=self.uniform_seed)
        if (not self.uniform_seed) and (self.seed is not None):
            self.seed += self.seed_shift
        # (nframes x natoms) x nwfc(wc) x 3(coord_local)
        final_layer = tf.reshape(
            final_layer,
            [tf.shape(inputs)[0] * natoms[2 + type_i], self.wfc_numb, 3])
        # (nframes x natoms) x nwfc(wc) x 3(coord)
        final_layer = tf.matmul(final_layer, rot_mat_i)
        # nframes x natoms x nwfc(wc) x 3(coord)
        final_layer = tf.reshape(
            final_layer,
            [tf.shape(inputs)[0], natoms[2 + type_i], self.wfc_numb, 3])
        # concat the results
        if count == 0:
            outs = final_layer
        else:
            outs = tf.concat([outs, final_layer], axis=1)
        count += 1
    tf.summary.histogram('fitting_net_output', outs)
    return tf.cast(tf.reshape(outs, [-1]), GLOBAL_TF_FLOAT_PRECISION)
def _embedding_net(self,
                   inputs,
                   natoms,
                   filter_neuron,
                   activation_fn=tf.nn.tanh,
                   stddev=1.0,
                   bavg=0.0,
                   name='linear',
                   reuse=None,
                   seed=None,
                   trainable=True):
    '''
    inputs:  nf x na x (nei x 4)
    outputs: nf x na x nei x output_size
    '''
    # natom x (nei x 4)
    inputs = tf.reshape(inputs, [-1, self.ndescrpt])
    shape = inputs.get_shape().as_list()
    outputs_size = [1] + filter_neuron
    with tf.variable_scope(name, reuse=reuse):
        xyz_scatter_total = []
        # with natom x (nei x 4)
        inputs_i = inputs
        shape_i = inputs_i.get_shape().as_list()
        # with (natom x nei) x 4
        inputs_reshape = tf.reshape(inputs_i, [-1, 4])
        # with (natom x nei) x 1
        xyz_scatter = tf.reshape(
            tf.slice(inputs_reshape, [0, 0], [-1, 1]), [-1, 1])
        # with (natom x nei) x out_size
        xyz_scatter = embedding_net(xyz_scatter,
                                    self.filter_neuron,
                                    self.filter_precision,
                                    activation_fn=activation_fn,
                                    resnet_dt=self.filter_resnet_dt,
                                    stddev=stddev,
                                    bavg=bavg,
                                    seed=seed,
                                    trainable=trainable)
        # natom x nei x out_size
        xyz_scatter = tf.reshape(xyz_scatter,
                                 (-1, shape_i[1] // 4, outputs_size[-1]))
        xyz_scatter_total.append(xyz_scatter)
        # natom x nei x outputs_size
        xyz_scatter = tf.concat(xyz_scatter_total, axis=1)
        # nf x natom x nei x outputs_size
        xyz_scatter = tf.reshape(
            xyz_scatter,
            [tf.shape(inputs)[0], natoms[0], self.nnei, outputs_size[-1]])
        return xyz_scatter
def _enrich(self, dipole, dof=3):
    coll = []
    sel_start_idx = 0
    for type_i in range(self.ntypes):
        if type_i in self.sel_type:
            di = tf.slice(dipole, [0, sel_start_idx * dof],
                          [-1, self.t_natoms[2 + type_i] * dof])
            sel_start_idx += self.t_natoms[2 + type_i]
        else:
            di = tf.zeros(
                [tf.shape(dipole)[0], self.t_natoms[2 + type_i] * dof],
                dtype=GLOBAL_TF_FLOAT_PRECISION)
        coll.append(di)
    return tf.concat(coll, axis=1)
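# Hedged example of what _enrich produces (all values illustrative, not from
# the source): with ntypes=2, sel_type=[0], t_natoms=[5, 5, 3, 2] and dof=3,
# the input holds 3*3 = 9 columns for the selected type-0 atoms; the output
# holds (3+2)*3 = 15 columns, where the 6 columns of the unselected type-1
# atoms are zero-filled so downstream reshapes over the full atom list line up.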
def _pass_filter(self, inputs, natoms, reuse=None, suffix='', trainable=True):
    start_index = 0
    inputs = tf.reshape(inputs, [-1, self.ndescrpt * natoms[0]])
    output = []
    if not (self.type_one_side and len(self.exclude_types) == 0):
        for type_i in range(self.ntypes):
            inputs_i = tf.slice(inputs, [0, start_index * self.ndescrpt],
                                [-1, natoms[2 + type_i] * self.ndescrpt])
            inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
            if self.type_one_side:
                # reuse NN parameters for all types to support type_one_side
                # along with exclude_types
                reuse = tf.AUTO_REUSE
                filter_name = 'filter_type_all' + suffix
            else:
                filter_name = 'filter_type_' + str(type_i) + suffix
            layer = self._filter_r(inputs_i,
                                   type_i,
                                   name=filter_name,
                                   natoms=natoms,
                                   reuse=reuse,
                                   trainable=trainable,
                                   activation_fn=self.filter_activation_fn)
            layer = tf.reshape(layer, [
                tf.shape(inputs)[0],
                natoms[2 + type_i] * self.get_dim_out()
            ])
            output.append(layer)
            start_index += natoms[2 + type_i]
    else:
        inputs_i = inputs
        inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
        type_i = -1
        layer = self._filter_r(inputs_i,
                               type_i,
                               name='filter_type_all' + suffix,
                               natoms=natoms,
                               reuse=reuse,
                               trainable=trainable,
                               activation_fn=self.filter_activation_fn)
        layer = tf.reshape(
            layer, [tf.shape(inputs)[0], natoms[0] * self.get_dim_out()])
        output.append(layer)
    output = tf.concat(output, axis=1)
    return output
def build(self, coord_, atype_, natoms, box, mesh, davg, dstd,
          suffix='', reuse=None):
    # dout
    self.dout_a = self.descrpt_a.build(coord_, atype_, natoms, box, mesh,
                                       davg[0], dstd[0],
                                       suffix=suffix + '_a', reuse=reuse)
    self.dout_r = self.descrpt_r.build(coord_, atype_, natoms, box, mesh,
                                       davg[1], dstd[1],
                                       suffix=suffix + '_r', reuse=reuse)
    self.dout_a = tf.reshape(self.dout_a, [-1, self.descrpt_a.get_dim_out()])
    self.dout_r = tf.reshape(self.dout_r, [-1, self.descrpt_r.get_dim_out()])
    self.dout = tf.concat([self.dout_a, self.dout_r], axis=1)
    self.dout = tf.reshape(self.dout, [-1, natoms[0] * self.get_dim_out()])
    return self.dout
def embed_atom_type(
        ntypes: int,
        natoms: tf.Tensor,
        type_embedding: tf.Tensor,
):
    """
    Make the embedded type for the atoms in the system.
    The atoms are assumed to be sorted according to the type,
    thus their types are described by a `tf.Tensor` natoms, see explanation below.

    Parameters
    ----------
    ntypes:
        Number of types.
    natoms:
        The number of atoms. This tensor has the length of Ntypes + 2
        natoms[0]: number of local atoms
        natoms[1]: total number of atoms held by this processor
        natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
    type_embedding:
        The type embedding. It has the shape of [ntypes, embedding_dim]

    Returns
    -------
    atom_embedding
        The embedded type of each atom. It has the shape of [numb_atoms, embedding_dim]
    """
    te_out_dim = type_embedding.get_shape().as_list()[-1]
    atype = []
    for ii in range(ntypes):
        atype.append(tf.tile([ii], [natoms[2 + ii]]))
    atype = tf.concat(atype, axis=0)
    atm_embed = tf.nn.embedding_lookup(
        type_embedding, tf.cast(atype, dtype=tf.int32))  # (nf*natom) x nchnl
    atm_embed = tf.reshape(atm_embed, [-1, te_out_dim])
    return atm_embed
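# Minimal usage sketch for embed_atom_type (a demo-only helper, not part of the
# original source; assumes the graph-mode TF1 session API used throughout this
# module, and the tensors below are made up for illustration).
def _demo_embed_atom_type():
    type_embedding = tf.constant([[1., 0., 0.],
                                  [0., 1., 0.]])  # [ntypes=2, embedding_dim=3]
    natoms = tf.constant([3, 3, 2, 1], dtype=tf.int32)  # 2 type-0 atoms, 1 type-1 atom
    atm_embed = embed_atom_type(2, natoms, type_embedding)
    with tf.Session() as sess:
        # rows 0-1 carry the type-0 embedding, row 2 the type-1 embedding
        print(sess.run(atm_embed))  # shape (3, 3)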
def build(self,
          coord_: tf.Tensor,
          atype_: tf.Tensor,
          natoms: tf.Tensor,
          box_: tf.Tensor,
          mesh: tf.Tensor,
          input_dict: dict,
          reuse: bool = None,
          suffix: str = '') -> tf.Tensor:
    """
    Build the computational graph for the descriptor

    Parameters
    ----------
    coord_
            The coordinate of atoms
    atype_
            The type of atoms
    natoms
            The number of atoms. This tensor has the length of Ntypes + 2
            natoms[0]: number of local atoms
            natoms[1]: total number of atoms held by this processor
            natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
    mesh
            For historical reasons, only the length of the Tensor matters.
            if size of mesh == 6, pbc is assumed.
            if size of mesh == 0, no-pbc is assumed.
    input_dict
            Dictionary for additional inputs. Should have 'efield'.
    reuse
            The weights in the networks should be reused when get the variable.
    suffix
            Name suffix to identify this descriptor

    Returns
    -------
    descriptor
            The output descriptor
    """
    self.dout_vert = self.descrpt_vert.build(coord_, atype_, natoms, box_,
                                             mesh, input_dict)
    self.dout_para = self.descrpt_para.build(coord_, atype_, natoms, box_,
                                             mesh, input_dict, reuse=True)
    coord = tf.reshape(coord_, [-1, natoms[1] * 3])
    nframes = tf.shape(coord)[0]
    self.dout_vert = tf.reshape(
        self.dout_vert,
        [nframes * natoms[0], self.descrpt_vert.get_dim_out()])
    self.dout_para = tf.reshape(
        self.dout_para,
        [nframes * natoms[0], self.descrpt_para.get_dim_out()])
    self.dout = tf.concat([self.dout_vert, self.dout_para], axis=1)
    self.dout = tf.reshape(self.dout,
                           [nframes, natoms[0] * self.get_dim_out()])
    self.qmat = self.descrpt_vert.qmat + self.descrpt_para.qmat
    tf.summary.histogram('embedding_net_output', self.dout)
    return self.dout
def _build_fv_graph_inner(self):
    self.t_ef = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name='t_ef')
    # fixed sizes used for debugging, immediately reset to -1 so the
    # reshapes below stay dynamic
    nf = 10
    nfxnas = 64 * nf
    nfxna = 192 * nf
    nf = -1
    nfxnas = -1
    nfxna = -1
    self.t_box_reshape = tf.reshape(self.t_box, [-1, 9])
    t_nframes = tf.shape(self.t_box_reshape)[0]
    # (nframes x natoms_sel) x 1 x 3
    self.t_ef_reshape = tf.reshape(self.t_ef, [nfxnas, 1, 3])
    # (nframes x natoms) x ndescrpt
    self.descrpt = self.graph.get_tensor_by_name(
        os.path.join(self.modifier_prefix, 'o_rmat:0'))
    self.descrpt_deriv = self.graph.get_tensor_by_name(
        os.path.join(self.modifier_prefix, 'o_rmat_deriv:0'))
    self.nlist = self.graph.get_tensor_by_name(
        os.path.join(self.modifier_prefix, 'o_nlist:0'))
    self.rij = self.graph.get_tensor_by_name(
        os.path.join(self.modifier_prefix, 'o_rij:0'))
    # self.descrpt_reshape = tf.reshape(self.descrpt, [nf, 192 * self.ndescrpt])
    # self.descrpt_deriv = tf.reshape(self.descrpt_deriv, [nf, 192 * self.ndescrpt * 3])
    # nframes x (natoms_sel x 3)
    self.t_tensor_reshape = tf.reshape(self.t_tensor, [t_nframes, -1])
    # nframes x (natoms x 3)
    self.t_tensor_reshape = self._enrich(self.t_tensor_reshape, dof=3)
    # (nframes x natoms) x 3
    self.t_tensor_reshape = tf.reshape(self.t_tensor_reshape, [nfxna, 3])
    # (nframes x natoms) x 1
    self.t_dipole_x = tf.slice(self.t_tensor_reshape, [0, 0], [nfxna, 1])
    self.t_dipole_y = tf.slice(self.t_tensor_reshape, [0, 1], [nfxna, 1])
    self.t_dipole_z = tf.slice(self.t_tensor_reshape, [0, 2], [nfxna, 1])
    self.t_dipole_z = tf.reshape(self.t_dipole_z, [nfxna, 1])
    # (nframes x natoms) x ndescrpt
    [self.t_dipole_x_d] = tf.gradients(self.t_dipole_x, self.descrpt)
    [self.t_dipole_y_d] = tf.gradients(self.t_dipole_y, self.descrpt)
    [self.t_dipole_z_d] = tf.gradients(self.t_dipole_z, self.descrpt)
    # nframes x (natoms x ndescrpt)
    self.t_dipole_x_d = tf.reshape(self.t_dipole_x_d,
                                   [-1, self.t_natoms[0] * self.ndescrpt])
    self.t_dipole_y_d = tf.reshape(self.t_dipole_y_d,
                                   [-1, self.t_natoms[0] * self.ndescrpt])
    self.t_dipole_z_d = tf.reshape(self.t_dipole_z_d,
                                   [-1, self.t_natoms[0] * self.ndescrpt])
    # nframes x (natoms_sel x ndescrpt)
    self.t_dipole_x_d = self._slice_descrpt_deriv(self.t_dipole_x_d)
    self.t_dipole_y_d = self._slice_descrpt_deriv(self.t_dipole_y_d)
    self.t_dipole_z_d = self._slice_descrpt_deriv(self.t_dipole_z_d)
    # (nframes x natoms_sel) x ndescrpt
    self.t_dipole_x_d = tf.reshape(self.t_dipole_x_d, [nfxnas, self.ndescrpt])
    self.t_dipole_y_d = tf.reshape(self.t_dipole_y_d, [nfxnas, self.ndescrpt])
    self.t_dipole_z_d = tf.reshape(self.t_dipole_z_d, [nfxnas, self.ndescrpt])
    # (nframes x natoms_sel) x 3 x ndescrpt
    self.t_dipole_d = tf.concat(
        [self.t_dipole_x_d, self.t_dipole_y_d, self.t_dipole_z_d], axis=1)
    self.t_dipole_d = tf.reshape(self.t_dipole_d,
                                 [nfxnas, 3 * self.ndescrpt])
    # (nframes x natoms_sel) x 3 x ndescrpt
    self.t_dipole_d = tf.reshape(self.t_dipole_d, [-1, 3, self.ndescrpt])
    # (nframes x natoms_sel) x 1 x ndescrpt
    self.t_ef_d = tf.matmul(self.t_ef_reshape, self.t_dipole_d)
    # nframes x (natoms_sel x ndescrpt)
    self.t_ef_d = tf.reshape(self.t_ef_d, [t_nframes, -1])
    # nframes x (natoms x ndescrpt)
    self.t_ef_d = self._enrich(self.t_ef_d, dof=self.ndescrpt)
    self.t_ef_d = tf.reshape(self.t_ef_d,
                             [nf, self.t_natoms[0] * self.ndescrpt])
    # t_ef_d is the force (carrying a factor of -1); prod_force takes the
    # derivative, so we need the opposite sign
    self.t_ef_d_oppo = -self.t_ef_d
    force = op_module.prod_force_se_a(self.t_ef_d_oppo,
                                      self.descrpt_deriv,
                                      self.nlist,
                                      self.t_natoms,
                                      n_a_sel=self.nnei_a,
                                      n_r_sel=self.nnei_r)
    virial, atom_virial = op_module.prod_virial_se_a(self.t_ef_d_oppo,
                                                     self.descrpt_deriv,
                                                     self.rij,
                                                     self.nlist,
                                                     self.t_natoms,
                                                     n_a_sel=self.nnei_a,
                                                     n_r_sel=self.nnei_r)
    force = tf.identity(force, name='o_dm_force')
    virial = tf.identity(virial, name='o_dm_virial')
    atom_virial = tf.identity(atom_virial, name='o_dm_av')
    return force, virial, atom_virial
def build(
        self,
        inputs: tf.Tensor,
        natoms: tf.Tensor,
        input_dict: dict = None,
        reuse: bool = None,
        suffix: str = '',
) -> tf.Tensor:
    """
    Build the computational graph for fitting net

    Parameters
    ----------
    inputs
            The input descriptor
    input_dict
            Additional dict for inputs.
            if numb_fparam > 0, should have input_dict['fparam']
            if numb_aparam > 0, should have input_dict['aparam']
    natoms
            The number of atoms. This tensor has the length of Ntypes + 2
            natoms[0]: number of local atoms
            natoms[1]: total number of atoms held by this processor
            natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
    reuse
            The weights in the networks should be reused when get the variable.
    suffix
            Name suffix to identify this descriptor

    Returns
    -------
    ener
            The system energy
    """
    if input_dict is None:
        input_dict = {}
    bias_atom_e = self.bias_atom_e
    if self.numb_fparam > 0 and (self.fparam_avg is None
                                 or self.fparam_inv_std is None):
        raise RuntimeError(
            'No data stat result. One should do data statistics before build')
    if self.numb_aparam > 0 and (self.aparam_avg is None
                                 or self.aparam_inv_std is None):
        raise RuntimeError(
            'No data stat result. One should do data statistics before build')
    with tf.variable_scope('fitting_attr' + suffix, reuse=reuse):
        t_dfparam = tf.constant(self.numb_fparam,
                                name='dfparam',
                                dtype=tf.int32)
        t_daparam = tf.constant(self.numb_aparam,
                                name='daparam',
                                dtype=tf.int32)
        if self.numb_fparam > 0:
            t_fparam_avg = tf.get_variable(
                't_fparam_avg',
                self.numb_fparam,
                dtype=GLOBAL_TF_FLOAT_PRECISION,
                trainable=False,
                initializer=tf.constant_initializer(self.fparam_avg))
            t_fparam_istd = tf.get_variable(
                't_fparam_istd',
                self.numb_fparam,
                dtype=GLOBAL_TF_FLOAT_PRECISION,
                trainable=False,
                initializer=tf.constant_initializer(self.fparam_inv_std))
        if self.numb_aparam > 0:
            t_aparam_avg = tf.get_variable(
                't_aparam_avg',
                self.numb_aparam,
                dtype=GLOBAL_TF_FLOAT_PRECISION,
                trainable=False,
                initializer=tf.constant_initializer(self.aparam_avg))
            t_aparam_istd = tf.get_variable(
                't_aparam_istd',
                self.numb_aparam,
                dtype=GLOBAL_TF_FLOAT_PRECISION,
                trainable=False,
                initializer=tf.constant_initializer(self.aparam_inv_std))
    inputs = tf.reshape(inputs, [-1, self.dim_descrpt * natoms[0]])
    if len(self.atom_ener):
        # only for atom_ener
        nframes = input_dict.get('nframes')
        if nframes is not None:
            # like inputs, but we don't want to add a dependency on inputs
            inputs_zero = tf.zeros((nframes, self.dim_descrpt * natoms[0]),
                                   dtype=self.fitting_precision)
        else:
            inputs_zero = tf.zeros_like(inputs, dtype=self.fitting_precision)
    if bias_atom_e is not None:
        assert (len(bias_atom_e) == self.ntypes)
    fparam = None
    aparam = None
    if self.numb_fparam > 0:
        fparam = input_dict['fparam']
        fparam = tf.reshape(fparam, [-1, self.numb_fparam])
        fparam = (fparam - t_fparam_avg) * t_fparam_istd
    if self.numb_aparam > 0:
        aparam = input_dict['aparam']
        aparam = tf.reshape(aparam, [-1, self.numb_aparam])
        aparam = (aparam - t_aparam_avg) * t_aparam_istd
        aparam = tf.reshape(aparam, [-1, self.numb_aparam * natoms[0]])
    type_embedding = input_dict.get('type_embedding', None)
    if type_embedding is not None:
        atype_embed = embed_atom_type(self.ntypes, natoms, type_embedding)
        atype_embed = tf.tile(atype_embed, [tf.shape(inputs)[0], 1])
    else:
        atype_embed = None
    if atype_embed is None:
        start_index = 0
        outs_list = []
        for type_i in range(self.ntypes):
            if bias_atom_e is None:
                type_bias_ae = 0.0
            else:
                type_bias_ae = bias_atom_e[type_i]
            final_layer = self._build_lower(
                start_index,
                natoms[2 + type_i],
                inputs,
                fparam,
                aparam,
                bias_atom_e=type_bias_ae,
                suffix='_type_' + str(type_i) + suffix,
                reuse=reuse)
            # concat the results
            if type_i < len(self.atom_ener) and self.atom_ener[type_i] is not None:
                zero_layer = self._build_lower(
                    start_index,
                    natoms[2 + type_i],
                    inputs_zero,
                    fparam,
                    aparam,
                    bias_atom_e=type_bias_ae,
                    suffix='_type_' + str(type_i) + suffix,
                    reuse=True)
                final_layer += self.atom_ener[type_i] - zero_layer
            final_layer = tf.reshape(
                final_layer, [tf.shape(inputs)[0], natoms[2 + type_i]])
            outs_list.append(final_layer)
            start_index += natoms[2 + type_i]
        # concat the results
        # concat once may be faster than multiple concat
        outs = tf.concat(outs_list, axis=1)
    # with type embedding
    else:
        if len(self.atom_ener) > 0:
            raise RuntimeError(
                "setting atom_ener is not supported by type embedding")
        atype_embed = tf.cast(atype_embed, self.fitting_precision)
        type_shape = atype_embed.get_shape().as_list()
        inputs = tf.concat(
            [tf.reshape(inputs, [-1, self.dim_descrpt]), atype_embed],
            axis=1)
        self.dim_descrpt = self.dim_descrpt + type_shape[1]
        inputs = tf.reshape(inputs, [-1, self.dim_descrpt * natoms[0]])
        final_layer = self._build_lower(0,
                                        natoms[0],
                                        inputs,
                                        fparam,
                                        aparam,
                                        bias_atom_e=0.0,
                                        suffix=suffix,
                                        reuse=reuse)
        outs = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[0]])
    # add atom energy bias; TF will broadcast to all batches
    # tf.repeat is available in TF>=2.1 or TF 1.15
    _TF_VERSION = Version(TF_VERSION)
    if (Version('1.15') <= _TF_VERSION < Version('2')
            or _TF_VERSION >= Version('2.1')) and self.bias_atom_e is not None:
        outs += tf.repeat(
            tf.Variable(self.bias_atom_e,
                        dtype=self.fitting_precision,
                        trainable=False,
                        name="bias_atom_ei"), natoms[2:])
    if self.tot_ener_zero:
        force_tot_ener = 0.0
        outs = tf.reshape(outs, [-1, natoms[0]])
        outs_mean = tf.reshape(tf.reduce_mean(outs, axis=1), [-1, 1])
        outs_mean = outs_mean - tf.ones_like(
            outs_mean, dtype=GLOBAL_TF_FLOAT_PRECISION) * (
                force_tot_ener / global_cvt_2_tf_float(natoms[0]))
        outs = outs - outs_mean
        outs = tf.reshape(outs, [-1])
    tf.summary.histogram('fitting_net_output', outs)
    return tf.reshape(outs, [-1])
def _build_lower(self,
                 start_index,
                 natoms,
                 inputs,
                 fparam=None,
                 aparam=None,
                 bias_atom_e=0.0,
                 suffix='',
                 reuse=None):
    # cut-out inputs
    inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt],
                        [-1, natoms * self.dim_descrpt])
    inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
    layer = inputs_i
    if fparam is not None:
        ext_fparam = tf.tile(fparam, [1, natoms])
        ext_fparam = tf.reshape(ext_fparam, [-1, self.numb_fparam])
        ext_fparam = tf.cast(ext_fparam, self.fitting_precision)
        layer = tf.concat([layer, ext_fparam], axis=1)
    if aparam is not None:
        ext_aparam = tf.slice(aparam, [0, start_index * self.numb_aparam],
                              [-1, natoms * self.numb_aparam])
        ext_aparam = tf.reshape(ext_aparam, [-1, self.numb_aparam])
        ext_aparam = tf.cast(ext_aparam, self.fitting_precision)
        layer = tf.concat([layer, ext_aparam], axis=1)
    for ii in range(0, len(self.n_neuron)):
        if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
            layer += one_layer(layer,
                               self.n_neuron[ii],
                               name='layer_' + str(ii) + suffix,
                               reuse=reuse,
                               seed=self.seed,
                               use_timestep=self.resnet_dt,
                               activation_fn=self.fitting_activation_fn,
                               precision=self.fitting_precision,
                               trainable=self.trainable[ii],
                               uniform_seed=self.uniform_seed,
                               initial_variables=self.fitting_net_variables,
                               mixed_prec=self.mixed_prec)
        else:
            layer = one_layer(layer,
                              self.n_neuron[ii],
                              name='layer_' + str(ii) + suffix,
                              reuse=reuse,
                              seed=self.seed,
                              activation_fn=self.fitting_activation_fn,
                              precision=self.fitting_precision,
                              trainable=self.trainable[ii],
                              uniform_seed=self.uniform_seed,
                              initial_variables=self.fitting_net_variables,
                              mixed_prec=self.mixed_prec)
        if (not self.uniform_seed) and (self.seed is not None):
            self.seed += self.seed_shift
    final_layer = one_layer(layer,
                            1,
                            activation_fn=None,
                            bavg=bias_atom_e,
                            name='final_layer' + suffix,
                            reuse=reuse,
                            seed=self.seed,
                            precision=self.fitting_precision,
                            trainable=self.trainable[-1],
                            uniform_seed=self.uniform_seed,
                            initial_variables=self.fitting_net_variables,
                            mixed_prec=self.mixed_prec,
                            final_layer=True)
    if (not self.uniform_seed) and (self.seed is not None):
        self.seed += self.seed_shift
    return final_layer
def embedding_net(xx,
                  network_size,
                  precision,
                  activation_fn=tf.nn.tanh,
                  resnet_dt=False,
                  name_suffix='',
                  stddev=1.0,
                  bavg=0.0,
                  seed=None,
                  trainable=True,
                  uniform_seed=False,
                  initial_variables=None,
                  mixed_prec=None):
    r"""The embedding network.

    The embedding network function :math:`\mathcal{N}` is the composition of
    multiple layers :math:`\mathcal{L}^{(i)}`:

    .. math::
        \mathcal{N} = \mathcal{L}^{(n)} \circ \mathcal{L}^{(n-1)}
        \circ \cdots \circ \mathcal{L}^{(1)}

    A layer :math:`\mathcal{L}` is given by one of the following forms,
    depending on the number of nodes: [1]_

    .. math::
        \mathbf{y}=\mathcal{L}(\mathbf{x};\mathbf{w},\mathbf{b})=
            \begin{cases}
                \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b}) + \mathbf{x}, & N_2=N_1 \\
                \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b}) + (\mathbf{x}, \mathbf{x}), & N_2 = 2N_1 \\
                \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b}), & \text{otherwise} \\
            \end{cases}

    where :math:`\mathbf{x} \in \mathbb{R}^{N_1}` is the input vector and
    :math:`\mathbf{y} \in \mathbb{R}^{N_2}` is the output vector.
    :math:`\mathbf{w} \in \mathbb{R}^{N_1 \times N_2}` and
    :math:`\mathbf{b} \in \mathbb{R}^{N_2}` are weights and biases,
    respectively, both of which are trainable if `trainable` is `True`.
    :math:`\boldsymbol{\phi}` is the activation function.

    Parameters
    ----------
    xx : Tensor
        Input tensor :math:`\mathbf{x}` of shape [-1,1]
    network_size : list of int
        Size of the embedding network. For example [16,32,64]
    precision:
        Precision of network weights. For example, tf.float64
    activation_fn:
        Activation function :math:`\boldsymbol{\phi}`
    resnet_dt : boolean
        Using time-step in the ResNet construction
    name_suffix : str
        The name suffix appended to each variable.
    stddev : float
        Standard deviation of initializing network parameters
    bavg : float
        Mean of network initial bias
    seed : int
        Random seed for initializing network parameters
    trainable : boolean
        If the network is trainable
    uniform_seed : boolean
        Only for the purpose of backward compatibility, retrieves the old
        behavior of using the random seed
    initial_variables : dict
        The input dict which stores the embedding net variables
    mixed_prec
        The input dict which stores the mixed precision setting for the
        embedding net

    References
    ----------
    .. [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Identity
       mappings in deep residual networks. In Computer Vision – ECCV 2016,
       pages 630–645. Springer International Publishing, 2016.
    """
    input_shape = xx.get_shape().as_list()
    outputs_size = [input_shape[1]] + network_size
    for ii in range(1, len(outputs_size)):
        w_initializer = tf.random_normal_initializer(
            stddev=stddev / np.sqrt(outputs_size[ii] + outputs_size[ii - 1]),
            seed=seed if (seed is None or uniform_seed) else seed + ii * 3 + 0)
        b_initializer = tf.random_normal_initializer(
            stddev=stddev,
            mean=bavg,
            seed=seed if (seed is None or uniform_seed) else seed + 3 * ii + 1)
        if initial_variables is not None:
            scope = tf.get_variable_scope().name
            w_initializer = tf.constant_initializer(
                initial_variables[scope + '/matrix_' + str(ii) + name_suffix])
            b_initializer = tf.constant_initializer(
                initial_variables[scope + '/bias_' + str(ii) + name_suffix])
        w = tf.get_variable('matrix_' + str(ii) + name_suffix,
                            [outputs_size[ii - 1], outputs_size[ii]],
                            precision,
                            w_initializer,
                            trainable=trainable)
        variable_summaries(w, 'matrix_' + str(ii) + name_suffix)
        b = tf.get_variable('bias_' + str(ii) + name_suffix,
                            [outputs_size[ii]],
                            precision,
                            b_initializer,
                            trainable=trainable)
        variable_summaries(b, 'bias_' + str(ii) + name_suffix)
        if mixed_prec is not None:
            xx = tf.cast(xx, get_precision(mixed_prec['compute_prec']))
            w = tf.cast(w, get_precision(mixed_prec['compute_prec']))
            b = tf.cast(b, get_precision(mixed_prec['compute_prec']))
        hidden = tf.reshape(
            activation_fn(tf.nn.bias_add(tf.matmul(xx, w), b)),
            [-1, outputs_size[ii]])
        if resnet_dt:
            idt_initializer = tf.random_normal_initializer(
                stddev=0.001,
                mean=1.0,
                seed=seed if (seed is None or uniform_seed) else seed + 3 * ii + 2)
            if initial_variables is not None:
                scope = tf.get_variable_scope().name
                idt_initializer = tf.constant_initializer(
                    initial_variables[scope + '/idt_' + str(ii) + name_suffix])
            idt = tf.get_variable('idt_' + str(ii) + name_suffix,
                                  [1, outputs_size[ii]],
                                  precision,
                                  idt_initializer,
                                  trainable=trainable)
            variable_summaries(idt, 'idt_' + str(ii) + name_suffix)
            if mixed_prec is not None:
                idt = tf.cast(idt, get_precision(mixed_prec['compute_prec']))
        if outputs_size[ii] == outputs_size[ii - 1]:
            if resnet_dt:
                xx += hidden * idt
            else:
                xx += hidden
        elif outputs_size[ii] == outputs_size[ii - 1] * 2:
            if resnet_dt:
                xx = tf.concat([xx, xx], 1) + hidden * idt
            else:
                xx = tf.concat([xx, xx], 1) + hidden
        else:
            xx = hidden
    if mixed_prec is not None:
        xx = tf.cast(xx, get_precision(mixed_prec['output_prec']))
    return xx
def build(self, inputs, input_dict, natoms, reuse=None, suffix=''):
    bias_atom_e = self.bias_atom_e
    if self.numb_fparam > 0 and (self.fparam_avg is None
                                 or self.fparam_inv_std is None):
        raise RuntimeError(
            'No data stat result. One should do data statistics before build')
    if self.numb_aparam > 0 and (self.aparam_avg is None
                                 or self.aparam_inv_std is None):
        raise RuntimeError(
            'No data stat result. One should do data statistics before build')
    with tf.variable_scope('fitting_attr' + suffix, reuse=reuse):
        t_dfparam = tf.constant(self.numb_fparam,
                                name='dfparam',
                                dtype=tf.int32)
        t_daparam = tf.constant(self.numb_aparam,
                                name='daparam',
                                dtype=tf.int32)
        if self.numb_fparam > 0:
            t_fparam_avg = tf.get_variable(
                't_fparam_avg',
                self.numb_fparam,
                dtype=global_tf_float_precision,
                trainable=False,
                initializer=tf.constant_initializer(self.fparam_avg))
            t_fparam_istd = tf.get_variable(
                't_fparam_istd',
                self.numb_fparam,
                dtype=global_tf_float_precision,
                trainable=False,
                initializer=tf.constant_initializer(self.fparam_inv_std))
        if self.numb_aparam > 0:
            t_aparam_avg = tf.get_variable(
                't_aparam_avg',
                self.numb_aparam,
                dtype=global_tf_float_precision,
                trainable=False,
                initializer=tf.constant_initializer(self.aparam_avg))
            t_aparam_istd = tf.get_variable(
                't_aparam_istd',
                self.numb_aparam,
                dtype=global_tf_float_precision,
                trainable=False,
                initializer=tf.constant_initializer(self.aparam_inv_std))
    start_index = 0
    inputs = tf.cast(tf.reshape(inputs, [-1, self.dim_descrpt * natoms[0]]),
                     self.fitting_precision)
    if bias_atom_e is not None:
        assert (len(bias_atom_e) == self.ntypes)
    if self.numb_fparam > 0:
        fparam = input_dict['fparam']
        fparam = tf.reshape(fparam, [-1, self.numb_fparam])
        fparam = (fparam - t_fparam_avg) * t_fparam_istd
    if self.numb_aparam > 0:
        aparam = input_dict['aparam']
        aparam = tf.reshape(aparam, [-1, self.numb_aparam])
        aparam = (aparam - t_aparam_avg) * t_aparam_istd
        aparam = tf.reshape(aparam, [-1, self.numb_aparam * natoms[0]])
    for type_i in range(self.ntypes):
        # cut-out inputs
        inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt],
                            [-1, natoms[2 + type_i] * self.dim_descrpt])
        inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
        layer = inputs_i
        if self.numb_fparam > 0:
            ext_fparam = tf.tile(fparam, [1, natoms[2 + type_i]])
            ext_fparam = tf.reshape(ext_fparam, [-1, self.numb_fparam])
            layer = tf.concat([layer, ext_fparam], axis=1)
        if self.numb_aparam > 0:
            ext_aparam = tf.slice(aparam,
                                  [0, start_index * self.numb_aparam],
                                  [-1, natoms[2 + type_i] * self.numb_aparam])
            ext_aparam = tf.reshape(ext_aparam, [-1, self.numb_aparam])
            layer = tf.concat([layer, ext_aparam], axis=1)
        start_index += natoms[2 + type_i]
        if bias_atom_e is None:
            type_bias_ae = 0.0
        else:
            type_bias_ae = bias_atom_e[type_i]
        for ii in range(0, len(self.n_neuron)):
            if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
                layer += one_layer(
                    layer,
                    self.n_neuron[ii],
                    name='layer_' + str(ii) + '_type_' + str(type_i) + suffix,
                    reuse=reuse,
                    seed=self.seed,
                    use_timestep=self.resnet_dt,
                    activation_fn=self.fitting_activation_fn,
                    precision=self.fitting_precision,
                    trainable=self.trainable[ii])
            else:
                layer = one_layer(
                    layer,
                    self.n_neuron[ii],
                    name='layer_' + str(ii) + '_type_' + str(type_i) + suffix,
                    reuse=reuse,
                    seed=self.seed,
                    activation_fn=self.fitting_activation_fn,
                    precision=self.fitting_precision,
                    trainable=self.trainable[ii])
        final_layer = one_layer(layer,
                                1,
                                activation_fn=None,
                                bavg=type_bias_ae,
                                name='final_layer_type_' + str(type_i) + suffix,
                                reuse=reuse,
                                seed=self.seed,
                                precision=self.fitting_precision,
                                trainable=self.trainable[-1])
        if type_i < len(self.atom_ener) and self.atom_ener[type_i] is not None:
            inputs_zero = tf.zeros_like(inputs_i,
                                        dtype=global_tf_float_precision)
            layer = inputs_zero
            if self.numb_fparam > 0:
                layer = tf.concat([layer, ext_fparam], axis=1)
            if self.numb_aparam > 0:
                layer = tf.concat([layer, ext_aparam], axis=1)
            for ii in range(0, len(self.n_neuron)):
                if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
                    layer += one_layer(
                        layer,
                        self.n_neuron[ii],
                        name='layer_' + str(ii) + '_type_' + str(type_i) + suffix,
                        reuse=True,
                        seed=self.seed,
                        use_timestep=self.resnet_dt,
                        activation_fn=self.fitting_activation_fn,
                        precision=self.fitting_precision,
                        trainable=self.trainable[ii])
                else:
                    layer = one_layer(
                        layer,
                        self.n_neuron[ii],
                        name='layer_' + str(ii) + '_type_' + str(type_i) + suffix,
                        reuse=True,
                        seed=self.seed,
                        activation_fn=self.fitting_activation_fn,
                        precision=self.fitting_precision,
                        trainable=self.trainable[ii])
            zero_layer = one_layer(layer,
                                   1,
                                   activation_fn=None,
                                   bavg=type_bias_ae,
                                   name='final_layer_type_' + str(type_i) + suffix,
                                   reuse=True,
                                   seed=self.seed,
                                   precision=self.fitting_precision,
                                   trainable=self.trainable[-1])
            final_layer += self.atom_ener[type_i] - zero_layer
        final_layer = tf.reshape(final_layer,
                                 [tf.shape(inputs)[0], natoms[2 + type_i]])
        # concat the results
        if type_i == 0:
            outs = final_layer
        else:
            outs = tf.concat([outs, final_layer], axis=1)
    return tf.cast(tf.reshape(outs, [-1]), global_tf_float_precision)
def _filter(self,
            inputs,
            type_input,
            natoms,
            activation_fn=tf.nn.tanh,
            stddev=1.0,
            bavg=0.0,
            name='linear',
            reuse=None,
            seed=None,
            trainable=True):
    # natom x (nei x 4)
    shape = inputs.get_shape().as_list()
    outputs_size = [1] + self.filter_neuron
    outputs_size_2 = self.n_axis_neuron
    with tf.variable_scope(name, reuse=reuse):
        start_index = 0
        xyz_scatter_total = []
        for type_i in range(self.ntypes):
            # cut-out inputs
            # with natom x (nei_type_i x 4)
            inputs_i = tf.slice(inputs, [0, start_index * 4],
                                [-1, self.sel_a[type_i] * 4])
            start_index += self.sel_a[type_i]
            shape_i = inputs_i.get_shape().as_list()
            # with (natom x nei_type_i) x 4
            inputs_reshape = tf.reshape(inputs_i, [-1, 4])
            xyz_scatter = tf.reshape(
                tf.slice(inputs_reshape, [0, 0], [-1, 1]), [-1, 1])
            if (type_input, type_i) not in self.exclude_types:
                for ii in range(1, len(outputs_size)):
                    w = tf.get_variable(
                        'matrix_' + str(ii) + '_' + str(type_i),
                        [outputs_size[ii - 1], outputs_size[ii]],
                        self.filter_precision,
                        tf.random_normal_initializer(
                            stddev=stddev /
                            np.sqrt(outputs_size[ii] + outputs_size[ii - 1]),
                            seed=seed),
                        trainable=trainable)
                    b = tf.get_variable(
                        'bias_' + str(ii) + '_' + str(type_i),
                        [1, outputs_size[ii]],
                        self.filter_precision,
                        tf.random_normal_initializer(stddev=stddev,
                                                     mean=bavg,
                                                     seed=seed),
                        trainable=trainable)
                    if self.filter_resnet_dt:
                        idt = tf.get_variable(
                            'idt_' + str(ii) + '_' + str(type_i),
                            [1, outputs_size[ii]],
                            self.filter_precision,
                            tf.random_normal_initializer(stddev=0.001,
                                                         mean=1.0,
                                                         seed=seed),
                            trainable=trainable)
                    if outputs_size[ii] == outputs_size[ii - 1]:
                        if self.filter_resnet_dt:
                            xyz_scatter += activation_fn(
                                tf.matmul(xyz_scatter, w) + b) * idt
                        else:
                            xyz_scatter += activation_fn(
                                tf.matmul(xyz_scatter, w) + b)
                    elif outputs_size[ii] == outputs_size[ii - 1] * 2:
                        if self.filter_resnet_dt:
                            xyz_scatter = tf.concat(
                                [xyz_scatter, xyz_scatter], 1) + activation_fn(
                                    tf.matmul(xyz_scatter, w) + b) * idt
                        else:
                            xyz_scatter = tf.concat(
                                [xyz_scatter, xyz_scatter], 1) + activation_fn(
                                    tf.matmul(xyz_scatter, w) + b)
                    else:
                        xyz_scatter = activation_fn(
                            tf.matmul(xyz_scatter, w) + b)
            else:
                w = tf.zeros((outputs_size[0], outputs_size[-1]),
                             dtype=global_tf_float_precision)
                xyz_scatter = tf.matmul(xyz_scatter, w)
            # natom x nei_type_i x out_size
            xyz_scatter = tf.reshape(
                xyz_scatter, (-1, shape_i[1] // 4, outputs_size[-1]))
            xyz_scatter_total.append(xyz_scatter)
        # natom x nei x outputs_size
        xyz_scatter = tf.concat(xyz_scatter_total, axis=1)
        # natom x nei x 4
        inputs_reshape = tf.reshape(inputs, [-1, shape[1] // 4, 4])
        # natom x 4 x outputs_size
        xyz_scatter_1 = tf.matmul(inputs_reshape, xyz_scatter,
                                  transpose_a=True)
        xyz_scatter_1 = xyz_scatter_1 * (4.0 / shape[1])
        # natom x 4 x outputs_size_2
        xyz_scatter_2 = tf.slice(xyz_scatter_1, [0, 0, 0],
                                 [-1, -1, outputs_size_2])
        # # natom x 3 x outputs_size_2
        # qmat = tf.slice(xyz_scatter_2, [0,1,0], [-1, 3, -1])
        # natom x 3 x outputs_size_1
        qmat = tf.slice(xyz_scatter_1, [0, 1, 0], [-1, 3, -1])
        # natom x outputs_size_1 x 3
        qmat = tf.transpose(qmat, perm=[0, 2, 1])
        # natom x outputs_size x outputs_size_2
        result = tf.matmul(xyz_scatter_1, xyz_scatter_2, transpose_a=True)
        # natom x (outputs_size x outputs_size_2)
        result = tf.reshape(result,
                            [-1, outputs_size_2 * outputs_size[-1]])
        return result, qmat
def build(self, input_d, rot_mat, natoms, reuse=None, suffix=''):
    start_index = 0
    inputs = tf.cast(
        tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]),
        self.fitting_precision)
    rot_mat = tf.reshape(rot_mat, [-1, self.dim_rot_mat * natoms[0]])
    count = 0
    for type_i in range(self.ntypes):
        # cut-out inputs
        inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt],
                            [-1, natoms[2 + type_i] * self.dim_descrpt])
        inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
        rot_mat_i = tf.slice(rot_mat, [0, start_index * self.dim_rot_mat],
                             [-1, natoms[2 + type_i] * self.dim_rot_mat])
        rot_mat_i = tf.reshape(rot_mat_i, [-1, self.dim_rot_mat_1, 3])
        start_index += natoms[2 + type_i]
        if not type_i in self.sel_type:
            continue
        layer = inputs_i
        for ii in range(0, len(self.n_neuron)):
            if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
                layer += one_layer(
                    layer,
                    self.n_neuron[ii],
                    name='layer_' + str(ii) + '_type_' + str(type_i) + suffix,
                    reuse=reuse,
                    seed=self.seed,
                    use_timestep=self.resnet_dt,
                    activation_fn=self.fitting_activation_fn,
                    precision=self.fitting_precision)
            else:
                layer = one_layer(
                    layer,
                    self.n_neuron[ii],
                    name='layer_' + str(ii) + '_type_' + str(type_i) + suffix,
                    reuse=reuse,
                    seed=self.seed,
                    activation_fn=self.fitting_activation_fn,
                    precision=self.fitting_precision)
        if self.fit_diag:
            bavg = np.zeros(self.dim_rot_mat_1)
            # bavg[0] = self.avgeig[0]
            # bavg[1] = self.avgeig[1]
            # bavg[2] = self.avgeig[2]
            # (nframes x natoms) x naxis
            final_layer = one_layer(layer,
                                    self.dim_rot_mat_1,
                                    activation_fn=None,
                                    name='final_layer_type_' + str(type_i) + suffix,
                                    reuse=reuse,
                                    seed=self.seed,
                                    bavg=bavg,
                                    precision=self.fitting_precision)
            # (nframes x natoms) x naxis
            final_layer = tf.reshape(final_layer, [
                tf.shape(inputs)[0] * natoms[2 + type_i], self.dim_rot_mat_1
            ])
            # (nframes x natoms) x naxis x naxis
            final_layer = tf.matrix_diag(final_layer)
        else:
            bavg = np.zeros(self.dim_rot_mat_1 * self.dim_rot_mat_1)
            # bavg[0*self.dim_rot_mat_1+0] = self.avgeig[0]
            # bavg[1*self.dim_rot_mat_1+1] = self.avgeig[1]
            # bavg[2*self.dim_rot_mat_1+2] = self.avgeig[2]
            # (nframes x natoms) x (naxis x naxis)
            final_layer = one_layer(layer,
                                    self.dim_rot_mat_1 * self.dim_rot_mat_1,
                                    activation_fn=None,
                                    name='final_layer_type_' + str(type_i) + suffix,
                                    reuse=reuse,
                                    seed=self.seed,
                                    bavg=bavg,
                                    precision=self.fitting_precision)
            # (nframes x natoms) x naxis x naxis
            final_layer = tf.reshape(final_layer, [
                tf.shape(inputs)[0] * natoms[2 + type_i],
                self.dim_rot_mat_1, self.dim_rot_mat_1
            ])
            # (nframes x natoms) x naxis x naxis
            final_layer = final_layer + tf.transpose(final_layer,
                                                     perm=[0, 2, 1])
        # (nframes x natoms) x naxis x 3(coord)
        final_layer = tf.matmul(final_layer, rot_mat_i)
        # (nframes x natoms) x 3(coord) x 3(coord)
        final_layer = tf.matmul(rot_mat_i, final_layer, transpose_a=True)
        # nframes x natoms x 3 x 3
        final_layer = tf.reshape(
            final_layer, [tf.shape(inputs)[0], natoms[2 + type_i], 3, 3])
        # shift and scale
        sel_type_idx = self.sel_type.index(type_i)
        final_layer = final_layer * self.scale[sel_type_idx]
        final_layer = final_layer + self.diag_shift[sel_type_idx] * tf.eye(
            3,
            batch_shape=[tf.shape(inputs)[0], natoms[2 + type_i]],
            dtype=global_tf_float_precision)
        # concat the results
        if count == 0:
            outs = final_layer
        else:
            outs = tf.concat([outs, final_layer], axis=1)
        count += 1
    return tf.cast(tf.reshape(outs, [-1]), global_tf_float_precision)
def build(self, input_d, rot_mat, natoms, reuse=None, suffix=''):
    start_index = 0
    inputs = tf.cast(
        tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]),
        self.fitting_precision)
    rot_mat = tf.reshape(rot_mat, [-1, self.dim_rot_mat * natoms[0]])
    count = 0
    for type_i in range(self.ntypes):
        # cut-out inputs
        inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt],
                            [-1, natoms[2 + type_i] * self.dim_descrpt])
        inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
        rot_mat_i = tf.slice(rot_mat, [0, start_index * self.dim_rot_mat],
                             [-1, natoms[2 + type_i] * self.dim_rot_mat])
        rot_mat_i = tf.reshape(rot_mat_i, [-1, self.dim_rot_mat_1, 3])
        start_index += natoms[2 + type_i]
        if not type_i in self.sel_type:
            continue
        layer = inputs_i
        for ii in range(0, len(self.n_neuron)):
            if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
                layer += one_layer(
                    layer,
                    self.n_neuron[ii],
                    name='layer_' + str(ii) + '_type_' + str(type_i) + suffix,
                    reuse=reuse,
                    seed=self.seed,
                    use_timestep=self.resnet_dt,
                    activation_fn=self.fitting_activation_fn,
                    precision=self.fitting_precision)
            else:
                layer = one_layer(
                    layer,
                    self.n_neuron[ii],
                    name='layer_' + str(ii) + '_type_' + str(type_i) + suffix,
                    reuse=reuse,
                    seed=self.seed,
                    activation_fn=self.fitting_activation_fn,
                    precision=self.fitting_precision)
        # (nframes x natoms) x naxis
        final_layer = one_layer(layer,
                                self.dim_rot_mat_1,
                                activation_fn=None,
                                name='final_layer_type_' + str(type_i) + suffix,
                                reuse=reuse,
                                seed=self.seed,
                                precision=self.fitting_precision)
        # (nframes x natoms) x 1 x naxis
        final_layer = tf.reshape(final_layer, [
            tf.shape(inputs)[0] * natoms[2 + type_i], 1, self.dim_rot_mat_1
        ])
        # (nframes x natoms) x 1 x 3(coord)
        final_layer = tf.matmul(final_layer, rot_mat_i)
        # nframes x natoms x 3
        final_layer = tf.reshape(
            final_layer, [tf.shape(inputs)[0], natoms[2 + type_i], 3])
        # concat the results
        if count == 0:
            outs = final_layer
        else:
            outs = tf.concat([outs, final_layer], axis=1)
        count += 1
    return tf.cast(tf.reshape(outs, [-1]), global_tf_float_precision)
def _filter_r(self,
              inputs,
              type_input,
              natoms,
              activation_fn=tf.nn.tanh,
              stddev=1.0,
              bavg=0.0,
              name='linear',
              reuse=None,
              seed=None,
              trainable=True):
    # natom x nei
    outputs_size = [1] + self.filter_neuron
    with tf.variable_scope(name, reuse=reuse):
        start_index = 0
        xyz_scatter_total = []
        for type_i in range(self.ntypes):
            # cut-out inputs
            # with natom x nei_type_i
            inputs_i = tf.slice(inputs, [0, start_index],
                                [-1, self.sel_r[type_i]])
            start_index += self.sel_r[type_i]
            shape_i = inputs_i.get_shape().as_list()
            # with (natom x nei_type_i) x 1
            xyz_scatter = tf.reshape(inputs_i, [-1, 1])
            if (type_input, type_i) not in self.exclude_types:
                for ii in range(1, len(outputs_size)):
                    w = tf.get_variable(
                        'matrix_' + str(ii) + '_' + str(type_i),
                        [outputs_size[ii - 1], outputs_size[ii]],
                        self.filter_precision,
                        tf.random_normal_initializer(
                            stddev=stddev /
                            np.sqrt(outputs_size[ii] + outputs_size[ii - 1]),
                            seed=seed),
                        trainable=trainable)
                    b = tf.get_variable(
                        'bias_' + str(ii) + '_' + str(type_i),
                        [1, outputs_size[ii]],
                        self.filter_precision,
                        tf.random_normal_initializer(stddev=stddev,
                                                     mean=bavg,
                                                     seed=seed),
                        trainable=trainable)
                    if self.filter_resnet_dt:
                        idt = tf.get_variable(
                            'idt_' + str(ii) + '_' + str(type_i),
                            [1, outputs_size[ii]],
                            self.filter_precision,
                            tf.random_normal_initializer(stddev=0.001,
                                                         mean=1.0,
                                                         seed=seed),
                            trainable=trainable)
                    if outputs_size[ii] == outputs_size[ii - 1]:
                        if self.filter_resnet_dt:
                            xyz_scatter += activation_fn(
                                tf.matmul(xyz_scatter, w) + b) * idt
                        else:
                            xyz_scatter += activation_fn(
                                tf.matmul(xyz_scatter, w) + b)
                    elif outputs_size[ii] == outputs_size[ii - 1] * 2:
                        if self.filter_resnet_dt:
                            xyz_scatter = tf.concat(
                                [xyz_scatter, xyz_scatter], 1) + activation_fn(
                                    tf.matmul(xyz_scatter, w) + b) * idt
                        else:
                            xyz_scatter = tf.concat(
                                [xyz_scatter, xyz_scatter], 1) + activation_fn(
                                    tf.matmul(xyz_scatter, w) + b)
                    else:
                        xyz_scatter = activation_fn(
                            tf.matmul(xyz_scatter, w) + b)
            else:
                w = tf.zeros((outputs_size[0], outputs_size[-1]),
                             dtype=global_tf_float_precision)
                xyz_scatter = tf.matmul(xyz_scatter, w)
            # natom x nei_type_i x out_size
            xyz_scatter = tf.reshape(xyz_scatter,
                                     (-1, shape_i[1], outputs_size[-1]))
            xyz_scatter_total.append(xyz_scatter)
        # natom x nei x outputs_size
        xyz_scatter = tf.concat(xyz_scatter_total, axis=1)
        # natom x outputs_size
        res_rescale = 1. / 5.
        result = tf.reduce_mean(xyz_scatter, axis=1) * res_rescale
        return result
def _layer_1(self, x, w, b):
    t = tf.concat([x, x], axis=1)
    return t, self.activation_fn(tf.matmul(x, w) + b) + t
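# Hedged note on _layer_1 (an illustrative reading, not stated in the source):
# this realizes the N2 = 2*N1 branch of the layer equation in embedding_net's
# docstring above. With x of shape [batch, N1] and w of shape [N1, 2*N1],
# t = (x, x) has shape [batch, 2*N1], and the layer returns
# phi(x.w + b) + (x, x), i.e. the doubling-residual form.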
def _filter_type_ext(self,
                     inputs,
                     natoms,
                     activation_fn=tf.nn.tanh,
                     stddev=1.0,
                     bavg=0.0,
                     name='linear',
                     reuse=None,
                     seed=None,
                     trainable=True):
    # natom x (nei x 4)
    outputs_size = [1] + self.filter_neuron
    outputs_size_2 = self.n_axis_neuron
    with tf.variable_scope(name, reuse=reuse):
        start_index = 0
        result_all = []
        xyz_scatter_1_all = []
        xyz_scatter_2_all = []
        for type_i in range(self.ntypes):
            # cut-out inputs
            # with natom x (nei_type_i x 4)
            inputs_i = tf.slice(inputs, [0, start_index * 4],
                                [-1, self.sel_a[type_i] * 4])
            start_index += self.sel_a[type_i]
            shape_i = inputs_i.get_shape().as_list()
            # with (natom x nei_type_i) x 4
            inputs_reshape = tf.reshape(inputs_i, [-1, 4])
            xyz_scatter = tf.reshape(
                tf.slice(inputs_reshape, [0, 0], [-1, 1]), [-1, 1])
            for ii in range(1, len(outputs_size)):
                w = tf.get_variable(
                    'matrix_' + str(ii) + '_' + str(type_i),
                    [outputs_size[ii - 1], outputs_size[ii]],
                    self.filter_precision,
                    tf.random_normal_initializer(
                        stddev=stddev /
                        np.sqrt(outputs_size[ii] + outputs_size[ii - 1]),
                        seed=seed),
                    trainable=trainable)
                b = tf.get_variable(
                    'bias_' + str(ii) + '_' + str(type_i),
                    [1, outputs_size[ii]],
                    self.filter_precision,
                    tf.random_normal_initializer(stddev=stddev,
                                                 mean=bavg,
                                                 seed=seed),
                    trainable=trainable)
                if self.filter_resnet_dt:
                    idt = tf.get_variable(
                        'idt_' + str(ii) + '_' + str(type_i),
                        [1, outputs_size[ii]],
                        self.filter_precision,
                        tf.random_normal_initializer(stddev=0.001,
                                                     mean=1.0,
                                                     seed=seed),
                        trainable=trainable)
                if outputs_size[ii] == outputs_size[ii - 1]:
                    if self.filter_resnet_dt:
                        xyz_scatter += activation_fn(
                            tf.matmul(xyz_scatter, w) + b) * idt
                    else:
                        xyz_scatter += activation_fn(
                            tf.matmul(xyz_scatter, w) + b)
                elif outputs_size[ii] == outputs_size[ii - 1] * 2:
                    if self.filter_resnet_dt:
                        xyz_scatter = tf.concat(
                            [xyz_scatter, xyz_scatter], 1) + activation_fn(
                                tf.matmul(xyz_scatter, w) + b) * idt
                    else:
                        xyz_scatter = tf.concat(
                            [xyz_scatter, xyz_scatter], 1) + activation_fn(
                                tf.matmul(xyz_scatter, w) + b)
                else:
                    xyz_scatter = activation_fn(tf.matmul(xyz_scatter, w) + b)
            # natom x nei_type_i x out_size
            xyz_scatter = tf.reshape(
                xyz_scatter, (-1, shape_i[1] // 4, outputs_size[-1]))
            # natom x nei_type_i x 4
            inputs_i_reshape = tf.reshape(inputs_i, [-1, shape_i[1] // 4, 4])
            # natom x 4 x outputs_size
            xyz_scatter_1 = tf.matmul(inputs_i_reshape, xyz_scatter,
                                      transpose_a=True)
            xyz_scatter_1 = xyz_scatter_1 * (4.0 / shape_i[1])
            # natom x 4 x outputs_size_2
            xyz_scatter_2 = tf.slice(xyz_scatter_1, [0, 0, 0],
                                     [-1, -1, outputs_size_2])
            xyz_scatter_1_all.append(xyz_scatter_1)
            xyz_scatter_2_all.append(xyz_scatter_2)
        # for type_i in range(self.ntypes):
        #     for type_j in range(type_i, self.ntypes):
        #         # natom x outputs_size x outputs_size_2
        #         result = tf.matmul(xyz_scatter_1_all[type_i], xyz_scatter_2_all[type_j], transpose_a = True)
        #         # natom x (outputs_size x outputs_size_2)
        #         result = tf.reshape(result, [-1, outputs_size_2 * outputs_size[-1]])
        #         result_all.append(tf.identity(result))
        xyz_scatter_2_coll = tf.concat(xyz_scatter_2_all, axis=2)
        for type_i in range(self.ntypes):
            # natom x outputs_size x (outputs_size_2 x ntypes)
            result = tf.matmul(xyz_scatter_1_all[type_i], xyz_scatter_2_coll,
                               transpose_a=True)
            # natom x (outputs_size x outputs_size_2 x ntypes)
            result = tf.reshape(
                result,
                [-1, outputs_size_2 * self.ntypes * outputs_size[-1]])
            result_all.append(tf.identity(result))
        # natom x (ntypes x outputs_size x outputs_size_2 x ntypes)
        result_all = tf.concat(result_all, axis=1)
    return result_all
def build(self,
          coord_,
          atype_,
          natoms,
          box,
          mesh,
          input_dict,
          frz_model=None,
          suffix='',
          reuse=None):
    with tf.variable_scope('model_attr' + suffix, reuse=reuse):
        t_tmap = tf.constant(' '.join(self.type_map),
                             name='tmap',
                             dtype=tf.string)
        t_st = tf.constant(self.get_sel_type(),
                           name='sel_type',
                           dtype=tf.int32)
        t_mt = tf.constant(self.model_type,
                           name='model_type',
                           dtype=tf.string)
        t_ver = tf.constant(MODEL_VERSION,
                            name='model_version',
                            dtype=tf.string)
        t_od = tf.constant(self.get_out_size(),
                           name='output_dim',
                           dtype=tf.int32)
    natomsel = sum(natoms[2 + type_i] for type_i in self.get_sel_type())
    nout = self.get_out_size()
    if frz_model is None:
        dout = self.descrpt.build(coord_, atype_, natoms, box, mesh,
                                  input_dict, suffix=suffix, reuse=reuse)
        dout = tf.identity(dout, name='o_descriptor')
    else:
        tf.constant(self.rcut,
                    name='descrpt_attr/rcut',
                    dtype=GLOBAL_TF_FLOAT_PRECISION)
        tf.constant(self.ntypes, name='descrpt_attr/ntypes', dtype=tf.int32)
        feed_dict = self.descrpt.get_feed_dict(coord_, atype_, natoms, box,
                                               mesh)
        return_elements = [*self.descrpt.get_tensor_names(), 'o_descriptor:0']
        imported_tensors = self._import_graph_def_from_frz_model(
            frz_model, feed_dict, return_elements)
        dout = imported_tensors[-1]
        self.descrpt.pass_tensors_from_frz_model(*imported_tensors[:-1])
    rot_mat = self.descrpt.get_rot_mat()
    rot_mat = tf.identity(rot_mat, name='o_rot_mat' + suffix)
    output = self.fitting.build(dout, rot_mat, natoms,
                                reuse=reuse, suffix=suffix)
    framesize = nout if "global" in self.model_type else natomsel * nout
    output = tf.reshape(output, [-1, framesize],
                        name='o_' + self.model_type + suffix)
    model_dict = {self.model_type: output}
    if "global" not in self.model_type:
        gname = "global_" + self.model_type
        atom_out = tf.reshape(output, [-1, natomsel, nout])
        global_out = tf.reduce_sum(atom_out, axis=1)
        global_out = tf.reshape(global_out, [-1, nout],
                                name="o_" + gname + suffix)
        out_cpnts = tf.split(atom_out, nout, axis=-1)
        force_cpnts = []
        virial_cpnts = []
        atom_virial_cpnts = []
        for out_i in out_cpnts:
            force_i, virial_i, atom_virial_i = self.descrpt.prod_force_virial(
                out_i, natoms)
            force_cpnts.append(tf.reshape(force_i, [-1, 3 * natoms[1]]))
            virial_cpnts.append(tf.reshape(virial_i, [-1, 9]))
            atom_virial_cpnts.append(
                tf.reshape(atom_virial_i, [-1, 9 * natoms[1]]))
        # [nframe x nout x (natom x 3)]
        force = tf.concat(force_cpnts, axis=1, name="o_force" + suffix)
        # [nframe x nout x 9]
        virial = tf.concat(virial_cpnts, axis=1, name="o_virial" + suffix)
        # [nframe x nout x (natom x 9)]
        atom_virial = tf.concat(atom_virial_cpnts, axis=1,
                                name="o_atom_virial" + suffix)
        model_dict[gname] = global_out
        model_dict["force"] = force
        model_dict["virial"] = virial
        model_dict["atom_virial"] = atom_virial
    return model_dict