def _pass_filter(self, inputs, natoms, reuse=None, suffix='', trainable=True):
    start_index = 0
    inputs = tf.reshape(inputs, [-1, self.ndescrpt * natoms[0]])
    output = []
    if not self.type_one_side:
        for type_i in range(self.ntypes):
            inputs_i = tf.slice(inputs,
                                [0, start_index * self.ndescrpt],
                                [-1, natoms[2 + type_i] * self.ndescrpt])
            inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
            layer = self._filter_r(tf.cast(inputs_i, self.filter_precision),
                                   type_i,
                                   name='filter_type_' + str(type_i) + suffix,
                                   natoms=natoms,
                                   reuse=reuse,
                                   trainable=trainable,
                                   activation_fn=self.filter_activation_fn)
            layer = tf.reshape(
                layer, [tf.shape(inputs)[0], natoms[2 + type_i] * self.get_dim_out()])
            output.append(layer)
            start_index += natoms[2 + type_i]
    else:
        inputs_i = inputs
        inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
        type_i = -1
        layer = self._filter_r(tf.cast(inputs_i, self.filter_precision),
                               type_i,
                               name='filter_type_all' + suffix,
                               natoms=natoms,
                               reuse=reuse,
                               trainable=trainable,
                               activation_fn=self.filter_activation_fn)
        layer = tf.reshape(
            layer, [tf.shape(inputs)[0], natoms[0] * self.get_dim_out()])
        output.append(layer)
    output = tf.concat(output, axis=1)
    return output
def build (self, input_d, rot_mat, natoms, reuse = None, suffix = '') : start_index = 0 inputs = tf.cast(tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision) rot_mat = tf.reshape(rot_mat, [-1, 9 * natoms[0]]) count = 0 outs_list = [] for type_i in range(self.ntypes): # cut-out inputs inputs_i = tf.slice (inputs, [ 0, start_index* self.dim_descrpt], [-1, natoms[2+type_i]* self.dim_descrpt] ) inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt]) rot_mat_i = tf.slice (rot_mat, [ 0, start_index* 9], [-1, natoms[2+type_i]* 9] ) rot_mat_i = tf.reshape(rot_mat_i, [-1, 3, 3]) start_index += natoms[2+type_i] if not type_i in self.sel_type : continue layer = inputs_i for ii in range(0,len(self.n_neuron)) : if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] : layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision) else : layer = one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision) # (nframes x natoms) x 9 final_layer = one_layer(layer, 9, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, precision = self.fitting_precision, final_layer = True) # (nframes x natoms) x 3 x 3 final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], 3, 3]) # (nframes x natoms) x 3 x 3 final_layer = final_layer + tf.transpose(final_layer, perm = [0,2,1]) # (nframes x natoms) x 3 x 3(coord) final_layer = tf.matmul(final_layer, rot_mat_i) # (nframes x natoms) x 3(coord) x 3(coord) final_layer = tf.matmul(rot_mat_i, final_layer, transpose_a = True) # nframes x natoms x 3 x 3 final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[2+type_i], 3, 3]) # concat the results outs_list.append(final_layer) count += 1 outs = tf.concat(outs_list, axis = 1) tf.summary.histogram('fitting_net_output', outs) return tf.cast(tf.reshape(outs, [-1]), GLOBAL_TF_FLOAT_PRECISION)
def build (self, input_d, rot_mat, natoms, reuse = None, suffix = '') : start_index = 0 inputs = tf.cast(tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision) rot_mat = tf.reshape(rot_mat, [-1, 9 * natoms[0]]) count = 0 for type_i in range(self.ntypes): # cut-out inputs inputs_i = tf.slice (inputs, [ 0, start_index* self.dim_descrpt], [-1, natoms[2+type_i]* self.dim_descrpt] ) inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt]) rot_mat_i = tf.slice (rot_mat, [ 0, start_index* 9], [-1, natoms[2+type_i]* 9] ) rot_mat_i = tf.reshape(rot_mat_i, [-1, 3, 3]) start_index += natoms[2+type_i] if not type_i in self.sel_type : continue layer = inputs_i for ii in range(0,len(self.n_neuron)) : if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] : layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, uniform_seed = self.uniform_seed) else : layer = one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, uniform_seed = self.uniform_seed) if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift # (nframes x natoms) x (nwfc x 3) final_layer = one_layer(layer, self.wfc_numb * 3, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, precision = self.fitting_precision, uniform_seed = self.uniform_seed) if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift # (nframes x natoms) x nwfc(wc) x 3(coord_local) final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], self.wfc_numb, 3]) # (nframes x natoms) x nwfc(wc) x 3(coord) final_layer = tf.matmul(final_layer, rot_mat_i) # nframes x natoms x nwfc(wc) x 3(coord_local) final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[2+type_i], self.wfc_numb, 3]) # concat the results if count == 0: outs = final_layer else: outs = tf.concat([outs, final_layer], axis = 1) count += 1 tf.summary.histogram('fitting_net_output', outs) return tf.cast(tf.reshape(outs, [-1]), GLOBAL_TF_FLOAT_PRECISION)
def _pass_filter(self, inputs, atype, natoms, input_dict, reuse=None, suffix='', trainable=True):
    start_index = 0
    inputs = tf.reshape(inputs, [-1, self.ndescrpt * natoms[0]])
    output = []
    output_qmat = []
    inputs_i = inputs
    inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
    type_i = -1
    layer, qmat = self._filter(tf.cast(inputs_i, self.filter_precision),
                               type_i,
                               name='filter_type_all' + suffix,
                               natoms=natoms,
                               reuse=reuse,
                               trainable=trainable,
                               activation_fn=self.filter_activation_fn)
    layer = tf.reshape(
        layer, [tf.shape(inputs)[0], natoms[0] * self.get_dim_out()])
    # qmat = tf.reshape(qmat, [tf.shape(inputs)[0], natoms[0] * self.get_dim_rot_mat_1() * 3])
    output.append(layer)
    # output_qmat.append(qmat)
    output = tf.concat(output, axis=1)
    # output_qmat = tf.concat(output_qmat, axis = 1)
    return output, None
def _pass_filter(self, inputs, atype, natoms, input_dict, reuse=None, suffix='', trainable=True):
    # nf x na x ndescrpt
    # nf x na x (nnei x 4)
    inputs = tf.reshape(inputs, [-1, natoms[0], self.ndescrpt])
    layer, qmat = self._ebd_filter(tf.cast(inputs, self.filter_precision),
                                   atype,
                                   natoms,
                                   input_dict,
                                   name='filter_type_all' + suffix,
                                   reuse=reuse,
                                   seed=self.seed,
                                   trainable=trainable,
                                   activation_fn=self.filter_activation_fn)
    output = tf.reshape(
        layer, [tf.shape(inputs)[0], natoms[0] * self.get_dim_out()])
    output_qmat = tf.reshape(
        qmat, [tf.shape(inputs)[0], natoms[0] * self.get_dim_rot_mat_1() * 3])
    return output, output_qmat
def _type_embed(self, atype, ndim=1, reuse=None, suffix='', trainable=True):
    ebd_type = tf.cast(atype, self.filter_precision)
    ebd_type = ebd_type / float(self.ntypes)
    ebd_type = tf.reshape(ebd_type, [-1, ndim])
    for ii in range(self.type_nlayer):
        name = 'type_embed_layer_' + str(ii)
        ebd_type = one_layer(ebd_type,
                             self.type_nchanl,
                             activation_fn=self.filter_activation_fn,
                             precision=self.filter_precision,
                             name=name,
                             reuse=reuse,
                             seed=self.seed + ii,
                             trainable=trainable)
    name = 'type_embed_layer_' + str(self.type_nlayer)
    ebd_type = one_layer(ebd_type,
                         self.type_nchanl,
                         activation_fn=None,
                         precision=self.filter_precision,
                         name=name,
                         reuse=reuse,
                         seed=self.seed + ii,
                         trainable=trainable)
    ebd_type = tf.reshape(ebd_type, [tf.shape(atype)[0], self.type_nchanl])
    return ebd_type
def _filter_r(self,
              inputs,
              type_input,
              natoms,
              activation_fn=tf.nn.tanh,
              stddev=1.0,
              bavg=0.0,
              name='linear',
              reuse=None,
              trainable=True):
    # natom x nei
    outputs_size = [1] + self.filter_neuron
    with tf.variable_scope(name, reuse=reuse):
        start_index = 0
        xyz_scatter_total = []
        for type_i in range(self.ntypes):
            # cut-out inputs
            # with natom x nei_type_i
            inputs_i = tf.slice(inputs,
                                [0, start_index],
                                [-1, self.sel_r[type_i]])
            start_index += self.sel_r[type_i]
            shape_i = inputs_i.get_shape().as_list()
            # with (natom x nei_type_i) x 1
            xyz_scatter = tf.reshape(inputs_i, [-1, 1])
            if (type_input, type_i) not in self.exclude_types:
                xyz_scatter = embedding_net(
                    xyz_scatter,
                    self.filter_neuron,
                    self.filter_precision,
                    activation_fn=activation_fn,
                    resnet_dt=self.filter_resnet_dt,
                    name_suffix="_" + str(type_i),
                    stddev=stddev,
                    bavg=bavg,
                    seed=self.seed,
                    trainable=trainable,
                    uniform_seed=self.uniform_seed,
                    initial_variables=self.embedding_net_variables,
                )
                if (not self.uniform_seed) and (self.seed is not None):
                    self.seed += self.seed_shift
                # natom x nei_type_i x out_size
                xyz_scatter = tf.reshape(
                    xyz_scatter, (-1, shape_i[1], outputs_size[-1]))
            else:
                natom = tf.shape(inputs)[0]
                xyz_scatter = tf.cast(
                    tf.fill((natom, shape_i[1], outputs_size[-1]), 0.),
                    GLOBAL_TF_FLOAT_PRECISION)
            xyz_scatter_total.append(xyz_scatter)
        # natom x nei x outputs_size
        xyz_scatter = tf.concat(xyz_scatter_total, axis=1)
        # natom x outputs_size
        res_rescale = 1. / 5.
        result = tf.reduce_mean(xyz_scatter, axis=1) * res_rescale
    return result
def build(
        self,
        ntypes: int,
        reuse=None,
        suffix='',
):
    """
    Build the computational graph for the descriptor

    Parameters
    ----------
    ntypes
            Number of atom types.
    reuse
            The weights in the networks should be reused when get the variable.
    suffix
            Name suffix to identify this descriptor

    Returns
    -------
    embedded_types
            The computational graph for embedded types
    """
    types = tf.convert_to_tensor([ii for ii in range(ntypes)], dtype=tf.int32)
    ebd_type = tf.cast(
        tf.one_hot(tf.cast(types, dtype=tf.int32), int(ntypes)),
        self.filter_precision)
    ebd_type = tf.reshape(ebd_type, [-1, ntypes])
    name = 'type_embed_net' + suffix
    with tf.variable_scope(name, reuse=reuse):
        ebd_type = embedding_net(
            ebd_type,
            self.neuron,
            activation_fn=self.filter_activation_fn,
            precision=self.filter_precision,
            resnet_dt=self.filter_resnet_dt,
            seed=self.seed,
            trainable=self.trainable,
            initial_variables=self.type_embedding_net_variables,
            uniform_seed=self.uniform_seed)
    ebd_type = tf.reshape(ebd_type, [-1, self.neuron[-1]])  # nnei * neuron[-1]
    self.ebd_type = tf.identity(ebd_type, name='t_typeebd')
    return self.ebd_type
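# Illustrative sketch (not part of the library): the type-embedding build above
# one-hot encodes the ntypes atom types and passes them through the embedding
# net, producing one embedded row per type. A minimal NumPy analogue of the
# shape flow, assuming a single hypothetical dense layer in place of embedding_net:
import numpy as np

ntypes, last_neuron = 3, 8
one_hot_types = np.eye(ntypes)                     # [ntypes, ntypes], like tf.one_hot
w = np.random.normal(size=(ntypes, last_neuron))   # stand-in for the net's weights
ebd_type = np.tanh(one_hot_types @ w)              # [ntypes, last_neuron]
assert ebd_type.shape == (ntypes, last_neuron)     # matches the reshape to [-1, neuron[-1]]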
def _concat_type_embedding( self, xyz_scatter, nframes, natoms, type_embedding, ): '''Concatenate `type_embedding` of neighbors and `xyz_scatter`. If not self.type_one_side, concatenate `type_embedding` of center atoms as well. Parameters ---------- xyz_scatter: shape is [nframes*natoms[0]*self.nnei, 1] nframes: shape is [] natoms: shape is [1+1+self.ntypes] type_embedding: shape is [self.ntypes, Y] where Y=jdata['type_embedding']['neuron'][-1] Returns ------- embedding: environment of each atom represented by embedding. ''' te_out_dim = type_embedding.get_shape().as_list()[-1] nei_embed = tf.nn.embedding_lookup( type_embedding, tf.cast(self.nei_type, dtype=tf.int32)) # shape is [self.nnei, 1+te_out_dim] nei_embed = tf.tile( nei_embed, (nframes * natoms[0], 1)) # shape is [nframes*natoms[0]*self.nnei, te_out_dim] nei_embed = tf.reshape(nei_embed, [-1, te_out_dim]) embedding_input = tf.concat( [xyz_scatter, nei_embed], 1) # shape is [nframes*natoms[0]*self.nnei, 1+te_out_dim] if not self.type_one_side: atm_embed = embed_atom_type( self.ntypes, natoms, type_embedding) # shape is [natoms[0], te_out_dim] atm_embed = tf.tile( atm_embed, (nframes, self.nnei )) # shape is [nframes*natoms[0], self.nnei*te_out_dim] atm_embed = tf.reshape( atm_embed, [-1, te_out_dim ]) # shape is [nframes*natoms[0]*self.nnei, te_out_dim] embedding_input = tf.concat( [embedding_input, atm_embed], 1 ) # shape is [nframes*natoms[0]*self.nnei, 1+te_out_dim+te_out_dim] return embedding_input
def _build_network(self, data): self.place_holders = {} if self.is_compress: for kk in ['coord', 'box']: self.place_holders[kk] = tf.placeholder( GLOBAL_TF_FLOAT_PRECISION, [None], 't_' + kk) self._get_place_horders(data_requirement) else: self._get_place_horders(data.get_data_dict()) self.place_holders['type'] = tf.placeholder(tf.int32, [None], name='t_type') self.place_holders['natoms_vec'] = tf.placeholder(tf.int32, [self.ntypes + 2], name='t_natoms') self.place_holders['default_mesh'] = tf.placeholder(tf.int32, [None], name='t_mesh') self.place_holders['is_training'] = tf.placeholder(tf.bool) self.model_pred\ = self.model.build (self.place_holders['coord'], self.place_holders['type'], self.place_holders['natoms_vec'], self.place_holders['box'], self.place_holders['default_mesh'], self.place_holders, self.frz_model, suffix = "", reuse = False) self.l2_l, self.l2_more\ = self.loss.build (self.learning_rate, self.place_holders['natoms_vec'], self.model_pred, self.place_holders, suffix = "test") if self.mixed_prec is not None: self.l2_l = tf.cast(self.l2_l, get_precision(self.mixed_prec['output_prec'])) log.info("built network")
def safe_cast_tensor(input: tf.Tensor, from_precision: tf.DType, to_precision: tf.DType) -> tf.Tensor:
    """Convert a Tensor from a precision to another precision.

    If input is not a Tensor or does not have the specified precision, the method
    will not cast it.

    Parameters
    ----------
    input : tf.Tensor
        input tensor
    from_precision : tf.DType
        Tensor data type that is casted from
    to_precision : tf.DType
        Tensor data type that casts to

    Returns
    -------
    tf.Tensor
        casted Tensor
    """
    if tensor_util.is_tensor(input) and input.dtype == from_precision:
        return tf.cast(input, to_precision)
    return input
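# Illustrative usage sketch (assumes TensorFlow is importable; not part of the
# library): a float64 tensor is cast to float32, while tensors of other dtypes
# and non-tensor inputs are returned unchanged.
import tensorflow as tf

x64 = tf.constant([1.0, 2.0], dtype=tf.float64)
x32 = safe_cast_tensor(x64, tf.float64, tf.float32)          # dtype becomes tf.float32
same = safe_cast_tensor(x64, tf.float16, tf.float32)         # dtype mismatch, returned as-is
obj = safe_cast_tensor([1.0, 2.0], tf.float64, tf.float32)   # not a tensor, returned as-is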
def embed_atom_type(
        ntypes: int,
        natoms: tf.Tensor,
        type_embedding: tf.Tensor,
):
    """
    Make the embedded type for the atoms in system.
    The atoms are assumed to be sorted according to the type,
    thus their types are described by a `tf.Tensor` natoms, see explanation below.

    Parameters
    ----------
    ntypes:
        Number of types.
    natoms:
        The number of atoms. This tensor has the length of Ntypes + 2
        natoms[0]: number of local atoms
        natoms[1]: total number of atoms held by this processor
        natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
    type_embedding:
        The type embedding.
        It has the shape of [ntypes, embedding_dim]

    Returns
    -------
    atom_embedding
        The embedded type of each atom.
        It has the shape of [numb_atoms, embedding_dim]
    """
    te_out_dim = type_embedding.get_shape().as_list()[-1]
    atype = []
    for ii in range(ntypes):
        atype.append(tf.tile([ii], [natoms[2 + ii]]))
    atype = tf.concat(atype, axis=0)
    atm_embed = tf.nn.embedding_lookup(
        type_embedding, tf.cast(atype, dtype=tf.int32))  # (nf*natom)*nchnl
    atm_embed = tf.reshape(atm_embed, [-1, te_out_dim])
    return atm_embed
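# Worked example (illustrative only): with ntypes = 2 and natoms = [4, 4, 1, 3]
# (one atom of type 0 followed by three of type 1, sorted by type as the
# docstring assumes), the constructed atype is [0, 1, 1, 1] and the lookup
# returns one embedding row per atom. NumPy analogue:
import numpy as np

type_embedding = np.arange(2 * 8, dtype=float).reshape(2, 8)  # [ntypes, te_out_dim]
atype = np.array([0, 1, 1, 1])                                # built from natoms[2:]
atm_embed = type_embedding[atype]                             # [4, 8], row i = embedding of atom i's type
assert atm_embed.shape == (4, 8)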
def _pass_filter(self, inputs, natoms, reuse=None, suffix='', trainable=True):
    start_index = 0
    inputs = tf.reshape(inputs, [-1, self.ndescrpt * natoms[0]])
    output = []
    output_qmat = []
    for type_i in range(self.ntypes):
        inputs_i = tf.slice(inputs,
                            [0, start_index * self.ndescrpt],
                            [-1, natoms[2 + type_i] * self.ndescrpt])
        inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
        layer, qmat = self._filter(
            tf.cast(inputs_i, self.filter_precision),
            type_i,
            name='filter_type_' + str(type_i) + suffix,
            natoms=natoms,
            reuse=reuse,
            seed=self.seed,
            trainable=trainable,
            activation_fn=self.filter_activation_fn)
        layer = tf.reshape(
            layer,
            [tf.shape(inputs)[0], natoms[2 + type_i] * self.get_dim_out()])
        qmat = tf.reshape(qmat, [
            tf.shape(inputs)[0],
            natoms[2 + type_i] * self.get_dim_rot_mat_1() * 3
        ])
        output.append(layer)
        output_qmat.append(qmat)
        start_index += natoms[2 + type_i]
    output = tf.concat(output, axis=1)
    output_qmat = tf.concat(output_qmat, axis=1)
    return output, output_qmat
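# Shape sketch (illustrative, not library code): _pass_filter above slices the
# flattened descriptor frame-wise per atom type. With ndescrpt = 5 and
# natoms = [3, 3, 1, 2], type 0 occupies columns [0, 5) and type 1 columns
# [5, 15) of the [nframes, natoms[0] * ndescrpt] matrix:
import numpy as np

nframes, ndescrpt, natoms = 2, 5, [3, 3, 1, 2]
inputs = np.random.normal(size=(nframes, natoms[0] * ndescrpt))
start = 0
for type_i, n_i in enumerate(natoms[2:]):
    block = inputs[:, start * ndescrpt:(start + n_i) * ndescrpt]  # tf.slice equivalent
    per_atom = block.reshape(-1, ndescrpt)                        # tf.reshape(..., [-1, ndescrpt])
    assert per_atom.shape == (nframes * n_i, ndescrpt)
    start += n_i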
def _build_lower(self, start_index, natoms, inputs, fparam=None, aparam=None, bias_atom_e=0.0, suffix='', reuse=None): # cut-out inputs inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt], [-1, natoms * self.dim_descrpt]) inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt]) layer = inputs_i if fparam is not None: ext_fparam = tf.tile(fparam, [1, natoms]) ext_fparam = tf.reshape(ext_fparam, [-1, self.numb_fparam]) ext_fparam = tf.cast(ext_fparam, self.fitting_precision) layer = tf.concat([layer, ext_fparam], axis=1) if aparam is not None: ext_aparam = tf.slice(aparam, [0, start_index * self.numb_aparam], [-1, natoms * self.numb_aparam]) ext_aparam = tf.reshape(ext_aparam, [-1, self.numb_aparam]) ext_aparam = tf.cast(ext_aparam, self.fitting_precision) layer = tf.concat([layer, ext_aparam], axis=1) for ii in range(0, len(self.n_neuron)): if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]: layer += one_layer( layer, self.n_neuron[ii], name='layer_' + str(ii) + suffix, reuse=reuse, seed=self.seed, use_timestep=self.resnet_dt, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision, trainable=self.trainable[ii], uniform_seed=self.uniform_seed, initial_variables=self.fitting_net_variables, mixed_prec=self.mixed_prec) else: layer = one_layer(layer, self.n_neuron[ii], name='layer_' + str(ii) + suffix, reuse=reuse, seed=self.seed, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision, trainable=self.trainable[ii], uniform_seed=self.uniform_seed, initial_variables=self.fitting_net_variables, mixed_prec=self.mixed_prec) if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift final_layer = one_layer(layer, 1, activation_fn=None, bavg=bias_atom_e, name='final_layer' + suffix, reuse=reuse, seed=self.seed, precision=self.fitting_precision, trainable=self.trainable[-1], uniform_seed=self.uniform_seed, initial_variables=self.fitting_net_variables, mixed_prec=self.mixed_prec, final_layer=True) if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift return final_layer
def build(self,
          input_d: tf.Tensor,
          rot_mat: tf.Tensor,
          natoms: tf.Tensor,
          reuse: bool = None,
          suffix: str = ''):
    """
    Build the computational graph for fitting net

    Parameters
    ----------
    input_d
            The input descriptor
    rot_mat
            The rotation matrix from the descriptor.
    natoms
            The number of atoms. This tensor has the length of Ntypes + 2
            natoms[0]: number of local atoms
            natoms[1]: total number of atoms held by this processor
            natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
    reuse
            The weights in the networks should be reused when get the variable.
    suffix
            Name suffix to identify this descriptor

    Returns
    -------
    atomic_polar
            The atomic polarizability
    """
    start_index = 0
    inputs = tf.cast(
        tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]),
        self.fitting_precision)
    rot_mat = tf.reshape(rot_mat, [-1, self.dim_rot_mat * natoms[0]])

    count = 0
    for type_i in range(self.ntypes):
        # cut-out inputs
        inputs_i = tf.slice(inputs,
                            [0, start_index * self.dim_descrpt],
                            [-1, natoms[2 + type_i] * self.dim_descrpt])
        inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
        rot_mat_i = tf.slice(rot_mat,
                             [0, start_index * self.dim_rot_mat],
                             [-1, natoms[2 + type_i] * self.dim_rot_mat])
        rot_mat_i = tf.reshape(rot_mat_i, [-1, self.dim_rot_mat_1, 3])
        start_index += natoms[2 + type_i]
        if not type_i in self.sel_type:
            continue
        layer = inputs_i
        for ii in range(0, len(self.n_neuron)):
            if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
                layer += one_layer(
                    layer,
                    self.n_neuron[ii],
                    name='layer_' + str(ii) + '_type_' + str(type_i) + suffix,
                    reuse=reuse,
                    seed=self.seed,
                    use_timestep=self.resnet_dt,
                    activation_fn=self.fitting_activation_fn,
                    precision=self.fitting_precision,
                    uniform_seed=self.uniform_seed)
            else:
                layer = one_layer(
                    layer,
                    self.n_neuron[ii],
                    name='layer_' + str(ii) + '_type_' + str(type_i) + suffix,
                    reuse=reuse,
                    seed=self.seed,
                    activation_fn=self.fitting_activation_fn,
                    precision=self.fitting_precision,
                    uniform_seed=self.uniform_seed)
            if (not self.uniform_seed) and (self.seed is not None):
                self.seed += self.seed_shift
        if self.fit_diag:
            bavg = np.zeros(self.dim_rot_mat_1)
            # bavg[0] = self.avgeig[0]
            # bavg[1] = self.avgeig[1]
            # bavg[2] = self.avgeig[2]
            # (nframes x natoms) x naxis
            final_layer = one_layer(
                layer,
                self.dim_rot_mat_1,
                activation_fn=None,
                name='final_layer_type_' + str(type_i) + suffix,
                reuse=reuse,
                seed=self.seed,
                bavg=bavg,
                precision=self.fitting_precision,
                uniform_seed=self.uniform_seed)
            if (not self.uniform_seed) and (self.seed is not None):
                self.seed += self.seed_shift
            # (nframes x natoms) x naxis
            final_layer = tf.reshape(
                final_layer,
                [tf.shape(inputs)[0] * natoms[2 + type_i], self.dim_rot_mat_1])
            # (nframes x natoms) x naxis x naxis
            final_layer = tf.matrix_diag(final_layer)
        else:
            bavg = np.zeros(self.dim_rot_mat_1 * self.dim_rot_mat_1)
            # bavg[0*self.dim_rot_mat_1+0] = self.avgeig[0]
            # bavg[1*self.dim_rot_mat_1+1] = self.avgeig[1]
            # bavg[2*self.dim_rot_mat_1+2] = self.avgeig[2]
            # (nframes x natoms) x (naxis x naxis)
            final_layer = one_layer(
                layer,
                self.dim_rot_mat_1 * self.dim_rot_mat_1,
                activation_fn=None,
                name='final_layer_type_' + str(type_i) + suffix,
                reuse=reuse,
                seed=self.seed,
                bavg=bavg,
                precision=self.fitting_precision,
                uniform_seed=self.uniform_seed)
            if (not self.uniform_seed) and (self.seed is not None):
                self.seed += self.seed_shift
            # (nframes x natoms) x naxis x naxis
            final_layer = tf.reshape(
                final_layer,
                [tf.shape(inputs)[0] * natoms[2 + type_i],
                 self.dim_rot_mat_1, self.dim_rot_mat_1])
            # (nframes x natoms) x naxis x naxis
            final_layer = final_layer + tf.transpose(final_layer, perm=[0, 2, 1])
        # (nframes x natoms) x naxis x 3(coord)
        final_layer = tf.matmul(final_layer, rot_mat_i)
        # (nframes x natoms) x 3(coord) x 3(coord)
        final_layer = tf.matmul(rot_mat_i, final_layer, transpose_a=True)
        # nframes x natoms x 3 x 3
        final_layer = tf.reshape(
            final_layer, [tf.shape(inputs)[0], natoms[2 + type_i], 3, 3])
        # shift and scale
        sel_type_idx = self.sel_type.index(type_i)
        final_layer = final_layer * self.scale[sel_type_idx]
        final_layer = final_layer + self.constant_matrix[sel_type_idx] * tf.eye(
            3,
            batch_shape=[tf.shape(inputs)[0], natoms[2 + type_i]],
            dtype=GLOBAL_TF_FLOAT_PRECISION)
        # concat the results
        if count == 0:
            outs = final_layer
        else:
            outs = tf.concat([outs, final_layer], axis=1)
        count += 1
    tf.summary.histogram('fitting_net_output', outs)
    return tf.cast(tf.reshape(outs, [-1]), GLOBAL_TF_FLOAT_PRECISION)
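# Numerical sketch (illustrative only) of the symmetrisation and rotation used
# above for the polarizability: with A the raw network output and R the
# per-atom rotation matrix, the fitted tensor is R^T (A + A^T) R, which is
# symmetric by construction:
import numpy as np

naxis = 4
A = np.random.normal(size=(naxis, naxis))   # raw final_layer, naxis x naxis
R = np.random.normal(size=(naxis, 3))       # rot_mat_i for one atom, naxis x 3
S = A + A.T                                 # final_layer + its transpose
polar = R.T @ S @ R                         # the two tf.matmul calls, 3 x 3
assert np.allclose(polar, polar.T)          # atomic polarizability is symmetric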
def one_layer(inputs, outputs_size, activation_fn=tf.nn.tanh, precision=GLOBAL_TF_FLOAT_PRECISION, stddev=1.0, bavg=0.0, name='linear', reuse=None, seed=None, use_timestep=False, trainable=True, useBN=False, uniform_seed=False, initial_variables=None, mixed_prec=None, final_layer=False): # For good accuracy, the last layer of the fitting network uses a higher precision neuron network. if mixed_prec is not None and final_layer: inputs = tf.cast(inputs, get_precision(mixed_prec['output_prec'])) with tf.variable_scope(name, reuse=reuse): shape = inputs.get_shape().as_list() w_initializer = tf.random_normal_initializer( stddev=stddev / np.sqrt(shape[1] + outputs_size), seed=seed if (seed is None or uniform_seed) else seed + 0) b_initializer = tf.random_normal_initializer( stddev=stddev, mean=bavg, seed=seed if (seed is None or uniform_seed) else seed + 1) if initial_variables is not None: w_initializer = tf.constant_initializer( initial_variables[name + '/matrix']) b_initializer = tf.constant_initializer(initial_variables[name + '/bias']) w = tf.get_variable('matrix', [shape[1], outputs_size], precision, w_initializer, trainable=trainable) variable_summaries(w, 'matrix') b = tf.get_variable('bias', [outputs_size], precision, b_initializer, trainable=trainable) variable_summaries(b, 'bias') if mixed_prec is not None and not final_layer: inputs = tf.cast(inputs, get_precision(mixed_prec['compute_prec'])) w = tf.cast(w, get_precision(mixed_prec['compute_prec'])) b = tf.cast(b, get_precision(mixed_prec['compute_prec'])) hidden = tf.nn.bias_add(tf.matmul(inputs, w), b) if activation_fn != None and use_timestep: idt_initializer = tf.random_normal_initializer( stddev=0.001, mean=0.1, seed=seed if (seed is None or uniform_seed) else seed + 2) if initial_variables is not None: idt_initializer = tf.constant_initializer( initial_variables[name + '/idt']) idt = tf.get_variable('idt', [outputs_size], precision, idt_initializer, trainable=trainable) variable_summaries(idt, 'idt') if activation_fn != None: if useBN: None # hidden_bn = self._batch_norm(hidden, name=name+'_normalization', reuse=reuse) # return activation_fn(hidden_bn) else: if use_timestep: if mixed_prec is not None and not final_layer: idt = tf.cast( idt, get_precision(mixed_prec['compute_prec'])) hidden = tf.reshape(activation_fn(hidden), [-1, outputs_size]) * idt else: hidden = tf.reshape(activation_fn(hidden), [-1, outputs_size]) if mixed_prec is not None: hidden = tf.cast(hidden, get_precision(mixed_prec['output_prec'])) return hidden
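# Minimal NumPy sketch (illustrative, not the library implementation) of what
# one_layer above computes: a dense layer with an activation and the optional
# per-channel "timestep" idt used by the ResNet variant:
import numpy as np

x = np.random.normal(size=(5, 10))              # natom x input_dim
w = np.random.normal(size=(10, 20))             # the 'matrix' variable
b = np.zeros(20)                                # the 'bias' variable
idt = 0.1 + 0.001 * np.random.normal(size=20)   # the 'idt' variable (use_timestep=True)

hidden = np.tanh(x @ w + b)                     # activation_fn(tf.nn.bias_add(tf.matmul(x, w), b))
hidden_dt = hidden * idt                        # timestep-scaled output when use_timestep is set
assert hidden.shape == hidden_dt.shape == (5, 20)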
def embedding_net(xx,
                  network_size,
                  precision,
                  activation_fn=tf.nn.tanh,
                  resnet_dt=False,
                  name_suffix='',
                  stddev=1.0,
                  bavg=0.0,
                  seed=None,
                  trainable=True,
                  uniform_seed=False,
                  initial_variables=None,
                  mixed_prec=None):
    r"""The embedding network.

    The embedding network function :math:`\mathcal{N}` is the composition of
    multiple layers :math:`\mathcal{L}^{(i)}`:

    .. math::
        \mathcal{N} = \mathcal{L}^{(n)} \circ \mathcal{L}^{(n-1)}
        \circ \cdots \circ \mathcal{L}^{(1)}

    A layer :math:`\mathcal{L}` is given by one of the following forms,
    depending on the number of nodes: [1]_

    .. math::
        \mathbf{y}=\mathcal{L}(\mathbf{x};\mathbf{w},\mathbf{b})=
            \begin{cases}
                \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b}) + \mathbf{x}, & N_2=N_1 \\
                \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b}) + (\mathbf{x}, \mathbf{x}), & N_2 = 2N_1\\
                \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b}), & \text{otherwise} \\
            \end{cases}

    where :math:`\mathbf{x} \in \mathbb{R}^{N_1}` is the input vector and
    :math:`\mathbf{y} \in \mathbb{R}^{N_2}` is the output vector.
    :math:`\mathbf{w} \in \mathbb{R}^{N_1 \times N_2}` and
    :math:`\mathbf{b} \in \mathbb{R}^{N_2}` are weights and biases, respectively,
    both of which are trainable if `trainable` is `True`.
    :math:`\boldsymbol{\phi}` is the activation function.

    Parameters
    ----------
    xx : Tensor
        Input tensor :math:`\mathbf{x}` of shape [-1,1]
    network_size : list of int
        Size of the embedding network. For example [16,32,64]
    precision:
        Precision of network weights. For example, tf.float64
    activation_fn:
        Activation function :math:`\boldsymbol{\phi}`
    resnet_dt : boolean
        Using time-step in the ResNet construction
    name_suffix : str
        The name suffix append to each variable.
    stddev : float
        Standard deviation of initializing network parameters
    bavg : float
        Mean of network initial bias
    seed : int
        Random seed for initializing network parameters
    trainable : boolean
        If the network is trainable
    uniform_seed : boolean
        Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed
    initial_variables : dict
        The input dict which stores the embedding net variables
    mixed_prec
        The input dict which stores the mixed precision setting for the embedding net

    References
    ----------
    .. [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Identity mappings
       in deep residual networks. In Computer Vision – ECCV 2016, pages 630–645.
       Springer International Publishing, 2016.
    """
    input_shape = xx.get_shape().as_list()
    outputs_size = [input_shape[1]] + network_size

    for ii in range(1, len(outputs_size)):
        w_initializer = tf.random_normal_initializer(
            stddev=stddev / np.sqrt(outputs_size[ii] + outputs_size[ii - 1]),
            seed=seed if (seed is None or uniform_seed) else seed + ii * 3 + 0)
        b_initializer = tf.random_normal_initializer(
            stddev=stddev,
            mean=bavg,
            seed=seed if (seed is None or uniform_seed) else seed + 3 * ii + 1)
        if initial_variables is not None:
            scope = tf.get_variable_scope().name
            w_initializer = tf.constant_initializer(
                initial_variables[scope + '/matrix_' + str(ii) + name_suffix])
            b_initializer = tf.constant_initializer(
                initial_variables[scope + '/bias_' + str(ii) + name_suffix])
        w = tf.get_variable('matrix_' + str(ii) + name_suffix,
                            [outputs_size[ii - 1], outputs_size[ii]],
                            precision,
                            w_initializer,
                            trainable=trainable)
        variable_summaries(w, 'matrix_' + str(ii) + name_suffix)
        b = tf.get_variable('bias_' + str(ii) + name_suffix,
                            [outputs_size[ii]],
                            precision,
                            b_initializer,
                            trainable=trainable)
        variable_summaries(b, 'bias_' + str(ii) + name_suffix)
        if mixed_prec is not None:
            xx = tf.cast(xx, get_precision(mixed_prec['compute_prec']))
            w = tf.cast(w, get_precision(mixed_prec['compute_prec']))
            b = tf.cast(b, get_precision(mixed_prec['compute_prec']))
        hidden = tf.reshape(activation_fn(tf.nn.bias_add(tf.matmul(xx, w), b)),
                            [-1, outputs_size[ii]])
        if resnet_dt:
            idt_initializer = tf.random_normal_initializer(
                stddev=0.001,
                mean=1.0,
                seed=seed if (seed is None or uniform_seed) else seed + 3 * ii + 2)
            if initial_variables is not None:
                idt_initializer = tf.constant_initializer(
                    initial_variables[scope + '/idt_' + str(ii) + name_suffix])
            idt = tf.get_variable('idt_' + str(ii) + name_suffix,
                                  [1, outputs_size[ii]],
                                  precision,
                                  idt_initializer,
                                  trainable=trainable)
            variable_summaries(idt, 'idt_' + str(ii) + name_suffix)
            if mixed_prec is not None:
                idt = tf.cast(idt, get_precision(mixed_prec['compute_prec']))
        if outputs_size[ii] == outputs_size[ii - 1]:
            if resnet_dt:
                xx += hidden * idt
            else:
                xx += hidden
        elif outputs_size[ii] == outputs_size[ii - 1] * 2:
            if resnet_dt:
                xx = tf.concat([xx, xx], 1) + hidden * idt
            else:
                xx = tf.concat([xx, xx], 1) + hidden
        else:
            xx = hidden
    if mixed_prec is not None:
        xx = tf.cast(xx, get_precision(mixed_prec['output_prec']))
    return xx
def build(self, input_d, rot_mat, natoms, reuse=None, suffix=''): start_index = 0 inputs = tf.cast( tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision) rot_mat = tf.reshape(rot_mat, [-1, self.dim_rot_mat * natoms[0]]) count = 0 for type_i in range(self.ntypes): # cut-out inputs inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt], [-1, natoms[2 + type_i] * self.dim_descrpt]) inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt]) rot_mat_i = tf.slice(rot_mat, [0, start_index * self.dim_rot_mat], [-1, natoms[2 + type_i] * self.dim_rot_mat]) rot_mat_i = tf.reshape(rot_mat_i, [-1, self.dim_rot_mat_1, 3]) start_index += natoms[2 + type_i] if not type_i in self.sel_type: continue layer = inputs_i for ii in range(0, len(self.n_neuron)): if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]: layer += one_layer( layer, self.n_neuron[ii], name='layer_' + str(ii) + '_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, use_timestep=self.resnet_dt, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision) else: layer = one_layer(layer, self.n_neuron[ii], name='layer_' + str(ii) + '_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision) # (nframes x natoms) x naxis final_layer = one_layer(layer, self.dim_rot_mat_1, activation_fn=None, name='final_layer_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, precision=self.fitting_precision) # (nframes x natoms) x 1 * naxis final_layer = tf.reshape(final_layer, [ tf.shape(inputs)[0] * natoms[2 + type_i], 1, self.dim_rot_mat_1 ]) # (nframes x natoms) x 1 x 3(coord) final_layer = tf.matmul(final_layer, rot_mat_i) # nframes x natoms x 3 final_layer = tf.reshape( final_layer, [tf.shape(inputs)[0], natoms[2 + type_i], 3]) # concat the results if count == 0: outs = final_layer else: outs = tf.concat([outs, final_layer], axis=1) count += 1 return tf.cast(tf.reshape(outs, [-1]), global_tf_float_precision)
def build(self, input_d: tf.Tensor, rot_mat: tf.Tensor, natoms: tf.Tensor, reuse: bool = None, suffix: str = '') -> tf.Tensor: """ Build the computational graph for fitting net Parameters ---------- input_d The input descriptor rot_mat The rotation matrix from the descriptor. natoms The number of atoms. This tensor has the length of Ntypes + 2 natoms[0]: number of local atoms natoms[1]: total number of atoms held by this processor natoms[i]: 2 <= i < Ntypes+2, number of type i atoms reuse The weights in the networks should be reused when get the variable. suffix Name suffix to identify this descriptor Returns ------- dipole The atomic dipole. """ start_index = 0 inputs = tf.cast( tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision) rot_mat = tf.reshape(rot_mat, [-1, self.dim_rot_mat * natoms[0]]) count = 0 for type_i in range(self.ntypes): # cut-out inputs inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt], [-1, natoms[2 + type_i] * self.dim_descrpt]) inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt]) rot_mat_i = tf.slice(rot_mat, [0, start_index * self.dim_rot_mat], [-1, natoms[2 + type_i] * self.dim_rot_mat]) rot_mat_i = tf.reshape(rot_mat_i, [-1, self.dim_rot_mat_1, 3]) start_index += natoms[2 + type_i] if not type_i in self.sel_type: continue layer = inputs_i for ii in range(0, len(self.n_neuron)): if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]: layer += one_layer( layer, self.n_neuron[ii], name='layer_' + str(ii) + '_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, use_timestep=self.resnet_dt, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision, uniform_seed=self.uniform_seed) else: layer = one_layer(layer, self.n_neuron[ii], name='layer_' + str(ii) + '_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision, uniform_seed=self.uniform_seed) if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift # (nframes x natoms) x naxis final_layer = one_layer(layer, self.dim_rot_mat_1, activation_fn=None, name='final_layer_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, precision=self.fitting_precision, uniform_seed=self.uniform_seed) if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift # (nframes x natoms) x 1 * naxis final_layer = tf.reshape(final_layer, [ tf.shape(inputs)[0] * natoms[2 + type_i], 1, self.dim_rot_mat_1 ]) # (nframes x natoms) x 1 x 3(coord) final_layer = tf.matmul(final_layer, rot_mat_i) # nframes x natoms x 3 final_layer = tf.reshape( final_layer, [tf.shape(inputs)[0], natoms[2 + type_i], 3]) # concat the results if count == 0: outs = final_layer else: outs = tf.concat([outs, final_layer], axis=1) count += 1 tf.summary.histogram('fitting_net_output', outs) return tf.cast(tf.reshape(outs, [-1]), GLOBAL_TF_FLOAT_PRECISION)
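# Shape sketch (illustrative only) of the dipole projection above: the fitting
# net produces one coefficient per rotation-matrix axis, and the matmul with
# rot_mat_i turns it into a 3-component vector per atom:
import numpy as np

naxis = 4
coeff = np.random.normal(size=(1, naxis))   # final_layer for one atom, 1 x naxis
R = np.random.normal(size=(naxis, 3))       # rot_mat_i for the same atom, naxis x 3
dipole = coeff @ R                          # tf.matmul(final_layer, rot_mat_i), 1 x 3
assert dipole.shape == (1, 3)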
def build(self, inputs, input_dict, natoms, reuse=None, suffix=''): bias_atom_e = self.bias_atom_e if self.numb_fparam > 0 and (self.fparam_avg is None or self.fparam_inv_std is None): raise RuntimeError( 'No data stat result. one should do data statisitic, before build' ) if self.numb_aparam > 0 and (self.aparam_avg is None or self.aparam_inv_std is None): raise RuntimeError( 'No data stat result. one should do data statisitic, before build' ) with tf.variable_scope('fitting_attr' + suffix, reuse=reuse): t_dfparam = tf.constant(self.numb_fparam, name='dfparam', dtype=tf.int32) t_daparam = tf.constant(self.numb_aparam, name='daparam', dtype=tf.int32) if self.numb_fparam > 0: t_fparam_avg = tf.get_variable( 't_fparam_avg', self.numb_fparam, dtype=global_tf_float_precision, trainable=False, initializer=tf.constant_initializer(self.fparam_avg)) t_fparam_istd = tf.get_variable( 't_fparam_istd', self.numb_fparam, dtype=global_tf_float_precision, trainable=False, initializer=tf.constant_initializer(self.fparam_inv_std)) if self.numb_aparam > 0: t_aparam_avg = tf.get_variable( 't_aparam_avg', self.numb_aparam, dtype=global_tf_float_precision, trainable=False, initializer=tf.constant_initializer(self.aparam_avg)) t_aparam_istd = tf.get_variable( 't_aparam_istd', self.numb_aparam, dtype=global_tf_float_precision, trainable=False, initializer=tf.constant_initializer(self.aparam_inv_std)) start_index = 0 inputs = tf.cast( tf.reshape(inputs, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision) if bias_atom_e is not None: assert (len(bias_atom_e) == self.ntypes) if self.numb_fparam > 0: fparam = input_dict['fparam'] fparam = tf.reshape(fparam, [-1, self.numb_fparam]) fparam = (fparam - t_fparam_avg) * t_fparam_istd if self.numb_aparam > 0: aparam = input_dict['aparam'] aparam = tf.reshape(aparam, [-1, self.numb_aparam]) aparam = (aparam - t_aparam_avg) * t_aparam_istd aparam = tf.reshape(aparam, [-1, self.numb_aparam * natoms[0]]) for type_i in range(self.ntypes): # cut-out inputs inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt], [-1, natoms[2 + type_i] * self.dim_descrpt]) inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt]) layer = inputs_i if self.numb_fparam > 0: ext_fparam = tf.tile(fparam, [1, natoms[2 + type_i]]) ext_fparam = tf.reshape(ext_fparam, [-1, self.numb_fparam]) layer = tf.concat([layer, ext_fparam], axis=1) if self.numb_aparam > 0: ext_aparam = tf.slice( aparam, [0, start_index * self.numb_aparam], [-1, natoms[2 + type_i] * self.numb_aparam]) ext_aparam = tf.reshape(ext_aparam, [-1, self.numb_aparam]) layer = tf.concat([layer, ext_aparam], axis=1) start_index += natoms[2 + type_i] if bias_atom_e is None: type_bias_ae = 0.0 else: type_bias_ae = bias_atom_e[type_i] for ii in range(0, len(self.n_neuron)): if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]: layer += one_layer( layer, self.n_neuron[ii], name='layer_' + str(ii) + '_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, use_timestep=self.resnet_dt, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision, trainable=self.trainable[ii]) else: layer = one_layer(layer, self.n_neuron[ii], name='layer_' + str(ii) + '_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision, trainable=self.trainable[ii]) final_layer = one_layer(layer, 1, activation_fn=None, bavg=type_bias_ae, name='final_layer_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, precision=self.fitting_precision, 
trainable=self.trainable[-1]) if type_i < len( self.atom_ener) and self.atom_ener[type_i] is not None: inputs_zero = tf.zeros_like(inputs_i, dtype=global_tf_float_precision) layer = inputs_zero if self.numb_fparam > 0: layer = tf.concat([layer, ext_fparam], axis=1) if self.numb_aparam > 0: layer = tf.concat([layer, ext_aparam], axis=1) for ii in range(0, len(self.n_neuron)): if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]: layer += one_layer( layer, self.n_neuron[ii], name='layer_' + str(ii) + '_type_' + str(type_i) + suffix, reuse=True, seed=self.seed, use_timestep=self.resnet_dt, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision, trainable=self.trainable[ii]) else: layer = one_layer( layer, self.n_neuron[ii], name='layer_' + str(ii) + '_type_' + str(type_i) + suffix, reuse=True, seed=self.seed, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision, trainable=self.trainable[ii]) zero_layer = one_layer(layer, 1, activation_fn=None, bavg=type_bias_ae, name='final_layer_type_' + str(type_i) + suffix, reuse=True, seed=self.seed, precision=self.fitting_precision, trainable=self.trainable[-1]) final_layer += self.atom_ener[type_i] - zero_layer final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[2 + type_i]]) # concat the results if type_i == 0: outs = final_layer else: outs = tf.concat([outs, final_layer], axis=1) return tf.cast(tf.reshape(outs, [-1]), global_tf_float_precision)
def _filter(self, inputs, type_input, natoms, type_embedding=None, activation_fn=tf.nn.tanh, stddev=1.0, bavg=0.0, name='linear', reuse=None, trainable=True): nframes = tf.shape(tf.reshape(inputs, [-1, natoms[0], self.ndescrpt]))[0] # natom x (nei x 4) shape = inputs.get_shape().as_list() outputs_size = [1] + self.filter_neuron outputs_size_2 = self.n_axis_neuron all_excluded = all([(type_input, type_i) in self.exclude_types for type_i in range(self.ntypes)]) if all_excluded: # all types are excluded so result and qmat should be zeros # we can safaly return a zero matrix... # See also https://stackoverflow.com/a/34725458/9567349 # result: natom x outputs_size x outputs_size_2 # qmat: natom x outputs_size x 3 natom = tf.shape(inputs)[0] result = tf.cast( tf.fill((natom, outputs_size_2, outputs_size[-1]), 0.), GLOBAL_TF_FLOAT_PRECISION) qmat = tf.cast(tf.fill((natom, outputs_size[-1], 3), 0.), GLOBAL_TF_FLOAT_PRECISION) return result, qmat with tf.variable_scope(name, reuse=reuse): start_index = 0 type_i = 0 # natom x 4 x outputs_size if type_embedding is None: rets = [] for type_i in range(self.ntypes): ret = self._filter_lower(type_i, type_input, start_index, self.sel_a[type_i], inputs, nframes, natoms, type_embedding=type_embedding, is_exclude=(type_input, type_i) in self.exclude_types, activation_fn=activation_fn, stddev=stddev, bavg=bavg, trainable=trainable, suffix="_" + str(type_i)) if (type_input, type_i) not in self.exclude_types: # add zero is meaningless; skip rets.append(ret) start_index += self.sel_a[type_i] # faster to use accumulate_n than multiple add xyz_scatter_1 = tf.accumulate_n(rets) else: xyz_scatter_1 = self._filter_lower( type_i, type_input, start_index, np.cumsum(self.sel_a)[-1], inputs, nframes, natoms, type_embedding=type_embedding, is_exclude=False, activation_fn=activation_fn, stddev=stddev, bavg=bavg, trainable=trainable) # natom x nei x outputs_size # xyz_scatter = tf.concat(xyz_scatter_total, axis=1) # natom x nei x 4 # inputs_reshape = tf.reshape(inputs, [-1, shape[1]//4, 4]) # natom x 4 x outputs_size # xyz_scatter_1 = tf.matmul(inputs_reshape, xyz_scatter, transpose_a = True) if self.original_sel is None: # shape[1] = nnei * 4 nnei = shape[1] / 4 else: nnei = tf.cast( tf.Variable(np.sum(self.original_sel), dtype=tf.int32, trainable=False, name="nnei"), self.filter_precision) xyz_scatter_1 = xyz_scatter_1 / nnei # natom x 4 x outputs_size_2 xyz_scatter_2 = tf.slice(xyz_scatter_1, [0, 0, 0], [-1, -1, outputs_size_2]) # # natom x 3 x outputs_size_2 # qmat = tf.slice(xyz_scatter_2, [0,1,0], [-1, 3, -1]) # natom x 3 x outputs_size_1 qmat = tf.slice(xyz_scatter_1, [0, 1, 0], [-1, 3, -1]) # natom x outputs_size_1 x 3 qmat = tf.transpose(qmat, perm=[0, 2, 1]) # natom x outputs_size x outputs_size_2 result = tf.matmul(xyz_scatter_1, xyz_scatter_2, transpose_a=True) # natom x (outputs_size x outputs_size_2) result = tf.reshape(result, [-1, outputs_size_2 * outputs_size[-1]]) return result, qmat
def _filter_lower( self, type_i, type_input, start_index, incrs_index, inputs, nframes, natoms, type_embedding=None, is_exclude=False, activation_fn=None, bavg=0.0, stddev=1.0, trainable=True, suffix='', ): """ input env matrix, returns R.G """ outputs_size = [1] + self.filter_neuron # cut-out inputs # with natom x (nei_type_i x 4) inputs_i = tf.slice(inputs, [0, start_index * 4], [-1, incrs_index * 4]) shape_i = inputs_i.get_shape().as_list() natom = tf.shape(inputs_i)[0] # with (natom x nei_type_i) x 4 inputs_reshape = tf.reshape(inputs_i, [-1, 4]) # with (natom x nei_type_i) x 1 xyz_scatter = tf.reshape(tf.slice(inputs_reshape, [0, 0], [-1, 1]), [-1, 1]) if type_embedding is not None: xyz_scatter = self._concat_type_embedding(xyz_scatter, nframes, natoms, type_embedding) if self.compress: raise RuntimeError( 'compression of type embedded descriptor is not supported at the moment' ) # natom x 4 x outputs_size if self.compress and (not is_exclude): info = [ self.lower, self.upper, self.upper * self.table_config[0], self.table_config[1], self.table_config[2], self.table_config[3] ] if self.type_one_side: net = 'filter_-1_net_' + str(type_i) else: net = 'filter_' + str(type_input) + '_net_' + str(type_i) return op_module.tabulate_fusion_se_a( tf.cast(self.table.data[net], self.filter_precision), info, xyz_scatter, tf.reshape(inputs_i, [natom, shape_i[1] // 4, 4]), last_layer_size=outputs_size[-1]) else: if (not is_exclude): # with (natom x nei_type_i) x out_size xyz_scatter = embedding_net( xyz_scatter, self.filter_neuron, self.filter_precision, activation_fn=activation_fn, resnet_dt=self.filter_resnet_dt, name_suffix=suffix, stddev=stddev, bavg=bavg, seed=self.seed, trainable=trainable, uniform_seed=self.uniform_seed, initial_variables=self.embedding_net_variables, mixed_prec=self.mixed_prec) if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift else: # we can safely return the final xyz_scatter filled with zero directly return tf.cast(tf.fill((natom, 4, outputs_size[-1]), 0.), self.filter_precision) # natom x nei_type_i x out_size xyz_scatter = tf.reshape(xyz_scatter, (-1, shape_i[1] // 4, outputs_size[-1])) # When using tf.reshape(inputs_i, [-1, shape_i[1]//4, 4]) below # [588 24] -> [588 6 4] correct # but if sel is zero # [588 0] -> [147 0 4] incorrect; the correct one is [588 0 4] # So we need to explicitly assign the shape to tf.shape(inputs_i)[0] instead of -1 # natom x 4 x outputs_size return tf.matmul(tf.reshape(inputs_i, [natom, shape_i[1] // 4, 4]), xyz_scatter, transpose_a=True)
def _filter(self, inputs, type_input, natoms, activation_fn=tf.nn.tanh, stddev=1.0, bavg=0.0, name='linear', reuse=None, trainable=True): # natom x (nei x 4) shape = inputs.get_shape().as_list() outputs_size = [1] + self.filter_neuron with tf.variable_scope(name, reuse=reuse): start_index_i = 0 result = None for type_i in range(self.ntypes): # cut-out inputs # with natom x (nei_type_i x 4) inputs_i = tf.slice(inputs, [0, start_index_i * 4], [-1, self.sel_a[type_i] * 4]) start_index_j = start_index_i start_index_i += self.sel_a[type_i] nei_type_i = self.sel_a[type_i] shape_i = inputs_i.get_shape().as_list() assert (shape_i[1] == nei_type_i * 4) # with natom x nei_type_i x 4 env_i = tf.reshape(inputs_i, [-1, nei_type_i, 4]) # with natom x nei_type_i x 3 env_i = tf.slice(env_i, [0, 0, 1], [-1, -1, -1]) for type_j in range(type_i, self.ntypes): # with natom x (nei_type_j x 4) inputs_j = tf.slice(inputs, [0, start_index_j * 4], [-1, self.sel_a[type_j] * 4]) start_index_j += self.sel_a[type_j] nei_type_j = self.sel_a[type_j] shape_j = inputs_j.get_shape().as_list() assert (shape_j[1] == nei_type_j * 4) # with natom x nei_type_j x 4 env_j = tf.reshape(inputs_j, [-1, nei_type_j, 4]) # with natom x nei_type_i x 3 env_j = tf.slice(env_j, [0, 0, 1], [-1, -1, -1]) # with natom x nei_type_i x nei_type_j env_ij = tf.einsum('ijm,ikm->ijk', env_i, env_j) # with (natom x nei_type_i x nei_type_j) ebd_env_ij = tf.reshape(env_ij, [-1, 1]) if self.compress: info = [ self.lower, self.upper, self.upper * self.table_config[0], self.table_config[1], self.table_config[2], self.table_config[3] ] net = 'filter_' + str(type_i) + '_net_' + str(type_j) res_ij = op_module.tabulate_fusion_se_t( tf.cast(self.table.data[net], self.filter_precision), info, ebd_env_ij, env_ij, last_layer_size=outputs_size[-1]) else: # with (natom x nei_type_i x nei_type_j) x out_size ebd_env_ij = embedding_net( ebd_env_ij, self.filter_neuron, self.filter_precision, activation_fn=activation_fn, resnet_dt=self.filter_resnet_dt, name_suffix=f"_{type_i}_{type_j}", stddev=stddev, bavg=bavg, seed=self.seed, trainable=trainable, uniform_seed=self.uniform_seed, initial_variables=self.embedding_net_variables, ) if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift # with natom x nei_type_i x nei_type_j x out_size ebd_env_ij = tf.reshape( ebd_env_ij, [-1, nei_type_i, nei_type_j, outputs_size[-1]]) # with natom x out_size res_ij = tf.einsum('ijk,ijkm->im', env_ij, ebd_env_ij) res_ij = res_ij * (1.0 / float(nei_type_i) / float(nei_type_j)) if result is None: result = res_ij else: result += res_ij return result, None
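# Einsum sketch (illustrative, not library code) for the two contractions used
# above: env_ij holds the pairwise dot products of neighbour environments, and
# the embedded values are contracted back against it to give one descriptor
# row per atom:
import numpy as np

natom, nei_i, nei_j, out = 5, 3, 4, 8
env_i = np.random.normal(size=(natom, nei_i, 3))
env_j = np.random.normal(size=(natom, nei_j, 3))
env_ij = np.einsum('ijm,ikm->ijk', env_i, env_j)          # natom x nei_i x nei_j
ebd = np.random.normal(size=(natom, nei_i, nei_j, out))   # embedding-net output, reshaped
res = np.einsum('ijk,ijkm->im', env_ij, ebd)              # natom x out
assert res.shape == (natom, out)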
def build( self, inputs: tf.Tensor, natoms: tf.Tensor, input_dict: dict = None, reuse: bool = None, suffix: str = '', ) -> tf.Tensor: """ Build the computational graph for fitting net Parameters ---------- inputs The input descriptor input_dict Additional dict for inputs. if numb_fparam > 0, should have input_dict['fparam'] if numb_aparam > 0, should have input_dict['aparam'] natoms The number of atoms. This tensor has the length of Ntypes + 2 natoms[0]: number of local atoms natoms[1]: total number of atoms held by this processor natoms[i]: 2 <= i < Ntypes+2, number of type i atoms reuse The weights in the networks should be reused when get the variable. suffix Name suffix to identify this descriptor Returns ------- ener The system energy """ if input_dict is None: input_dict = {} bias_atom_e = self.bias_atom_e if self.numb_fparam > 0 and (self.fparam_avg is None or self.fparam_inv_std is None): raise RuntimeError( 'No data stat result. one should do data statisitic, before build' ) if self.numb_aparam > 0 and (self.aparam_avg is None or self.aparam_inv_std is None): raise RuntimeError( 'No data stat result. one should do data statisitic, before build' ) with tf.variable_scope('fitting_attr' + suffix, reuse=reuse): t_dfparam = tf.constant(self.numb_fparam, name='dfparam', dtype=tf.int32) t_daparam = tf.constant(self.numb_aparam, name='daparam', dtype=tf.int32) if self.numb_fparam > 0: t_fparam_avg = tf.get_variable( 't_fparam_avg', self.numb_fparam, dtype=GLOBAL_TF_FLOAT_PRECISION, trainable=False, initializer=tf.constant_initializer(self.fparam_avg)) t_fparam_istd = tf.get_variable( 't_fparam_istd', self.numb_fparam, dtype=GLOBAL_TF_FLOAT_PRECISION, trainable=False, initializer=tf.constant_initializer(self.fparam_inv_std)) if self.numb_aparam > 0: t_aparam_avg = tf.get_variable( 't_aparam_avg', self.numb_aparam, dtype=GLOBAL_TF_FLOAT_PRECISION, trainable=False, initializer=tf.constant_initializer(self.aparam_avg)) t_aparam_istd = tf.get_variable( 't_aparam_istd', self.numb_aparam, dtype=GLOBAL_TF_FLOAT_PRECISION, trainable=False, initializer=tf.constant_initializer(self.aparam_inv_std)) inputs = tf.reshape(inputs, [-1, self.dim_descrpt * natoms[0]]) if len(self.atom_ener): # only for atom_ener nframes = input_dict.get('nframes') if nframes is not None: # like inputs, but we don't want to add a dependency on inputs inputs_zero = tf.zeros((nframes, self.dim_descrpt * natoms[0]), dtype=self.fitting_precision) else: inputs_zero = tf.zeros_like(inputs, dtype=self.fitting_precision) if bias_atom_e is not None: assert (len(bias_atom_e) == self.ntypes) fparam = None aparam = None if self.numb_fparam > 0: fparam = input_dict['fparam'] fparam = tf.reshape(fparam, [-1, self.numb_fparam]) fparam = (fparam - t_fparam_avg) * t_fparam_istd if self.numb_aparam > 0: aparam = input_dict['aparam'] aparam = tf.reshape(aparam, [-1, self.numb_aparam]) aparam = (aparam - t_aparam_avg) * t_aparam_istd aparam = tf.reshape(aparam, [-1, self.numb_aparam * natoms[0]]) type_embedding = input_dict.get('type_embedding', None) if type_embedding is not None: atype_embed = embed_atom_type(self.ntypes, natoms, type_embedding) atype_embed = tf.tile(atype_embed, [tf.shape(inputs)[0], 1]) else: atype_embed = None if atype_embed is None: start_index = 0 outs_list = [] for type_i in range(self.ntypes): if bias_atom_e is None: type_bias_ae = 0.0 else: type_bias_ae = bias_atom_e[type_i] final_layer = self._build_lower(start_index, natoms[2 + type_i], inputs, fparam, aparam, bias_atom_e=type_bias_ae, suffix='_type_' + str(type_i) 
+ suffix, reuse=reuse) # concat the results if type_i < len( self.atom_ener) and self.atom_ener[type_i] is not None: zero_layer = self._build_lower(start_index, natoms[2 + type_i], inputs_zero, fparam, aparam, bias_atom_e=type_bias_ae, suffix='_type_' + str(type_i) + suffix, reuse=True) final_layer += self.atom_ener[type_i] - zero_layer final_layer = tf.reshape( final_layer, [tf.shape(inputs)[0], natoms[2 + type_i]]) outs_list.append(final_layer) start_index += natoms[2 + type_i] # concat the results # concat once may be faster than multiple concat outs = tf.concat(outs_list, axis=1) # with type embedding else: if len(self.atom_ener) > 0: raise RuntimeError( "setting atom_ener is not supported by type embedding") atype_embed = tf.cast(atype_embed, self.fitting_precision) type_shape = atype_embed.get_shape().as_list() inputs = tf.concat( [tf.reshape(inputs, [-1, self.dim_descrpt]), atype_embed], axis=1) self.dim_descrpt = self.dim_descrpt + type_shape[1] inputs = tf.reshape(inputs, [-1, self.dim_descrpt * natoms[0]]) final_layer = self._build_lower(0, natoms[0], inputs, fparam, aparam, bias_atom_e=0.0, suffix=suffix, reuse=reuse) outs = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[0]]) # add atom energy bias; TF will broadcast to all batches # tf.repeat is avaiable in TF>=2.1 or TF 1.15 _TF_VERSION = Version(TF_VERSION) if (Version('1.15') <= _TF_VERSION < Version('2') or _TF_VERSION >= Version('2.1')) and self.bias_atom_e is not None: outs += tf.repeat( tf.Variable(self.bias_atom_e, dtype=self.fitting_precision, trainable=False, name="bias_atom_ei"), natoms[2:]) if self.tot_ener_zero: force_tot_ener = 0.0 outs = tf.reshape(outs, [-1, natoms[0]]) outs_mean = tf.reshape(tf.reduce_mean(outs, axis=1), [-1, 1]) outs_mean = outs_mean - tf.ones_like( outs_mean, dtype=GLOBAL_TF_FLOAT_PRECISION) * ( force_tot_ener / global_cvt_2_tf_float(natoms[0])) outs = outs - outs_mean outs = tf.reshape(outs, [-1]) tf.summary.histogram('fitting_net_output', outs) return tf.reshape(outs, [-1])
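# Sketch (illustrative only) of the atom_ener correction applied above: the
# fitting net is evaluated once on the real descriptor and once on an all-zero
# descriptor, and the difference shifts the prediction so that a zero
# environment maps exactly to the prescribed per-type energy atom_ener[type_i]:
def _shifted_energy(net, descriptor, zero_descriptor, atom_ener_i):
    return net(descriptor) + (atom_ener_i - net(zero_descriptor))

# e.g. with a toy "network" that sums its inputs, a zero environment gives
# exactly atom_ener_i, while non-zero environments keep their learned offset:
net = sum
assert _shifted_energy(net, [0.0, 0.0], [0.0, 0.0], -3.2) == -3.2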
def build(self, input_d, rot_mat, natoms, reuse=None, suffix=''): start_index = 0 inputs = tf.cast( tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision) rot_mat = tf.reshape(rot_mat, [-1, self.dim_rot_mat * natoms[0]]) count = 0 for type_i in range(self.ntypes): # cut-out inputs inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt], [-1, natoms[2 + type_i] * self.dim_descrpt]) inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt]) rot_mat_i = tf.slice(rot_mat, [0, start_index * self.dim_rot_mat], [-1, natoms[2 + type_i] * self.dim_rot_mat]) rot_mat_i = tf.reshape(rot_mat_i, [-1, self.dim_rot_mat_1, 3]) start_index += natoms[2 + type_i] if not type_i in self.sel_type: continue layer = inputs_i for ii in range(0, len(self.n_neuron)): if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]: layer += one_layer( layer, self.n_neuron[ii], name='layer_' + str(ii) + '_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, use_timestep=self.resnet_dt, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision) else: layer = one_layer(layer, self.n_neuron[ii], name='layer_' + str(ii) + '_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision) if self.fit_diag: bavg = np.zeros(self.dim_rot_mat_1) # bavg[0] = self.avgeig[0] # bavg[1] = self.avgeig[1] # bavg[2] = self.avgeig[2] # (nframes x natoms) x naxis final_layer = one_layer(layer, self.dim_rot_mat_1, activation_fn=None, name='final_layer_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, bavg=bavg, precision=self.fitting_precision) # (nframes x natoms) x naxis final_layer = tf.reshape(final_layer, [ tf.shape(inputs)[0] * natoms[2 + type_i], self.dim_rot_mat_1 ]) # (nframes x natoms) x naxis x naxis final_layer = tf.matrix_diag(final_layer) else: bavg = np.zeros(self.dim_rot_mat_1 * self.dim_rot_mat_1) # bavg[0*self.dim_rot_mat_1+0] = self.avgeig[0] # bavg[1*self.dim_rot_mat_1+1] = self.avgeig[1] # bavg[2*self.dim_rot_mat_1+2] = self.avgeig[2] # (nframes x natoms) x (naxis x naxis) final_layer = one_layer( layer, self.dim_rot_mat_1 * self.dim_rot_mat_1, activation_fn=None, name='final_layer_type_' + str(type_i) + suffix, reuse=reuse, seed=self.seed, bavg=bavg, precision=self.fitting_precision) # (nframes x natoms) x naxis x naxis final_layer = tf.reshape(final_layer, [ tf.shape(inputs)[0] * natoms[2 + type_i], self.dim_rot_mat_1, self.dim_rot_mat_1 ]) # (nframes x natoms) x naxis x naxis final_layer = final_layer + tf.transpose(final_layer, perm=[0, 2, 1]) # (nframes x natoms) x naxis x 3(coord) final_layer = tf.matmul(final_layer, rot_mat_i) # (nframes x natoms) x 3(coord) x 3(coord) final_layer = tf.matmul(rot_mat_i, final_layer, transpose_a=True) # nframes x natoms x 3 x 3 final_layer = tf.reshape( final_layer, [tf.shape(inputs)[0], natoms[2 + type_i], 3, 3]) # shift and scale sel_type_idx = self.sel_type.index(type_i) final_layer = final_layer * self.scale[sel_type_idx] final_layer = final_layer + self.diag_shift[sel_type_idx] * tf.eye( 3, batch_shape=[tf.shape(inputs)[0], natoms[2 + type_i]], dtype=global_tf_float_precision) # concat the results if count == 0: outs = final_layer else: outs = tf.concat([outs, final_layer], axis=1) count += 1 return tf.cast(tf.reshape(outs, [-1]), global_tf_float_precision)
def _pass_filter(self, inputs, atype, natoms, input_dict, reuse=None, suffix='', trainable=True): if input_dict is not None: type_embedding = input_dict.get('type_embedding', None) else: type_embedding = None start_index = 0 inputs = tf.reshape(inputs, [-1, self.ndescrpt * natoms[0]]) output = [] output_qmat = [] if not self.type_one_side and type_embedding is None: for type_i in range(self.ntypes): inputs_i = tf.slice(inputs, [0, start_index * self.ndescrpt], [-1, natoms[2 + type_i] * self.ndescrpt]) inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt]) layer, qmat = self._filter( tf.cast(inputs_i, self.filter_precision), type_i, name='filter_type_' + str(type_i) + suffix, natoms=natoms, reuse=reuse, trainable=trainable, activation_fn=self.filter_activation_fn) layer = tf.reshape(layer, [ tf.shape(inputs)[0], natoms[2 + type_i] * self.get_dim_out() ]) qmat = tf.reshape(qmat, [ tf.shape(inputs)[0], natoms[2 + type_i] * self.get_dim_rot_mat_1() * 3 ]) output.append(layer) output_qmat.append(qmat) start_index += natoms[2 + type_i] else: inputs_i = inputs inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt]) type_i = -1 layer, qmat = self._filter(tf.cast(inputs_i, self.filter_precision), type_i, name='filter_type_all' + suffix, natoms=natoms, reuse=reuse, trainable=trainable, activation_fn=self.filter_activation_fn, type_embedding=type_embedding) layer = tf.reshape( layer, [tf.shape(inputs)[0], natoms[0] * self.get_dim_out()]) qmat = tf.reshape(qmat, [ tf.shape(inputs)[0], natoms[0] * self.get_dim_rot_mat_1() * 3 ]) output.append(layer) output_qmat.append(qmat) output = tf.concat(output, axis=1) output_qmat = tf.concat(output_qmat, axis=1) return output, output_qmat