def test_cast_precision(self):
    x = tf.zeros(1, dtype=GLOBAL_TF_FLOAT_PRECISION)
    y = tf.zeros(1, dtype=tf.int64)
    self.assertEqual(x.dtype, GLOBAL_TF_FLOAT_PRECISION)
    self.assertEqual(y.dtype, tf.int64)
    x, y, z = self._inner_method(x, y)
    self.assertEqual(x.dtype, GLOBAL_TF_FLOAT_PRECISION)
    self.assertEqual(y.dtype, tf.int64)
    self.assertIsInstance(z, bool)
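
# For context, `_inner_method` in this test is wrapped by a precision-casting
# decorator: float tensors are cast to the instance's working precision on the
# way in and back to the storage precision on the way out, while integer
# tensors and plain Python values (like the bool `z`) pass through untouched.
# Below is a minimal sketch of such a decorator; the name `cast_precision`,
# the `precision` attribute, and the storage-precision constant are
# assumptions for illustration, not the library's exact API.
import functools

import tensorflow.compat.v1 as tf

# Assumption for the sketch: the module-level storage precision.
GLOBAL_TF_FLOAT_PRECISION = tf.float64

def cast_precision(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        def _in(v):
            # cast float tensors to the working precision before the call
            if isinstance(v, tf.Tensor) and v.dtype.is_floating:
                return tf.cast(v, self.precision)
            return v

        def _out(v):
            # cast float tensor results back to the storage precision
            if isinstance(v, tf.Tensor) and v.dtype.is_floating:
                return tf.cast(v, GLOBAL_TF_FLOAT_PRECISION)
            return v

        ret = func(self, *(_in(a) for a in args), **kwargs)
        return tuple(_out(v) for v in ret) if isinstance(ret, tuple) else _out(ret)

    return wrapper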
def _enrich(self, dipole, dof=3):
    coll = []
    sel_start_idx = 0
    for type_i in range(self.ntypes):
        if type_i in self.sel_type:
            di = tf.slice(dipole,
                          [0, sel_start_idx * dof],
                          [-1, self.t_natoms[2 + type_i] * dof])
            sel_start_idx += self.t_natoms[2 + type_i]
        else:
            di = tf.zeros(
                [tf.shape(dipole)[0], self.t_natoms[2 + type_i] * dof],
                dtype=GLOBAL_TF_FLOAT_PRECISION)
        coll.append(di)
    return tf.concat(coll, axis=1)
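
# To make the layout concrete, here is a hedged NumPy analogue of _enrich:
# per-type dipole blocks, predicted only for the selected types, are spliced
# into a full per-atom array with zero blocks for the unselected types.
# The function name and shapes below are illustrative assumptions, not part
# of the original class.
import numpy as np

def enrich_np(dipole, natoms_per_type, sel_type, dof=3):
    nframes = dipole.shape[0]
    coll = []
    sel_start = 0
    for type_i, nat in enumerate(natoms_per_type):
        if type_i in sel_type:
            # selected type: copy its dof-sized blocks from the packed input
            coll.append(dipole[:, sel_start * dof:(sel_start + nat) * dof])
            sel_start += nat
        else:
            # unselected type: pad with zeros
            coll.append(np.zeros((nframes, nat * dof)))
    return np.concatenate(coll, axis=1)

# Example: 2 frames, natoms per type [2, 1], only type 0 selected.
dipole = np.ones((2, 2 * 3))
full = enrich_np(dipole, natoms_per_type=[2, 1], sel_type=[0])
assert full.shape == (2, 9)  # 3 atoms x 3 dipole components per frame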
def _filter(self,
            inputs,
            type_input,
            natoms,
            activation_fn=tf.nn.tanh,
            stddev=1.0,
            bavg=0.0,
            name='linear',
            reuse=None,
            seed=None,
            trainable=True):
    # natom x (nei x 4)
    shape = inputs.get_shape().as_list()
    outputs_size = [1] + self.filter_neuron
    outputs_size_2 = self.n_axis_neuron
    with tf.variable_scope(name, reuse=reuse):
        start_index = 0
        xyz_scatter_total = []
        for type_i in range(self.ntypes):
            # cut out inputs
            # with natom x (nei_type_i x 4)
            inputs_i = tf.slice(inputs,
                                [0, start_index * 4],
                                [-1, self.sel_a[type_i] * 4])
            start_index += self.sel_a[type_i]
            shape_i = inputs_i.get_shape().as_list()
            # with (natom x nei_type_i) x 4
            inputs_reshape = tf.reshape(inputs_i, [-1, 4])
            xyz_scatter = tf.reshape(
                tf.slice(inputs_reshape, [0, 0], [-1, 1]), [-1, 1])
            if (type_input, type_i) not in self.exclude_types:
                for ii in range(1, len(outputs_size)):
                    w = tf.get_variable(
                        'matrix_' + str(ii) + '_' + str(type_i),
                        [outputs_size[ii - 1], outputs_size[ii]],
                        self.filter_precision,
                        tf.random_normal_initializer(
                            stddev=stddev / np.sqrt(outputs_size[ii] + outputs_size[ii - 1]),
                            seed=seed),
                        trainable=trainable)
                    b = tf.get_variable(
                        'bias_' + str(ii) + '_' + str(type_i),
                        [1, outputs_size[ii]],
                        self.filter_precision,
                        tf.random_normal_initializer(stddev=stddev,
                                                     mean=bavg,
                                                     seed=seed),
                        trainable=trainable)
                    if self.filter_resnet_dt:
                        idt = tf.get_variable(
                            'idt_' + str(ii) + '_' + str(type_i),
                            [1, outputs_size[ii]],
                            self.filter_precision,
                            tf.random_normal_initializer(stddev=0.001,
                                                         mean=1.0,
                                                         seed=seed),
                            trainable=trainable)
                    if outputs_size[ii] == outputs_size[ii - 1]:
                        # resnet: same width, identity skip connection
                        if self.filter_resnet_dt:
                            xyz_scatter += activation_fn(
                                tf.matmul(xyz_scatter, w) + b) * idt
                        else:
                            xyz_scatter += activation_fn(
                                tf.matmul(xyz_scatter, w) + b)
                    elif outputs_size[ii] == outputs_size[ii - 1] * 2:
                        # resnet: doubled width, duplicate the input to match
                        if self.filter_resnet_dt:
                            xyz_scatter = tf.concat(
                                [xyz_scatter, xyz_scatter], 1) + activation_fn(
                                    tf.matmul(xyz_scatter, w) + b) * idt
                        else:
                            xyz_scatter = tf.concat(
                                [xyz_scatter, xyz_scatter], 1) + activation_fn(
                                    tf.matmul(xyz_scatter, w) + b)
                    else:
                        xyz_scatter = activation_fn(
                            tf.matmul(xyz_scatter, w) + b)
            else:
                # excluded type pair: contribute zeros of the right width
                w = tf.zeros((outputs_size[0], outputs_size[-1]),
                             dtype=GLOBAL_TF_FLOAT_PRECISION)
                xyz_scatter = tf.matmul(xyz_scatter, w)
            # natom x nei_type_i x out_size
            xyz_scatter = tf.reshape(
                xyz_scatter, (-1, shape_i[1] // 4, outputs_size[-1]))
            xyz_scatter_total.append(xyz_scatter)

        # natom x nei x outputs_size
        xyz_scatter = tf.concat(xyz_scatter_total, axis=1)
        # natom x nei x 4
        inputs_reshape = tf.reshape(inputs, [-1, shape[1] // 4, 4])
        # natom x 4 x outputs_size
        xyz_scatter_1 = tf.matmul(inputs_reshape, xyz_scatter, transpose_a=True)
        xyz_scatter_1 = xyz_scatter_1 * (4.0 / shape[1])
        # natom x 4 x outputs_size_2
        xyz_scatter_2 = tf.slice(xyz_scatter_1, [0, 0, 0], [-1, -1, outputs_size_2])
        # # natom x 3 x outputs_size_2
        # qmat = tf.slice(xyz_scatter_2, [0, 1, 0], [-1, 3, -1])
        # natom x 3 x outputs_size_1
        qmat = tf.slice(xyz_scatter_1, [0, 1, 0], [-1, 3, -1])
        # natom x outputs_size_1 x 3
        qmat = tf.transpose(qmat, perm=[0, 2, 1])
        # natom x outputs_size x outputs_size_2
        result = tf.matmul(xyz_scatter_1, xyz_scatter_2, transpose_a=True)
        # natom x (outputs_size x outputs_size_2)
        result = tf.reshape(result, [-1, outputs_size_2 * outputs_size[-1]])
    return result, qmat
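
# The layer-width branching in _filter (and in _filter_r below) implements the
# embedding-net resnet rule: an unchanged width gives a plain skip connection,
# a doubled width duplicates the input to match, and anything else is an
# ordinary dense layer. A minimal NumPy sketch of just that rule; the function
# name, widths, and weights are illustrative assumptions.
import numpy as np

def embedding_resnet(x, weights, biases):
    # x: (n, d0); weights[i]: (d_{i-1}, d_i); biases[i]: (1, d_i)
    for w, b in zip(weights, biases):
        hidden = np.tanh(x @ w + b)
        if w.shape[1] == w.shape[0]:
            x = x + hidden  # same width: identity skip
        elif w.shape[1] == w.shape[0] * 2:
            x = np.concatenate([x, x], axis=1) + hidden  # doubled width: duplicate input
        else:
            x = hidden  # otherwise: plain layer
    return x

rng = np.random.default_rng(0)
x = rng.standard_normal((4, 1))
widths = [1, 25, 50, 100]  # a typical filter_neuron-style progression
weights = [rng.standard_normal((widths[i - 1], widths[i])) for i in range(1, len(widths))]
biases = [np.zeros((1, widths[i])) for i in range(1, len(widths))]
assert embedding_resnet(x, weights, biases).shape == (4, 100)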
def _filter_r(self,
              inputs,
              type_input,
              natoms,
              activation_fn=tf.nn.tanh,
              stddev=1.0,
              bavg=0.0,
              name='linear',
              reuse=None,
              seed=None,
              trainable=True):
    # natom x nei
    outputs_size = [1] + self.filter_neuron
    with tf.variable_scope(name, reuse=reuse):
        start_index = 0
        xyz_scatter_total = []
        for type_i in range(self.ntypes):
            # cut out inputs
            # with natom x nei_type_i
            inputs_i = tf.slice(inputs,
                                [0, start_index],
                                [-1, self.sel_r[type_i]])
            start_index += self.sel_r[type_i]
            shape_i = inputs_i.get_shape().as_list()
            # with (natom x nei_type_i) x 1
            xyz_scatter = tf.reshape(inputs_i, [-1, 1])
            if (type_input, type_i) not in self.exclude_types:
                for ii in range(1, len(outputs_size)):
                    w = tf.get_variable(
                        'matrix_' + str(ii) + '_' + str(type_i),
                        [outputs_size[ii - 1], outputs_size[ii]],
                        self.filter_precision,
                        tf.random_normal_initializer(
                            stddev=stddev / np.sqrt(outputs_size[ii] + outputs_size[ii - 1]),
                            seed=seed),
                        trainable=trainable)
                    b = tf.get_variable(
                        'bias_' + str(ii) + '_' + str(type_i),
                        [1, outputs_size[ii]],
                        self.filter_precision,
                        tf.random_normal_initializer(stddev=stddev,
                                                     mean=bavg,
                                                     seed=seed),
                        trainable=trainable)
                    if self.filter_resnet_dt:
                        idt = tf.get_variable(
                            'idt_' + str(ii) + '_' + str(type_i),
                            [1, outputs_size[ii]],
                            self.filter_precision,
                            tf.random_normal_initializer(stddev=0.001,
                                                         mean=1.0,
                                                         seed=seed),
                            trainable=trainable)
                    if outputs_size[ii] == outputs_size[ii - 1]:
                        # resnet: same width, identity skip connection
                        if self.filter_resnet_dt:
                            xyz_scatter += activation_fn(
                                tf.matmul(xyz_scatter, w) + b) * idt
                        else:
                            xyz_scatter += activation_fn(
                                tf.matmul(xyz_scatter, w) + b)
                    elif outputs_size[ii] == outputs_size[ii - 1] * 2:
                        # resnet: doubled width, duplicate the input to match
                        if self.filter_resnet_dt:
                            xyz_scatter = tf.concat(
                                [xyz_scatter, xyz_scatter], 1) + activation_fn(
                                    tf.matmul(xyz_scatter, w) + b) * idt
                        else:
                            xyz_scatter = tf.concat(
                                [xyz_scatter, xyz_scatter], 1) + activation_fn(
                                    tf.matmul(xyz_scatter, w) + b)
                    else:
                        xyz_scatter = activation_fn(
                            tf.matmul(xyz_scatter, w) + b)
            else:
                # excluded type pair: contribute zeros of the right width
                w = tf.zeros((outputs_size[0], outputs_size[-1]),
                             dtype=GLOBAL_TF_FLOAT_PRECISION)
                xyz_scatter = tf.matmul(xyz_scatter, w)
            # natom x nei_type_i x out_size
            xyz_scatter = tf.reshape(xyz_scatter,
                                     (-1, shape_i[1], outputs_size[-1]))
            xyz_scatter_total.append(xyz_scatter)

        # natom x nei x outputs_size
        xyz_scatter = tf.concat(xyz_scatter_total, axis=1)
        # natom x outputs_size
        res_rescale = 1. / 5.
        result = tf.reduce_mean(xyz_scatter, axis=1) * res_rescale
    return result
def build(
        self,
        inputs: tf.Tensor,
        natoms: tf.Tensor,
        input_dict: dict = None,
        reuse: bool = None,
        suffix: str = '',
) -> tf.Tensor:
    """Build the computational graph for the fitting net.

    Parameters
    ----------
    inputs
        The input descriptor
    natoms
        The number of atoms. This tensor has the length of Ntypes + 2:
        natoms[0]: number of local atoms
        natoms[1]: total number of atoms held by this processor
        natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
    input_dict
        Additional dict for inputs.
        If numb_fparam > 0, it should have input_dict['fparam'].
        If numb_aparam > 0, it should have input_dict['aparam'].
    reuse
        The weights in the networks should be reused when getting the variables.
    suffix
        Name suffix to identify this descriptor

    Returns
    -------
    ener
        The system energy
    """
    if input_dict is None:
        input_dict = {}
    bias_atom_e = self.bias_atom_e
    if self.numb_fparam > 0 and (self.fparam_avg is None
                                 or self.fparam_inv_std is None):
        raise RuntimeError(
            'No data stat result. One should do data statistics before building.'
        )
    if self.numb_aparam > 0 and (self.aparam_avg is None
                                 or self.aparam_inv_std is None):
        raise RuntimeError(
            'No data stat result. One should do data statistics before building.'
        )

    with tf.variable_scope('fitting_attr' + suffix, reuse=reuse):
        t_dfparam = tf.constant(self.numb_fparam,
                                name='dfparam',
                                dtype=tf.int32)
        t_daparam = tf.constant(self.numb_aparam,
                                name='daparam',
                                dtype=tf.int32)
        if self.numb_fparam > 0:
            t_fparam_avg = tf.get_variable(
                't_fparam_avg',
                self.numb_fparam,
                dtype=GLOBAL_TF_FLOAT_PRECISION,
                trainable=False,
                initializer=tf.constant_initializer(self.fparam_avg))
            t_fparam_istd = tf.get_variable(
                't_fparam_istd',
                self.numb_fparam,
                dtype=GLOBAL_TF_FLOAT_PRECISION,
                trainable=False,
                initializer=tf.constant_initializer(self.fparam_inv_std))
        if self.numb_aparam > 0:
            t_aparam_avg = tf.get_variable(
                't_aparam_avg',
                self.numb_aparam,
                dtype=GLOBAL_TF_FLOAT_PRECISION,
                trainable=False,
                initializer=tf.constant_initializer(self.aparam_avg))
            t_aparam_istd = tf.get_variable(
                't_aparam_istd',
                self.numb_aparam,
                dtype=GLOBAL_TF_FLOAT_PRECISION,
                trainable=False,
                initializer=tf.constant_initializer(self.aparam_inv_std))

    inputs = tf.reshape(inputs, [-1, self.dim_descrpt * natoms[0]])
    if len(self.atom_ener):
        # only for atom_ener
        nframes = input_dict.get('nframes')
        if nframes is not None:
            # like inputs, but we don't want to add a dependency on inputs
            inputs_zero = tf.zeros((nframes, self.dim_descrpt * natoms[0]),
                                   dtype=self.fitting_precision)
        else:
            inputs_zero = tf.zeros_like(inputs, dtype=self.fitting_precision)

    if bias_atom_e is not None:
        assert (len(bias_atom_e) == self.ntypes)

    fparam = None
    aparam = None
    if self.numb_fparam > 0:
        fparam = input_dict['fparam']
        fparam = tf.reshape(fparam, [-1, self.numb_fparam])
        fparam = (fparam - t_fparam_avg) * t_fparam_istd
    if self.numb_aparam > 0:
        aparam = input_dict['aparam']
        aparam = tf.reshape(aparam, [-1, self.numb_aparam])
        aparam = (aparam - t_aparam_avg) * t_aparam_istd
        aparam = tf.reshape(aparam, [-1, self.numb_aparam * natoms[0]])

    type_embedding = input_dict.get('type_embedding', None)
    if type_embedding is not None:
        atype_embed = embed_atom_type(self.ntypes, natoms, type_embedding)
        atype_embed = tf.tile(atype_embed, [tf.shape(inputs)[0], 1])
    else:
        atype_embed = None

    if atype_embed is None:
        start_index = 0
        outs_list = []
        for type_i in range(self.ntypes):
            if bias_atom_e is None:
                type_bias_ae = 0.0
            else:
                type_bias_ae = bias_atom_e[type_i]
            final_layer = self._build_lower(
                start_index, natoms[2 + type_i],
                inputs, fparam, aparam,
                bias_atom_e=type_bias_ae,
                suffix='_type_' + str(type_i) + suffix,
                reuse=reuse)
            # concat the results
            if type_i < len(self.atom_ener) and self.atom_ener[type_i] is not None:
                zero_layer = self._build_lower(
                    start_index, natoms[2 + type_i],
                    inputs_zero, fparam, aparam,
                    bias_atom_e=type_bias_ae,
                    suffix='_type_' + str(type_i) + suffix,
                    reuse=True)
                final_layer += self.atom_ener[type_i] - zero_layer
            final_layer = tf.reshape(final_layer,
                                     [tf.shape(inputs)[0], natoms[2 + type_i]])
            outs_list.append(final_layer)
            start_index += natoms[2 + type_i]
        # concat the results
        # concat once may be faster than multiple concat
        outs = tf.concat(outs_list, axis=1)
    # with type embedding
    else:
        if len(self.atom_ener) > 0:
            raise RuntimeError(
                "setting atom_ener is not supported by type embedding")
        atype_embed = tf.cast(atype_embed, self.fitting_precision)
        type_shape = atype_embed.get_shape().as_list()
        inputs = tf.concat(
            [tf.reshape(inputs, [-1, self.dim_descrpt]), atype_embed],
            axis=1)
        self.dim_descrpt = self.dim_descrpt + type_shape[1]
        inputs = tf.reshape(inputs, [-1, self.dim_descrpt * natoms[0]])
        final_layer = self._build_lower(
            0, natoms[0],
            inputs, fparam, aparam,
            bias_atom_e=0.0,
            suffix=suffix,
            reuse=reuse)
        outs = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[0]])

    # add atom energy bias; TF will broadcast to all batches
    # tf.repeat is available in TF>=2.1 or TF 1.15
    _TF_VERSION = Version(TF_VERSION)
    if (Version('1.15') <= _TF_VERSION < Version('2')
            or _TF_VERSION >= Version('2.1')) and self.bias_atom_e is not None:
        outs += tf.repeat(
            tf.Variable(self.bias_atom_e,
                        dtype=self.fitting_precision,
                        trainable=False,
                        name="bias_atom_ei"),
            natoms[2:])

    if self.tot_ener_zero:
        force_tot_ener = 0.0
        outs = tf.reshape(outs, [-1, natoms[0]])
        outs_mean = tf.reshape(tf.reduce_mean(outs, axis=1), [-1, 1])
        outs_mean = outs_mean - tf.ones_like(
            outs_mean, dtype=GLOBAL_TF_FLOAT_PRECISION) * (
                force_tot_ener / global_cvt_2_tf_float(natoms[0]))
        outs = outs - outs_mean
        outs = tf.reshape(outs, [-1])

    tf.summary.histogram('fitting_net_output', outs)
    return tf.reshape(outs, [-1])
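
# The tf.repeat call in build tiles one bias per atom type across that type's
# atoms before broadcasting over frames. A small NumPy illustration of the same
# expansion; the bias values and atom counts are made up for the example.
import numpy as np

bias_atom_e = np.array([-93.6, -187.2])  # hypothetical per-type energy biases
natoms = np.array([3, 3, 2, 1])          # [nloc, nall, n_type0, n_type1]

# np.repeat mirrors tf.repeat(bias, natoms[2:]): one bias per local atom
per_atom_bias = np.repeat(bias_atom_e, natoms[2:])
assert per_atom_bias.tolist() == [-93.6, -93.6, -187.2]

# adding it to a (nframes, nloc) output broadcasts across all frames
outs = np.zeros((5, natoms[0])) + per_atom_bias
assert outs.shape == (5, 3)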