Example #1
0
    def _type_embedding_net_one_side(self,
                                     mat_g,
                                     atype,
                                     natoms,
                                     name='',
                                     reuse=None,
                                     seed=None,
                                     trainable=True):
        """Mix the neighbor-type embedding into the filter matrix (one-side variant).

        Parameters
        ----------
        mat_g
            Filter matrix; reshaped here as
            (nf x natom x nei) x outputs_size -- assumes the caller provides a
            compatible layout, TODO confirm.
        atype
            Atom types. Unused in this variant; kept for a uniform interface
            with the two-side/aparam variants.
        natoms
            natoms[0] is the number of atoms per frame.
        name
            Variable-name prefix for the amplification layer.
        reuse
            Passed through to the TF variable creation (variable reuse flag).
        seed
            Random seed for the amplification layer; falls back to self.seed
            when not given.
        trainable
            Whether the variables created here are trainable.

        Returns
        -------
        Tensor of shape (nf x natom) x nei x outputs_size.
        """
        outputs_size = self.filter_neuron[-1]
        nframes = tf.shape(mat_g)[0]
        # (nf x natom x nei) x (outputs_size x chnl x chnl)
        mat_g = tf.reshape(mat_g,
                           [nframes * natoms[0] * self.nnei, outputs_size])
        # Amplify each of the outputs_size channels into type_nchanl channels.
        # BUG FIX: the `seed` argument used to be silently ignored (self.seed
        # was always passed); honor it when explicitly provided.
        mat_g = one_layer(mat_g,
                          outputs_size * self.type_nchanl,
                          activation_fn=None,
                          precision=self.filter_precision,
                          name=name + '_amplify',
                          reuse=reuse,
                          seed=seed if seed is not None else self.seed,
                          trainable=trainable)
        # nf x natom x nei x outputs_size x chnl
        mat_g = tf.reshape(
            mat_g,
            [nframes, natoms[0], self.nnei, outputs_size, self.type_nchanl])
        # nf x natom x outputs_size x nei x chnl
        mat_g = tf.transpose(mat_g, perm=[0, 1, 3, 2, 4])
        # nf x natom x outputs_size x (nei x chnl)
        mat_g = tf.reshape(
            mat_g,
            [nframes, natoms[0], outputs_size, self.nnei * self.type_nchanl])

        # nei x nchnl
        # BUG FIX: forward `trainable` instead of hard-coding True, so that
        # freezing this descriptor also freezes its type-embedding net
        # (matches the one_layer call above).
        ebd_nei_type = self._type_embed(self.nei_type,
                                        reuse=reuse,
                                        trainable=trainable,
                                        suffix='')
        # (nei x nchnl)
        ebd_nei_type = tf.reshape(ebd_nei_type, [self.nnei * self.type_nchanl])

        # Broadcast-multiply the per-neighbor type embedding into the
        # amplified filter matrix, then average over the channel axis.
        # nf x natom x outputs_size x (nei x chnl)
        mat_g = tf.multiply(mat_g, ebd_nei_type)
        # nf x natom x outputs_size x nei x chnl
        mat_g = tf.reshape(
            mat_g,
            [nframes, natoms[0], outputs_size, self.nnei, self.type_nchanl])
        # nf x natom x outputs_size x nei
        mat_g = tf.reduce_mean(mat_g, axis=4)
        # nf x natom x nei x outputs_size
        mat_g = tf.transpose(mat_g, perm=[0, 1, 3, 2])
        # (nf x natom) x nei x outputs_size
        mat_g = tf.reshape(mat_g,
                           [nframes * natoms[0], self.nnei, outputs_size])
        return mat_g
Example #2
0
    def build(self, learning_rate, natoms, model_dict, label_dict, suffix):
        """Assemble the weighted L2 loss from model predictions and labels.

        Each term (energy, force, virial, atomic energy, prefactor-weighted
        force) is computed unconditionally and exposed in `more_loss`; only
        the terms enabled via self.has_* contribute to the scalar loss.
        Prefactors are linearly scheduled with the learning rate.

        Returns
        -------
        (l2_loss, more_loss)
            The total scalar loss and a dict of the individual L2 terms.
        """
        energy = model_dict['energy']
        force = model_dict['force']
        virial = model_dict['virial']
        atom_ener = model_dict['atom_ener']
        energy_hat = label_dict['energy']
        force_hat = label_dict['force']
        virial_hat = label_dict['virial']
        atom_ener_hat = label_dict['atom_ener']
        atom_pref = label_dict['atom_pref']

        # --- individual L2 terms -------------------------------------------
        l2_ener_loss = tf.reduce_mean(tf.square(energy - energy_hat),
                                      name='l2_' + suffix)

        diff_f = tf.reshape(force_hat, [-1]) - tf.reshape(force, [-1])
        if self.relative_f is not None:
            # Scale the force difference by the label force magnitude
            # (plus relative_f to avoid division by zero).
            norm_f = tf.reshape(
                tf.norm(tf.reshape(force_hat, [-1, 3]), axis=1),
                [-1, 1]) + self.relative_f
            diff_f = tf.reshape(tf.reshape(diff_f, [-1, 3]) / norm_f, [-1])
        l2_force_loss = tf.reduce_mean(tf.square(diff_f),
                                       name="l2_force_" + suffix)
        l2_pref_force_loss = tf.reduce_mean(
            tf.multiply(tf.square(diff_f), tf.reshape(atom_pref, [-1])),
            name="l2_pref_force_" + suffix)

        l2_virial_loss = tf.reduce_mean(
            tf.square(tf.reshape(virial_hat, [-1]) - tf.reshape(virial, [-1])),
            name="l2_virial_" + suffix)

        l2_atom_ener_loss = tf.reduce_mean(
            tf.square(tf.reshape(atom_ener_hat, [-1]) -
                      tf.reshape(atom_ener, [-1])),
            name="l2_atom_ener_" + suffix)

        # --- learning-rate-scheduled prefactors ----------------------------
        atom_norm = 1. / global_cvt_2_tf_float(natoms[0])
        atom_norm_ener = 1. / global_cvt_2_ener_float(natoms[0])

        def sched(find, start, limit):
            # Linear interpolation from `start` to `limit` as the learning
            # rate decays; gated by the corresponding find_* flag.
            return find * (limit + (start - limit) *
                           learning_rate / self.starter_learning_rate)

        pref_e = global_cvt_2_ener_float(
            sched(label_dict['find_energy'],
                  self.start_pref_e, self.limit_pref_e))
        pref_f = global_cvt_2_tf_float(
            sched(label_dict['find_force'],
                  self.start_pref_f, self.limit_pref_f))
        pref_v = global_cvt_2_tf_float(
            sched(label_dict['find_virial'],
                  self.start_pref_v, self.limit_pref_v))
        pref_ae = global_cvt_2_tf_float(
            sched(label_dict['find_atom_ener'],
                  self.start_pref_ae, self.limit_pref_ae))
        pref_pf = global_cvt_2_tf_float(
            sched(label_dict['find_atom_pref'],
                  self.start_pref_pf, self.limit_pref_pf))

        # --- weighted total -------------------------------------------------
        l2_loss = 0
        if self.has_e:
            l2_loss += atom_norm_ener * (pref_e * l2_ener_loss)
        if self.has_f:
            l2_loss += global_cvt_2_ener_float(pref_f * l2_force_loss)
        if self.has_v:
            l2_loss += global_cvt_2_ener_float(atom_norm *
                                               (pref_v * l2_virial_loss))
        if self.has_ae:
            l2_loss += global_cvt_2_ener_float(pref_ae * l2_atom_ener_loss)
        if self.has_pf:
            l2_loss += global_cvt_2_ener_float(pref_pf * l2_pref_force_loss)

        more_loss = {
            'l2_ener_loss': l2_ener_loss,
            'l2_force_loss': l2_force_loss,
            'l2_virial_loss': l2_virial_loss,
            'l2_atom_ener_loss': l2_atom_ener_loss,
            'l2_pref_force_loss': l2_pref_force_loss,
        }

        self.l2_l = l2_loss
        self.l2_more = more_loss
        return l2_loss, more_loss
Example #3
0
    def _type_embedding_net_one_side_aparam(self,
                                            mat_g,
                                            atype,
                                            natoms,
                                            aparam,
                                            name='',
                                            reuse=None,
                                            seed=None,
                                            trainable=True):
        """Mix a (type, atomic-parameter) embedding into the filter matrix.

        Like the one-side variant, but the embedding input for each neighbor
        is its type concatenated with the mapped atomic parameters.

        Parameters
        ----------
        mat_g
            Filter matrix; reshaped here as
            (nf x natom x nei) x outputs_size -- assumes the caller provides a
            compatible layout, TODO confirm.
        atype
            Atom types. Unused here; kept for a uniform interface.
        natoms
            natoms[0] is the number of atoms per frame.
        aparam
            Atomic parameters, mapped onto the neighbor list via
            op_module.map_aparam.
        name
            Variable-name prefix for the amplification layer.
        reuse
            Passed through to the TF variable creation (variable reuse flag).
        seed
            Random seed for the amplification layer; falls back to self.seed
            when not given.
        trainable
            Whether the variables created here are trainable.

        Returns
        -------
        Tensor of shape (nf x natom) x nei x outputs_size.
        """
        outputs_size = self.filter_neuron[-1]
        nframes = tf.shape(mat_g)[0]
        # (nf x natom x nei) x (outputs_size x chnl x chnl)
        mat_g = tf.reshape(mat_g,
                           [nframes * natoms[0] * self.nnei, outputs_size])
        # Amplify each of the outputs_size channels into type_nchanl channels.
        # BUG FIX: the `seed` argument used to be silently ignored (self.seed
        # was always passed); honor it when explicitly provided.
        mat_g = one_layer(mat_g,
                          outputs_size * self.type_nchanl,
                          activation_fn=None,
                          precision=self.filter_precision,
                          name=name + '_amplify',
                          reuse=reuse,
                          seed=seed if seed is not None else self.seed,
                          trainable=trainable)
        # nf x natom x nei x outputs_size x chnl
        mat_g = tf.reshape(
            mat_g,
            [nframes, natoms[0], self.nnei, outputs_size, self.type_nchanl])
        # outputs_size x nf x natom x nei x chnl
        mat_g = tf.transpose(mat_g, perm=[3, 0, 1, 2, 4])
        # outputs_size x (nf x natom x nei x chnl)
        mat_g = tf.reshape(
            mat_g,
            [outputs_size, nframes * natoms[0] * self.nnei * self.type_nchanl])
        # nf x natom x nnei
        embed_type = tf.tile(tf.reshape(self.nei_type, [1, self.nnei]),
                             [nframes * natoms[0], 1])
        # (nf x natom x nnei) x 1
        embed_type = tf.reshape(embed_type,
                                [nframes * natoms[0] * self.nnei, 1])
        # nf x (natom x naparam)
        aparam = tf.reshape(aparam, [nframes, -1])
        # nf x natom x nnei x naparam
        embed_aparam = op_module.map_aparam(aparam,
                                            self.nlist,
                                            natoms,
                                            n_a_sel=self.nnei_a,
                                            n_r_sel=self.nnei_r)
        # (nf x natom x nnei) x naparam
        embed_aparam = tf.reshape(
            embed_aparam, [nframes * natoms[0] * self.nnei, self.numb_aparam])
        # (nf x natom x nnei) x (naparam+1): neighbor type + its parameters
        embed_input = tf.concat((embed_type, embed_aparam), axis=1)

        # (nf x natom x nnei) x nchnl
        # BUG FIX: forward `trainable` instead of hard-coding True, so that
        # freezing this descriptor also freezes its type-embedding net
        # (matches the one_layer call above).
        ebd_nei_type = self._type_embed(embed_input,
                                        ndim=self.numb_aparam + 1,
                                        reuse=reuse,
                                        trainable=trainable,
                                        suffix='')
        # (nf x natom x nei x nchnl)
        ebd_nei_type = tf.reshape(
            ebd_nei_type, [nframes * natoms[0] * self.nnei * self.type_nchanl])

        # Broadcast-multiply the embedding into the amplified filter matrix,
        # then average over the channel axis.
        # outputs_size x (nf x natom x nei x chnl)
        mat_g = tf.multiply(mat_g, ebd_nei_type)
        # outputs_size x nf x natom x nei x chnl
        mat_g = tf.reshape(
            mat_g,
            [outputs_size, nframes, natoms[0], self.nnei, self.type_nchanl])
        # outputs_size x nf x natom x nei
        mat_g = tf.reduce_mean(mat_g, axis=4)
        # nf x natom x nei x outputs_size
        mat_g = tf.transpose(mat_g, perm=[1, 2, 3, 0])
        # (nf x natom) x nei x outputs_size
        mat_g = tf.reshape(mat_g,
                           [nframes * natoms[0], self.nnei, outputs_size])
        return mat_g