Example #1
    def _filter_r(self, 
                  inputs, 
                  type_input,
                  natoms,
                  activation_fn=tf.nn.tanh, 
                  stddev=1.0,
                  bavg=0.0,
                  name='linear', 
                  reuse=None,
                  trainable=True):
        # natom x nei
        outputs_size = [1] + self.filter_neuron
        with tf.variable_scope(name, reuse=reuse):
            start_index = 0
            xyz_scatter_total = []
            for type_i in range(self.ntypes):
                # cut-out inputs
                # with natom x nei_type_i
                inputs_i = tf.slice(inputs,
                                    [0, start_index],
                                    [-1, self.sel_r[type_i]])
                start_index += self.sel_r[type_i]
                shape_i = inputs_i.get_shape().as_list()
                # with (natom x nei_type_i) x 1
                xyz_scatter = tf.reshape(inputs_i, [-1, 1])
                if (type_input, type_i) not in self.exclude_types:
                    xyz_scatter = embedding_net(xyz_scatter,
                                                self.filter_neuron,
                                                self.filter_precision,
                                                activation_fn=activation_fn,
                                                resnet_dt=self.filter_resnet_dt,
                                                name_suffix="_" + str(type_i),
                                                stddev=stddev,
                                                bavg=bavg,
                                                seed=self.seed,
                                                trainable=trainable,
                                                uniform_seed=self.uniform_seed)
                    if (not self.uniform_seed) and (self.seed is not None):
                        self.seed += self.seed_shift
                    # natom x nei_type_i x out_size
                    xyz_scatter = tf.reshape(xyz_scatter, (-1, shape_i[1], outputs_size[-1]))
                else:
                    natom = tf.shape(inputs)[0]
                    xyz_scatter = tf.cast(tf.fill((natom, shape_i[1], outputs_size[-1]), 0.), GLOBAL_TF_FLOAT_PRECISION)
                xyz_scatter_total.append(xyz_scatter)

            # natom x nei x outputs_size
            xyz_scatter = tf.concat(xyz_scatter_total, axis=1)
            # natom x outputs_size
            res_rescale = 1. / 5.
            result = tf.reduce_mean(xyz_scatter, axis=1) * res_rescale

        return result
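Below is a minimal, self-contained sketch of the shape bookkeeping in _filter_r: slice the neighbour list per type, embed each slice, and mean-pool with the 1/5 rescale. The sizes are toy values, tf.keras.layers.Dense stands in for embedding_net, and the snippet runs in TF2 eager mode rather than the TF1 graph style used above.

import tensorflow as tf

# toy sizes; sel_r and the embedding width are illustrative assumptions
natom = 4
sel_r = [6, 10]                  # neighbours per type
out_size = 8                     # last entry of filter_neuron
inputs = tf.random.normal([natom, sum(sel_r)])   # natom x nei

start, per_type = 0, []
for type_i, nsel in enumerate(sel_r):
    # natom x nei_type_i  ->  (natom * nei_type_i) x 1
    inputs_i = tf.slice(inputs, [0, start], [-1, nsel])
    start += nsel
    xyz = tf.reshape(inputs_i, [-1, 1])
    # stand-in for embedding_net: one dense layer per neighbour type
    xyz = tf.keras.layers.Dense(out_size, activation="tanh")(xyz)
    # natom x nei_type_i x out_size
    per_type.append(tf.reshape(xyz, [natom, nsel, out_size]))

# natom x nei x out_size, then average over neighbours with the 1/5 rescale
xyz_all = tf.concat(per_type, axis=1)
result = tf.reduce_mean(xyz_all, axis=1) * (1. / 5.)
print(result.shape)              # (4, 8)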
Example #2
    def _embedding_net(self,
                       inputs,
                       natoms,
                       filter_neuron,
                       activation_fn=tf.nn.tanh,
                       stddev=1.0,
                       bavg=0.0,
                       name='linear',
                       reuse=None,
                       seed=None,
                       trainable=True):
        '''
        inputs:  nf x na x (nei x 4)
        outputs: nf x na x nei x output_size
        '''
        # natom x (nei x 4)
        inputs = tf.reshape(inputs, [-1, self.ndescrpt])
        shape = inputs.get_shape().as_list()
        outputs_size = [1] + filter_neuron
        with tf.variable_scope(name, reuse=reuse):
            xyz_scatter_total = []
            # with natom x (nei x 4)
            inputs_i = inputs
            shape_i = inputs_i.get_shape().as_list()
            # with (natom x nei) x 4
            inputs_reshape = tf.reshape(inputs_i, [-1, 4])
            # with (natom x nei) x 1
            xyz_scatter = tf.reshape(tf.slice(inputs_reshape, [0, 0], [-1, 1]),
                                     [-1, 1])
            # with (natom x nei) x out_size
            xyz_scatter = embedding_net(xyz_scatter,
                                        self.filter_neuron,
                                        self.filter_precision,
                                        activation_fn=activation_fn,
                                        resnet_dt=self.filter_resnet_dt,
                                        stddev=stddev,
                                        bavg=bavg,
                                        seed=seed,
                                        trainable=trainable)
            # natom x nei x out_size
            xyz_scatter = tf.reshape(xyz_scatter,
                                     (-1, shape_i[1] // 4, outputs_size[-1]))
            xyz_scatter_total.append(xyz_scatter)
        # natom x nei x outputs_size
        xyz_scatter = tf.concat(xyz_scatter_total, axis=1)
        # nf x natom x nei x outputs_size
        xyz_scatter = tf.reshape(
            xyz_scatter,
            [tf.shape(inputs)[0], natoms[0], self.nnei, outputs_size[-1]])
        return xyz_scatter
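A compact sketch of the shape flow in _embedding_net above: each neighbour contributes a 4-component row, only the first (radial) component is fed to the embedding, and the result is reshaped back to nf x na x nei x out_size. Toy sizes, TF2 eager execution, and a Dense layer in place of embedding_net are assumptions.

import tensorflow as tf

nf, na, nnei, out_size = 2, 3, 5, 8
# nf x na x (nei x 4), as in the docstring above
d = tf.random.normal([nf, na, nnei * 4])

# (nf*na) x (nei*4)  ->  (nf*na*nei) x 4, keep only column 0
per_nei = tf.reshape(tf.reshape(d, [-1, nnei * 4]), [-1, 4])
s = tf.slice(per_nei, [0, 0], [-1, 1])

# stand-in for embedding_net: a single dense layer
g = tf.keras.layers.Dense(out_size, activation="tanh")(s)

# back to nf x na x nei x out_size
g = tf.reshape(g, [nf, na, nnei, out_size])
print(g.shape)                   # (2, 3, 5, 8)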
Example #3
    def _enrich(self, dipole, dof=3):
        coll = []
        sel_start_idx = 0
        for type_i in range(self.ntypes):
            if type_i in self.sel_type:
                di = tf.slice(dipole,
                              [0, sel_start_idx * dof],
                              [-1, self.t_natoms[2 + type_i] * dof])
                sel_start_idx += self.t_natoms[2 + type_i]
            else:
                di = tf.zeros([tf.shape(dipole)[0], self.t_natoms[2 + type_i] * dof],
                              dtype=global_tf_float_precision)
            coll.append(di)
        return tf.concat(coll, axis=1)
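The sketch below mirrors the padding logic of _enrich with concrete numbers: types listed in sel_type keep their dipole block, every other type is filled with zeros, and the blocks are concatenated in type order. The per-type atom counts and the single selected type are made-up values.

import tensorflow as tf

nframes, dof = 2, 3
natoms_per_type = [2, 3]         # hypothetical counts, i.e. t_natoms[2:]
sel_type = [1]                   # only type 1 carries a dipole
dipole = tf.random.normal([nframes, natoms_per_type[1] * dof])

coll, start = [], 0
for type_i, nat in enumerate(natoms_per_type):
    if type_i in sel_type:
        di = tf.slice(dipole, [0, start * dof], [-1, nat * dof])
        start += nat
    else:
        # zero padding for unselected types
        di = tf.zeros([nframes, nat * dof], dtype=dipole.dtype)
    coll.append(di)

full = tf.concat(coll, axis=1)   # nframes x (total natoms x dof)
print(full.shape)                # (2, 15)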
Example #4
    def _ebd_filter(self,
                    inputs,
                    atype,
                    natoms,
                    input_dict,
                    activation_fn=tf.nn.tanh,
                    stddev=1.0,
                    bavg=0.0,
                    name='linear',
                    reuse=None,
                    seed=None,
                    trainable=True):
        outputs_size = self.filter_neuron[-1]
        outputs_size_2 = self.n_axis_neuron
        # nf x natom x (nei x 4)
        nframes = tf.shape(inputs)[0]
        shape = tf.reshape(inputs, [-1, self.ndescrpt]).get_shape().as_list()

        # nf x natom x nei x outputs_size
        mat_g = self._embedding_net(inputs,
                                    natoms,
                                    self.filter_neuron,
                                    activation_fn=activation_fn,
                                    stddev=stddev,
                                    bavg=bavg,
                                    name=name,
                                    reuse=reuse,
                                    seed=seed,
                                    trainable=trainable)
        # nf x natom x nei x outputs_size
        mat_g = tf.reshape(mat_g,
                           [nframes, natoms[0], self.nnei, outputs_size])

        # (nf x natom) x nei x outputs_size
        if self.type_one_side:
            if self.numb_aparam > 0:
                aparam = input_dict['aparam']
                xyz_scatter \
                    = self._type_embedding_net_one_side_aparam(mat_g,
                                                               atype,
                                                               natoms,
                                                               aparam,
                                                               name = name,
                                                               reuse = reuse,
                                                               seed = seed,
                                                               trainable = trainable)
            else:
                xyz_scatter \
                    = self._type_embedding_net_one_side(mat_g,
                                                        atype,
                                                        natoms,
                                                        name = name,
                                                        reuse = reuse,
                                                        seed = seed,
                                                        trainable = trainable)
        else:
            xyz_scatter \
                = self._type_embedding_net_two_sides(mat_g,
                                                     atype,
                                                     natoms,
                                                     name = name,
                                                     reuse = reuse,
                                                     seed = seed,
                                                     trainable = trainable)

        # natom x nei x 4
        inputs_reshape = tf.reshape(inputs, [-1, shape[1] // 4, 4])
        # natom x 4 x outputs_size
        xyz_scatter_1 = tf.matmul(inputs_reshape,
                                  xyz_scatter,
                                  transpose_a=True)
        xyz_scatter_1 = xyz_scatter_1 * (4.0 / shape[1])
        # natom x 4 x outputs_size_2
        xyz_scatter_2 = tf.slice(xyz_scatter_1, [0, 0, 0],
                                 [-1, -1, outputs_size_2])
        # natom x 3 x outputs_size
        qmat = tf.slice(xyz_scatter_1, [0, 1, 0], [-1, 3, -1])
        # natom x outputs_size x 3
        qmat = tf.transpose(qmat, perm=[0, 2, 1])
        # natom x outputs_size x outputs_size_2
        result = tf.matmul(xyz_scatter_1, xyz_scatter_2, transpose_a=True)
        # natom x (outputs_size x outputs_size_2)
        result = tf.reshape(result, [-1, outputs_size_2 * outputs_size])

        return result, qmat
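The tensor algebra at the end of _ebd_filter can be checked with random data: contract the environment matrix with the embedding, keep the first n_axis_neuron columns, and form the invariant descriptor plus the rotation-covariant qmat. The sizes below are toy values, and the embedding G is random instead of the output of _embedding_net.

import tensorflow as tf

natom, nnei, out_size, axis_neuron = 3, 6, 8, 4
R = tf.random.normal([natom, nnei, 4])            # per-neighbour 4-component rows
G = tf.random.normal([natom, nnei, out_size])     # stand-in for the embedding output

# natom x 4 x out_size; the factor 4/shape[1] above equals 1/nnei
xyz1 = tf.matmul(R, G, transpose_a=True) / float(nnei)
# natom x 4 x axis_neuron
xyz2 = tf.slice(xyz1, [0, 0, 0], [-1, -1, axis_neuron])
# natom x out_size x 3: the rotation-covariant part (rows 1..3 of the 4)
qmat = tf.transpose(tf.slice(xyz1, [0, 1, 0], [-1, 3, -1]), perm=[0, 2, 1])
# natom x (out_size x axis_neuron): the invariant descriptor
result = tf.reshape(tf.matmul(xyz1, xyz2, transpose_a=True),
                    [-1, out_size * axis_neuron])
print(result.shape, qmat.shape)                   # (3, 32) (3, 8, 3)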
Example #5
    def build(self,
              inputs,
              input_dict,
              natoms,
              bias_atom_e=None,
              reuse=None,
              suffix=''):
        with tf.variable_scope('fitting_attr' + suffix, reuse=reuse):
            t_dfparam = tf.constant(self.numb_fparam,
                                    name='dfparam',
                                    dtype=tf.int32)
        start_index = 0
        inputs = tf.reshape(inputs, [-1, self.dim_descrpt * natoms[0]])
        shape = inputs.get_shape().as_list()

        if bias_atom_e is not None:
            assert len(bias_atom_e) == self.ntypes

        for type_i in range(self.ntypes):
            # cut-out inputs
            inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt],
                                [-1, natoms[2 + type_i] * self.dim_descrpt])
            inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
            start_index += natoms[2 + type_i]
            if bias_atom_e is None:
                type_bias_ae = 0.0
            else:
                type_bias_ae = bias_atom_e[type_i]

            layer = inputs_i
            if self.numb_fparam > 0:
                fparam = input_dict['fparam']
                ext_fparam = tf.reshape(fparam, [-1, self.numb_fparam])
                ext_fparam = tf.tile(ext_fparam, [1, natoms[2 + type_i]])
                ext_fparam = tf.reshape(ext_fparam, [-1, self.numb_fparam])
                layer = tf.concat([layer, ext_fparam], axis=1)
            for ii in range(0, len(self.n_neuron)):
                if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
                    layer += one_layer(layer,
                                       self.n_neuron[ii],
                                       name='layer_' + str(ii) + '_type_' +
                                       str(type_i) + suffix,
                                       reuse=reuse,
                                       seed=self.seed,
                                       use_timestep=self.resnet_dt)
                else:
                    layer = one_layer(layer,
                                      self.n_neuron[ii],
                                      name='layer_' + str(ii) + '_type_' +
                                      str(type_i) + suffix,
                                      reuse=reuse,
                                      seed=self.seed)
            final_layer = one_layer(layer,
                                    1,
                                    activation_fn=None,
                                    bavg=type_bias_ae,
                                    name='final_layer_type_' + str(type_i) +
                                    suffix,
                                    reuse=reuse,
                                    seed=self.seed)
            final_layer = tf.reshape(final_layer,
                                     [tf.shape(inputs)[0], natoms[2 + type_i]])

            # concat the results
            if type_i == 0:
                outs = final_layer
            else:
                outs = tf.concat([outs, final_layer], axis=1)

        return tf.reshape(outs, [-1])
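A stripped-down version of the per-type fitting loop in build: slice the descriptor per atom type, run a small network with a skip connection whenever two consecutive layer widths match, and concatenate the per-atom energies. Dense layers replace one_layer, fparam handling is omitted, and all sizes are toy values.

import tensorflow as tf

nframes, dim_descrpt = 2, 16
natoms_per_type = [3, 2]                      # hypothetical, i.e. natoms[2:]
n_neuron = [24, 24, 12]
d = tf.random.normal([nframes, sum(natoms_per_type) * dim_descrpt])

outs, start = [], 0
for type_i, nat in enumerate(natoms_per_type):
    # cut out this type's block and flatten to (nframes*nat) x dim_descrpt
    block = tf.slice(d, [0, start * dim_descrpt], [-1, nat * dim_descrpt])
    start += nat
    layer = tf.reshape(block, [-1, dim_descrpt])
    for ii, width in enumerate(n_neuron):
        dense = tf.keras.layers.Dense(width, activation="tanh")
        if ii >= 1 and width == n_neuron[ii - 1]:
            layer = layer + dense(layer)      # resnet-style skip
        else:
            layer = dense(layer)
    e_i = tf.keras.layers.Dense(1)(layer)     # atomic energy
    outs.append(tf.reshape(e_i, [nframes, nat]))

energies = tf.reshape(tf.concat(outs, axis=1), [-1])
print(energies.shape)                         # (nframes * total natoms,) = (10,)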
Example #6
    def build(self, input_d, rot_mat, natoms, reuse=None, suffix=''):
        start_index = 0
        inputs = tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]])
        rot_mat = tf.reshape(rot_mat, [-1, self.dim_rot_mat * natoms[0]])
        shape = inputs.get_shape().as_list()

        count = 0
        for type_i in range(self.ntypes):
            # cut-out inputs
            inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt],
                                [-1, natoms[2 + type_i] * self.dim_descrpt])
            inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
            rot_mat_i = tf.slice(rot_mat, [0, start_index * self.dim_rot_mat],
                                 [-1, natoms[2 + type_i] * self.dim_rot_mat])
            rot_mat_i = tf.reshape(rot_mat_i, [-1, self.dim_rot_mat_1, 3])
            start_index += natoms[2 + type_i]
            if type_i not in self.sel_type:
                continue
            layer = inputs_i
            for ii in range(0, len(self.n_neuron)):
                if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
                    layer += one_layer(layer,
                                       self.n_neuron[ii],
                                       name='layer_' + str(ii) + '_type_' +
                                       str(type_i) + suffix,
                                       reuse=reuse,
                                       seed=self.seed,
                                       use_timestep=self.resnet_dt)
                else:
                    layer = one_layer(layer,
                                      self.n_neuron[ii],
                                      name='layer_' + str(ii) + '_type_' +
                                      str(type_i) + suffix,
                                      reuse=reuse,
                                      seed=self.seed)
            # (nframes x natoms) x (naxis x naxis)
            final_layer = one_layer(layer,
                                    self.dim_rot_mat_1 * self.dim_rot_mat_1,
                                    activation_fn=None,
                                    name='final_layer_type_' + str(type_i) +
                                    suffix,
                                    reuse=reuse,
                                    seed=self.seed)
            # (nframes x natoms) x naxis x naxis
            final_layer = tf.reshape(final_layer, [
                tf.shape(inputs)[0] * natoms[2 + type_i], self.dim_rot_mat_1,
                self.dim_rot_mat_1
            ])
            # (nframes x natoms) x naxis x naxis
            final_layer = final_layer + tf.transpose(final_layer,
                                                     perm=[0, 2, 1])
            # (nframes x natoms) x naxis x 3(coord)
            final_layer = tf.matmul(final_layer, rot_mat_i)
            # (nframes x natoms) x 3(coord) x 3(coord)
            final_layer = tf.matmul(rot_mat_i, final_layer, transpose_a=True)
            # nframes x natoms x 3 x 3
            final_layer = tf.reshape(
                final_layer, [tf.shape(inputs)[0], natoms[2 + type_i], 3, 3])

            # concat the results
            if count == 0:
                outs = final_layer
            else:
                outs = tf.concat([outs, final_layer], axis=1)
            count += 1

        return tf.reshape(outs, [-1])
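Finally, a small sketch of the rotation step at the end of the polarizability build: symmetrize the naxis x naxis fitting output and sandwich it between the per-atom rotation matrix and its transpose to obtain a 3 x 3 tensor per atom. Shapes are toy values and both tensors are random placeholders for the real fitting-net and descriptor outputs.

import tensorflow as tf

nframes, nat, naxis = 2, 3, 4
rot_mat = tf.random.normal([nframes * nat, naxis, 3])   # per-atom rotation matrix
a = tf.random.normal([nframes * nat, naxis, naxis])     # fitting-net output block

sym = a + tf.transpose(a, perm=[0, 2, 1])               # symmetric naxis x naxis
tmp = tf.matmul(sym, rot_mat)                           # (nframes*nat) x naxis x 3
polar = tf.matmul(rot_mat, tmp, transpose_a=True)       # (nframes*nat) x 3 x 3
polar = tf.reshape(polar, [nframes, nat, 3, 3])
print(polar.shape)                                      # (2, 3, 3, 3)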