Example #1
    def build (self, 
               input_d,
               rot_mat,
               natoms,
               reuse = None,
               suffix = '') :
        start_index = 0
        inputs = tf.cast(tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision)
        rot_mat = tf.reshape(rot_mat, [-1, 9 * natoms[0]])

        count = 0
        outs_list = []
        for type_i in range(self.ntypes):
            # cut-out inputs
            inputs_i = tf.slice (inputs,
                                 [ 0, start_index*      self.dim_descrpt],
                                 [-1, natoms[2+type_i]* self.dim_descrpt] )
            inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
            rot_mat_i = tf.slice (rot_mat,
                                  [ 0, start_index*      9],
                                  [-1, natoms[2+type_i]* 9] )
            rot_mat_i = tf.reshape(rot_mat_i, [-1, 3, 3])
            start_index += natoms[2+type_i]
            if type_i not in self.sel_type :
                continue
            layer = inputs_i
            for ii in range(0,len(self.n_neuron)) :
                if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] :
                    layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision)
                else :
                    layer = one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision)
            # (nframes x natoms) x 9
            final_layer = one_layer(layer, 9, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, precision = self.fitting_precision, final_layer = True)
            # (nframes x natoms) x 3 x 3
            final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], 3, 3])
            # (nframes x natoms) x 3 x 3
            final_layer = final_layer + tf.transpose(final_layer, perm = [0,2,1])
            # (nframes x natoms) x 3 x 3(coord)
            final_layer = tf.matmul(final_layer, rot_mat_i)
            # (nframes x natoms) x 3(coord) x 3(coord)
            final_layer = tf.matmul(rot_mat_i, final_layer, transpose_a = True)
            # nframes x natoms x 3 x 3
            final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[2+type_i], 3, 3])

            # concat the results
            outs_list.append(final_layer)
            count += 1
        outs = tf.concat(outs_list, axis = 1)

        tf.summary.histogram('fitting_net_output', outs)
        return tf.cast(tf.reshape(outs, [-1]),  GLOBAL_TF_FLOAT_PRECISION)
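
The two matmuls above form the sandwich R^T (A + A^T) R, so each atom's 3x3 output is symmetric and transforms covariantly with the rotation matrix from the descriptor. A minimal NumPy sketch of just those tensor operations (hypothetical shapes, not the DeePMD-kit API):

import numpy as np

nat = 5                                    # atoms of one type in one frame
a = np.random.randn(nat, 3, 3)             # raw network output, one 3x3 per atom
rot = np.random.randn(nat, 3, 3)           # rot_mat_i from the descriptor

a = a + np.transpose(a, (0, 2, 1))         # symmetrize: A + A^T
out = np.transpose(rot, (0, 2, 1)) @ (a @ rot)   # R^T (A + A^T) R

assert np.allclose(out, np.transpose(out, (0, 2, 1)))  # symmetric per atom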
Example #2
    def test_op_tanh(self):
        w = tf.constant(
            [[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.9, 1, 1.1, 1.2]],
            dtype='double')
        x = tf.constant([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9],
                         [1.0, 1.1, 1.2]],
                        dtype='double')
        b = tf.constant([[0.1], [0.2], [0.3], [0.4]], dtype='double')
        xbar = tf.matmul(x, w) + b
        y = tf.nn.tanh(xbar)
        dy = op_module.unaggregated_dy_dx_s(y, w, xbar, tf.constant(1))
        dy_array = tf.Session().run(dy)
        answer = np.array(
            [[
                8.008666403121351973e-02, 1.513925729426658651e-01,
                2.134733287761668430e-01, 2.661983049806041501e-01
            ],
             [
                 4.010658815015744061e-02, 6.306476628799793926e-02,
                 7.332167904608145881e-02, 7.494218676568849269e-02
             ],
             [
                 1.561705624394135218e-02, 1.994112926507514427e-02,
                 1.887519955881525671e-02, 1.576442161040989692e-02
             ],
             [
                 5.492686739421748753e-03, 5.754985286040992763e-03,
                 4.493113544969218158e-03, 3.107638130764600777e-03
             ]])

        places = 18
        np.testing.assert_almost_equal(dy_array, answer, places)
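
The reference values can be cross-checked without the custom op: numerically they coincide with the elementwise tanh derivative of xbar scaled by the first row of w. A hedged NumPy sketch (this closed form is inferred from the tabulated answer, not from the op's documentation):

import numpy as np

w = np.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.9, 1.0, 1.1, 1.2]])
x = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9], [1.0, 1.1, 1.2]])
b = np.array([[0.1], [0.2], [0.3], [0.4]])

xbar = x @ w + b
y = np.tanh(xbar)
dy = (1.0 - y ** 2) * w[0]   # tanh'(xbar), broadcast against the first row of w
print(dy)                    # reproduces the `answer` array in the test above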
Example #3
    def test_op_gelu(self):
        w = tf.constant(
            [[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.9, 1, 1.1, 1.2]],
            dtype='double')
        x = tf.constant([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9],
                         [1.0, 1.1, 1.2]],
                        dtype='double')
        b = tf.constant([[0.1], [0.2], [0.3], [0.4]], dtype='double')
        xbar = tf.matmul(x, w) + b
        y = gelu(xbar)
        dy = op_module.unaggregated_dy_dx_s(y, w, xbar, tf.constant(2))
        dy_array = tf.Session().run(dy)
        answer = np.array(
            [[
                8.549286163555620821e-02, 1.782905778685600906e-01,
                2.776474599997448833e-01, 3.827650237273348965e-01
            ],
             [
                 1.089906023807040714e-01, 2.230820937721638697e-01,
                 3.381867859682909927e-01, 4.513008399758057232e-01
             ],
             [
                 1.124254240556722684e-01, 2.209918074710395253e-01,
                 3.238894323148118759e-01, 4.220357318198978414e-01
             ],
             [
                 1.072173273655498138e-01, 2.082159073100979807e-01,
                 3.059816075270163083e-01, 4.032981557798429595e-01
             ]])

        places = 18
        np.testing.assert_almost_equal(dy_array, answer, places)
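
The same cross-check works for the GELU case; here the derivative is taken by central finite differences so nothing has to be assumed about its closed form, only about the tanh-approximation GELU itself (an assumption consistent with the tabulated values):

import numpy as np

def gelu_np(x):  # tanh-approximation GELU, assumed to match DeePMD-kit's gelu
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x ** 3)))

w = np.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.9, 1.0, 1.1, 1.2]])
x = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9], [1.0, 1.1, 1.2]])
b = np.array([[0.1], [0.2], [0.3], [0.4]])
xbar = x @ w + b

eps = 1e-6                                          # central finite difference
dgelu = (gelu_np(xbar + eps) - gelu_np(xbar - eps)) / (2.0 * eps)
print(dgelu * w[0])                                 # ~matches `answer` above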
Example #4
    def _net(self, inputs, name, reuse=False):
        with tf.variable_scope(name, reuse=reuse):
            net_w = tf.get_variable('net_w', [self.ndescrpt],
                                    GLOBAL_TF_FLOAT_PRECISION,
                                    tf.constant_initializer(self.net_w_i))
        dot_v = tf.matmul(tf.reshape(inputs, [-1, self.ndescrpt]),
                          tf.reshape(net_w, [self.ndescrpt, 1]))
        return tf.reshape(dot_v, [-1])
Example #5
    def _net(self, inputs, name, reuse=False):
        with tf.variable_scope(name, reuse=reuse):
            net_w = tf.get_variable('net_w', [self.ndescrpt],
                                    global_tf_float_precision,
                                    tf.constant_initializer(self.net_w_i))
        dot_v = tf.matmul(tf.reshape(inputs, [-1, self.ndescrpt]),
                          tf.reshape(net_w, [self.ndescrpt, 1]))
        return tf.reshape(dot_v, [-1])
Example #6
    def _make_data(self, xx, idx):
        with self.sub_graph.as_default():
            with self.sub_sess.as_default():
                xx = tf.reshape(xx, [xx.size, -1])
                for layer in range(self.layer_size):
                    if layer == 0:
                        xbar = tf.matmul(
                            xx, self.matrix["layer_" + str(layer + 1)][idx]) + self.bias["layer_" + str(layer + 1)][idx]
                        if self.neuron[0] == 1:
                            yy = self._layer_0(
                                xx, self.matrix["layer_" + str(layer + 1)][idx], self.bias["layer_" + str(layer + 1)][idx]) + xx
                            dy = op_module.unaggregated_dy_dx_s(
                                yy, self.matrix["layer_" + str(layer + 1)][idx], xbar, tf.constant(self.functype)) + tf.ones([1, 1], yy.dtype)
                            dy2 = op_module.unaggregated_dy2_dx_s(
                                yy, dy, self.matrix["layer_" + str(layer + 1)][idx], xbar, tf.constant(self.functype))
                        elif self.neuron[0] == 2:
                            tt, yy = self._layer_1(
                                xx, self.matrix["layer_" + str(layer + 1)][idx], self.bias["layer_" + str(layer + 1)][idx])
                            dy = op_module.unaggregated_dy_dx_s(
                                yy - tt, self.matrix["layer_" + str(layer + 1)][idx], xbar, tf.constant(self.functype)) + tf.ones([1, 2], yy.dtype)
                            dy2 = op_module.unaggregated_dy2_dx_s(
                                yy - tt, dy, self.matrix["layer_" + str(layer + 1)][idx], xbar, tf.constant(self.functype))
                        else:
                            yy = self._layer_0(
                                xx, self.matrix["layer_" + str(layer + 1)][idx], self.bias["layer_" + str(layer + 1)][idx])
                            dy = op_module.unaggregated_dy_dx_s(
                                yy, self.matrix["layer_" + str(layer + 1)][idx], xbar, tf.constant(self.functype))
                            dy2 = op_module.unaggregated_dy2_dx_s(
                                yy, dy, self.matrix["layer_" + str(layer + 1)][idx], xbar, tf.constant(self.functype))
                    else:
                        ybar = tf.matmul(
                            yy, self.matrix["layer_" + str(layer + 1)][idx]) + self.bias["layer_" + str(layer + 1)][idx]
                        tt, zz = self._layer_1(
                            yy, self.matrix["layer_" + str(layer + 1)][idx], self.bias["layer_" + str(layer + 1)][idx])
                        dz = op_module.unaggregated_dy_dx(
                            zz - tt, self.matrix["layer_" + str(layer + 1)][idx], dy, ybar, tf.constant(self.functype))
                        dy2 = op_module.unaggregated_dy2_dx(
                            zz - tt, self.matrix["layer_" + str(layer + 1)][idx], dy, dy2, ybar, tf.constant(self.functype))
                        dy = dz
                        yy = zz

                vv = zz.eval()
                dd = dy.eval()
                d2 = dy2.eval()
        return vv, dd, d2
Example #7
def one_layer(inputs,
              outputs_size,
              activation_fn=tf.nn.tanh,
              precision=global_tf_float_precision,
              stddev=1.0,
              bavg=0.0,
              name='linear',
              reuse=None,
              seed=None,
              use_timestep=False,
              trainable=True,
              useBN=False):
    with tf.variable_scope(name, reuse=reuse):
        shape = inputs.get_shape().as_list()
        w = tf.get_variable(
            'matrix', [shape[1], outputs_size],
            precision,
            tf.random_normal_initializer(stddev=stddev /
                                         np.sqrt(shape[1] + outputs_size),
                                         seed=seed),
            trainable=trainable)
        b = tf.get_variable('bias', [outputs_size],
                            precision,
                            tf.random_normal_initializer(stddev=stddev,
                                                         mean=bavg,
                                                         seed=seed),
                            trainable=trainable)
        hidden = tf.matmul(inputs, w) + b
        if activation_fn is not None and use_timestep:
            idt = tf.get_variable('idt', [outputs_size],
                                  precision,
                                  tf.random_normal_initializer(stddev=0.001,
                                                               mean=0.1,
                                                               seed=seed),
                                  trainable=trainable)
        if activation_fn is not None:
            if useBN:
                pass
                # hidden_bn = self._batch_norm(hidden, name=name+'_normalization', reuse=reuse)
                # return activation_fn(hidden_bn)
            else:
                if use_timestep:
                    return tf.reshape(activation_fn(hidden),
                                      [-1, outputs_size]) * idt
                else:
                    return tf.reshape(activation_fn(hidden),
                                      [-1, outputs_size])
        else:
            if useBN:
                pass
                # return self._batch_norm(hidden, name=name+'_normalization', reuse=reuse)
            else:
                return hidden
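
Stripped of the variable scoping, `one_layer` computes a dense affine map, an optional activation, and an optional per-output "timestep" scale (`idt`) used by the ResNet-dt construction. A minimal NumPy sketch of that arithmetic only, not the TF graph code:

import numpy as np

def one_layer_math(x, w, b, idt=None, activation=np.tanh):
    """activation(x @ w + b), optionally scaled elementwise by idt."""
    hidden = x @ w + b
    if activation is None:
        return hidden
    out = activation(hidden)
    return out * idt if idt is not None else out

x = np.random.randn(4, 3)
w = np.random.randn(3, 8)
b = np.random.randn(8)
idt = 0.1 + 0.001 * np.random.randn(8)      # mirrors the idt initializer (mean 0.1)
print(one_layer_math(x, w, b, idt).shape)   # (4, 8)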
Example #8
    def build(self, learning_rate, natoms, model_dict, label_dict, suffix):
        coord = model_dict['coord']
        energy = model_dict['energy']
        atom_ener = model_dict['atom_ener']
        nframes = tf.shape(atom_ener)[0]
        natoms = tf.shape(atom_ener)[1]
        # build energy dipole
        atom_ener0 = atom_ener - tf.reshape(
            tf.tile(
                tf.reshape(energy / global_cvt_2_ener_float(natoms), [-1, 1]),
                [1, natoms]), [nframes, natoms])
        coord = tf.reshape(coord, [nframes, natoms, 3])
        atom_ener0 = tf.reshape(atom_ener0, [nframes, 1, natoms])
        ener_dipole = tf.matmul(atom_ener0, coord)
        ener_dipole = tf.reshape(ener_dipole, [nframes, 3])

        energy_hat = label_dict['energy']
        ener_dipole_hat = label_dict['energy_dipole']
        find_energy = label_dict['find_energy']
        find_ener_dipole = label_dict['find_energy_dipole']

        l2_ener_loss = tf.reduce_mean(tf.square(energy - energy_hat),
                                      name='l2_' + suffix)

        ener_dipole_reshape = tf.reshape(ener_dipole, [-1])
        ener_dipole_hat_reshape = tf.reshape(ener_dipole_hat, [-1])
        l2_ener_dipole_loss = tf.reduce_mean(
            tf.square(ener_dipole_reshape - ener_dipole_hat_reshape),
            name='l2_' + suffix)

        # atom_norm_ener  = 1./ global_cvt_2_ener_float(natoms[0])
        atom_norm_ener = 1. / global_cvt_2_ener_float(natoms)
        pref_e = global_cvt_2_ener_float(
            find_energy * (self.limit_pref_e +
                           (self.start_pref_e - self.limit_pref_e) *
                           learning_rate / self.starter_learning_rate))
        pref_ed = global_cvt_2_tf_float(
            find_ener_dipole * (self.limit_pref_ed +
                                (self.start_pref_ed - self.limit_pref_ed) *
                                learning_rate / self.starter_learning_rate))

        l2_loss = 0
        more_loss = {}
        l2_loss += atom_norm_ener * (pref_e * l2_ener_loss)
        l2_loss += global_cvt_2_ener_float(pref_ed * l2_ener_dipole_loss)
        more_loss['l2_ener_loss'] = l2_ener_loss
        more_loss['l2_ener_dipole_loss'] = l2_ener_dipole_loss

        self.l2_l = l2_loss
        self.l2_more = more_loss
        return l2_loss, more_loss
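
The "energy dipole" assembled above shifts each frame's atomic energies so they sum to zero, then contracts them with the atomic coordinates, giving a 3-vector per frame. A hedged NumPy sketch of that construction (hypothetical shapes):

import numpy as np

nframes, natoms = 2, 6
atom_ener = np.random.randn(nframes, natoms)
coord = np.random.randn(nframes, natoms, 3)
energy = atom_ener.sum(axis=1)

# shift so each frame's atomic energies sum to zero
atom_ener0 = atom_ener - (energy / natoms)[:, None]
# contract with the coordinates: nframes x 3
ener_dipole = np.einsum('fi,fid->fd', atom_ener0, coord)
print(ener_dipole.shape)   # (2, 3)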
Example #9
    def build (self, 
               input_d,
               rot_mat,
               natoms,
               reuse = None,
               suffix = '') :
        start_index = 0
        inputs = tf.cast(tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision)
        rot_mat = tf.reshape(rot_mat, [-1, 9 * natoms[0]])

        count = 0
        for type_i in range(self.ntypes):
            # cut-out inputs
            inputs_i = tf.slice (inputs,
                                 [ 0, start_index*      self.dim_descrpt],
                                 [-1, natoms[2+type_i]* self.dim_descrpt] )
            inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
            rot_mat_i = tf.slice (rot_mat,
                                  [ 0, start_index*      9],
                                  [-1, natoms[2+type_i]* 9] )
            rot_mat_i = tf.reshape(rot_mat_i, [-1, 3, 3])
            start_index += natoms[2+type_i]
            if type_i not in self.sel_type :
                continue
            layer = inputs_i
            for ii in range(0,len(self.n_neuron)) :
                if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] :
                    layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, uniform_seed = self.uniform_seed)
                else :
                    layer = one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, uniform_seed = self.uniform_seed)
                if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift
            # (nframes x natoms) x (nwfc x 3)
            final_layer = one_layer(layer, self.wfc_numb * 3, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, precision = self.fitting_precision, uniform_seed = self.uniform_seed)
            if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift
            # (nframes x natoms) x nwfc(wc) x 3(coord_local)
            final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], self.wfc_numb, 3])
            # (nframes x natoms) x nwfc(wc) x 3(coord)
            final_layer = tf.matmul(final_layer, rot_mat_i)
            # nframes x natoms x nwfc(wc) x 3(coord)
            final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[2+type_i], self.wfc_numb, 3])

            # concat the results
            if count == 0:
                outs = final_layer
            else:
                outs = tf.concat([outs, final_layer], axis = 1)
            count += 1
        
        tf.summary.histogram('fitting_net_output', outs)
        return tf.cast(tf.reshape(outs, [-1]),  GLOBAL_TF_FLOAT_PRECISION)
Example #10
    def _layer_1(self, x, w, b):
        t = tf.concat([x, x], axis=1)
        return t, self.activation_fn(tf.matmul(x, w) + b) + t
Example #11
    def _layer_0(self, x, w, b):
        return self.activation_fn(tf.matmul(x, w) + b)
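
`_layer_0` and `_layer_1` are the two residual building blocks consumed by `_make_data` above: a plain activated layer, and the doubled-width case whose skip connection is the input concatenated with itself. A small NumPy sketch of the pair (assuming a tanh activation):

import numpy as np

def layer_0(x, w, b):
    return np.tanh(x @ w + b)             # plain activated layer

def layer_1(x, w, b):
    t = np.concatenate([x, x], axis=1)    # (x, x) skip for the doubled-width case
    return t, np.tanh(x @ w + b) + t

x = np.random.randn(5, 2)
w = np.random.randn(2, 4)
b = np.random.randn(4)
t, y = layer_1(x, w, b)
print(t.shape, y.shape)   # (5, 4) (5, 4)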
Example #12
    def build(self, input_d, rot_mat, natoms, reuse=None, suffix=''):
        start_index = 0
        inputs = tf.cast(
            tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]),
            self.fitting_precision)
        rot_mat = tf.reshape(rot_mat, [-1, self.dim_rot_mat * natoms[0]])

        count = 0
        for type_i in range(self.ntypes):
            # cut-out inputs
            inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt],
                                [-1, natoms[2 + type_i] * self.dim_descrpt])
            inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
            rot_mat_i = tf.slice(rot_mat, [0, start_index * self.dim_rot_mat],
                                 [-1, natoms[2 + type_i] * self.dim_rot_mat])
            rot_mat_i = tf.reshape(rot_mat_i, [-1, self.dim_rot_mat_1, 3])
            start_index += natoms[2 + type_i]
            if type_i not in self.sel_type:
                continue
            layer = inputs_i
            for ii in range(0, len(self.n_neuron)):
                if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
                    layer += one_layer(
                        layer,
                        self.n_neuron[ii],
                        name='layer_' + str(ii) + '_type_' + str(type_i) +
                        suffix,
                        reuse=reuse,
                        seed=self.seed,
                        use_timestep=self.resnet_dt,
                        activation_fn=self.fitting_activation_fn,
                        precision=self.fitting_precision)
                else:
                    layer = one_layer(layer,
                                      self.n_neuron[ii],
                                      name='layer_' + str(ii) + '_type_' +
                                      str(type_i) + suffix,
                                      reuse=reuse,
                                      seed=self.seed,
                                      activation_fn=self.fitting_activation_fn,
                                      precision=self.fitting_precision)
            # (nframes x natoms) x naxis
            final_layer = one_layer(layer,
                                    self.dim_rot_mat_1,
                                    activation_fn=None,
                                    name='final_layer_type_' + str(type_i) +
                                    suffix,
                                    reuse=reuse,
                                    seed=self.seed,
                                    precision=self.fitting_precision)
            # (nframes x natoms) x 1 x naxis
            final_layer = tf.reshape(final_layer, [
                tf.shape(inputs)[0] * natoms[2 + type_i], 1, self.dim_rot_mat_1
            ])
            # (nframes x natoms) x 1 x 3(coord)
            final_layer = tf.matmul(final_layer, rot_mat_i)
            # nframes x natoms x 3
            final_layer = tf.reshape(
                final_layer, [tf.shape(inputs)[0], natoms[2 + type_i], 3])

            # concat the results
            if count == 0:
                outs = final_layer
            else:
                outs = tf.concat([outs, final_layer], axis=1)
            count += 1

        return tf.cast(tf.reshape(outs, [-1]), global_tf_float_precision)
Example #13
    def _filter(self,
                inputs,
                type_input,
                natoms,
                activation_fn=tf.nn.tanh,
                stddev=1.0,
                bavg=0.0,
                name='linear',
                reuse=None,
                seed=None,
                trainable=True):
        # natom x (nei x 4)
        shape = inputs.get_shape().as_list()
        outputs_size = [1] + self.filter_neuron
        outputs_size_2 = self.n_axis_neuron
        with tf.variable_scope(name, reuse=reuse):
            start_index = 0
            xyz_scatter_total = []
            for type_i in range(self.ntypes):
                # cut-out inputs
                # with natom x (nei_type_i x 4)
                inputs_i = tf.slice(inputs, [0, start_index * 4],
                                    [-1, self.sel_a[type_i] * 4])
                start_index += self.sel_a[type_i]
                shape_i = inputs_i.get_shape().as_list()
                # with (natom x nei_type_i) x 4
                inputs_reshape = tf.reshape(inputs_i, [-1, 4])
                xyz_scatter = tf.reshape(
                    tf.slice(inputs_reshape, [0, 0], [-1, 1]), [-1, 1])
                if (type_input, type_i) not in self.exclude_types:
                    for ii in range(1, len(outputs_size)):
                        w = tf.get_variable(
                            'matrix_' + str(ii) + '_' + str(type_i),
                            [outputs_size[ii - 1], outputs_size[ii]],
                            self.filter_precision,
                            tf.random_normal_initializer(
                                stddev=stddev / np.sqrt(outputs_size[ii] +
                                                        outputs_size[ii - 1]),
                                seed=seed),
                            trainable=trainable)
                        b = tf.get_variable(
                            'bias_' + str(ii) + '_' + str(type_i),
                            [1, outputs_size[ii]],
                            self.filter_precision,
                            tf.random_normal_initializer(stddev=stddev,
                                                         mean=bavg,
                                                         seed=seed),
                            trainable=trainable)
                        if self.filter_resnet_dt:
                            idt = tf.get_variable(
                                'idt_' + str(ii) + '_' + str(type_i),
                                [1, outputs_size[ii]],
                                self.filter_precision,
                                tf.random_normal_initializer(stddev=0.001,
                                                             mean=1.0,
                                                             seed=seed),
                                trainable=trainable)
                        if outputs_size[ii] == outputs_size[ii - 1]:
                            if self.filter_resnet_dt:
                                xyz_scatter += activation_fn(
                                    tf.matmul(xyz_scatter, w) + b) * idt
                            else:
                                xyz_scatter += activation_fn(
                                    tf.matmul(xyz_scatter, w) + b)
                        elif outputs_size[ii] == outputs_size[ii - 1] * 2:
                            if self.filter_resnet_dt:
                                xyz_scatter = tf.concat(
                                    [xyz_scatter, xyz_scatter],
                                    1) + activation_fn(
                                        tf.matmul(xyz_scatter, w) + b) * idt
                            else:
                                xyz_scatter = tf.concat(
                                    [xyz_scatter, xyz_scatter],
                                    1) + activation_fn(
                                        tf.matmul(xyz_scatter, w) + b)
                        else:
                            xyz_scatter = activation_fn(
                                tf.matmul(xyz_scatter, w) + b)
                else:
                    w = tf.zeros((outputs_size[0], outputs_size[-1]),
                                 dtype=global_tf_float_precision)
                    xyz_scatter = tf.matmul(xyz_scatter, w)
                # natom x nei_type_i x out_size
                xyz_scatter = tf.reshape(
                    xyz_scatter, (-1, shape_i[1] // 4, outputs_size[-1]))
                xyz_scatter_total.append(xyz_scatter)

            # natom x nei x outputs_size
            xyz_scatter = tf.concat(xyz_scatter_total, axis=1)
            # natom x nei x 4
            inputs_reshape = tf.reshape(inputs, [-1, shape[1] // 4, 4])
            # natom x 4 x outputs_size
            xyz_scatter_1 = tf.matmul(inputs_reshape,
                                      xyz_scatter,
                                      transpose_a=True)
            xyz_scatter_1 = xyz_scatter_1 * (4.0 / shape[1])
            # natom x 4 x outputs_size_2
            xyz_scatter_2 = tf.slice(xyz_scatter_1, [0, 0, 0],
                                     [-1, -1, outputs_size_2])
            # # natom x 3 x outputs_size_2
            # qmat = tf.slice(xyz_scatter_2, [0,1,0], [-1, 3, -1])
            # natom x 3 x outputs_size_1
            qmat = tf.slice(xyz_scatter_1, [0, 1, 0], [-1, 3, -1])
            # natom x outputs_size x 3
            qmat = tf.transpose(qmat, perm=[0, 2, 1])
            # natom x outputs_size x outputs_size_2
            result = tf.matmul(xyz_scatter_1, xyz_scatter_2, transpose_a=True)
            # natom x (outputs_size x outputs_size_2)
            result = tf.reshape(result,
                                [-1, outputs_size_2 * outputs_size[-1]])

        return result, qmat
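
The contraction at the end of `_filter` is the DeepPot-SE descriptor: with the environment matrix R (natom x nei x 4) and the embedded neighbors G (natom x nei x M), it forms S = R^T G / nei and the invariant D = S^T S[:, :, :M2] (note 4.0/shape[1] equals 1/nei). A NumPy shape sketch of those steps (illustrative sizes):

import numpy as np

natom, nei, M, M2 = 3, 10, 16, 4
R = np.random.randn(natom, nei, 4)            # environment matrix (inputs_reshape)
G = np.random.randn(natom, nei, M)            # embedded neighbors (xyz_scatter)

S = np.transpose(R, (0, 2, 1)) @ G / nei      # natom x 4 x M   (xyz_scatter_1)
S2 = S[:, :, :M2]                             # natom x 4 x M2  (xyz_scatter_2)
qmat = np.transpose(S[:, 1:4, :], (0, 2, 1))  # natom x M x 3, rotation-equivariant part
D = np.transpose(S, (0, 2, 1)) @ S2           # natom x M x M2  (result)
print(D.reshape(natom, -1).shape)             # natom x (M * M2)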
Example #14
    def _filter_r(self,
                  inputs,
                  type_input,
                  natoms,
                  activation_fn=tf.nn.tanh,
                  stddev=1.0,
                  bavg=0.0,
                  name='linear',
                  reuse=None,
                  seed=None,
                  trainable=True):
        # natom x nei
        outputs_size = [1] + self.filter_neuron
        with tf.variable_scope(name, reuse=reuse):
            start_index = 0
            xyz_scatter_total = []
            for type_i in range(self.ntypes):
                # cut-out inputs
                # with natom x nei_type_i
                inputs_i = tf.slice(inputs, [0, start_index],
                                    [-1, self.sel_r[type_i]])
                start_index += self.sel_r[type_i]
                shape_i = inputs_i.get_shape().as_list()
                # with (natom x nei_type_i) x 1
                xyz_scatter = tf.reshape(inputs_i, [-1, 1])
                if (type_input, type_i) not in self.exclude_types:
                    for ii in range(1, len(outputs_size)):
                        w = tf.get_variable(
                            'matrix_' + str(ii) + '_' + str(type_i),
                            [outputs_size[ii - 1], outputs_size[ii]],
                            self.filter_precision,
                            tf.random_normal_initializer(
                                stddev=stddev / np.sqrt(outputs_size[ii] +
                                                        outputs_size[ii - 1]),
                                seed=seed),
                            trainable=trainable)
                        b = tf.get_variable(
                            'bias_' + str(ii) + '_' + str(type_i),
                            [1, outputs_size[ii]],
                            self.filter_precision,
                            tf.random_normal_initializer(stddev=stddev,
                                                         mean=bavg,
                                                         seed=seed),
                            trainable=trainable)
                        if self.filter_resnet_dt:
                            idt = tf.get_variable(
                                'idt_' + str(ii) + '_' + str(type_i),
                                [1, outputs_size[ii]],
                                self.filter_precision,
                                tf.random_normal_initializer(stddev=0.001,
                                                             mean=1.0,
                                                             seed=seed),
                                trainable=trainable)
                        if outputs_size[ii] == outputs_size[ii - 1]:
                            if self.filter_resnet_dt:
                                xyz_scatter += activation_fn(
                                    tf.matmul(xyz_scatter, w) + b) * idt
                            else:
                                xyz_scatter += activation_fn(
                                    tf.matmul(xyz_scatter, w) + b)
                        elif outputs_size[ii] == outputs_size[ii - 1] * 2:
                            if self.filter_resnet_dt:
                                xyz_scatter = tf.concat(
                                    [xyz_scatter, xyz_scatter],
                                    1) + activation_fn(
                                        tf.matmul(xyz_scatter, w) + b) * idt
                            else:
                                xyz_scatter = tf.concat(
                                    [xyz_scatter, xyz_scatter],
                                    1) + activation_fn(
                                        tf.matmul(xyz_scatter, w) + b)
                        else:
                            xyz_scatter = activation_fn(
                                tf.matmul(xyz_scatter, w) + b)
                else:
                    w = tf.zeros((outputs_size[0], outputs_size[-1]),
                                 dtype=global_tf_float_precision)
                    xyz_scatter = tf.matmul(xyz_scatter, w)
                # natom x nei_type_i x out_size
                xyz_scatter = tf.reshape(xyz_scatter,
                                         (-1, shape_i[1], outputs_size[-1]))
                xyz_scatter_total.append(xyz_scatter)

            # natom x nei x outputs_size
            xyz_scatter = tf.concat(xyz_scatter_total, axis=1)
            # natom x outputs_size
            #
            res_rescale = 1. / 5.
            result = tf.reduce_mean(xyz_scatter, axis=1) * res_rescale

        return result
Example #15
def embedding_net(xx,
                  network_size,
                  precision,
                  activation_fn=tf.nn.tanh,
                  resnet_dt=False,
                  name_suffix='',
                  stddev=1.0,
                  bavg=0.0,
                  seed=None,
                  trainable=True,
                  uniform_seed=False,
                  initial_variables=None,
                  mixed_prec=None):
    r"""The embedding network.

    The embedding network function :math:`\mathcal{N}` is the composition
    of multiple layers :math:`\mathcal{L}^{(i)}`:

    .. math::
        \mathcal{N} = \mathcal{L}^{(n)} \circ \mathcal{L}^{(n-1)}
        \circ \cdots \circ \mathcal{L}^{(1)}

    A layer :math:`\mathcal{L}` is given by one of the following forms,
    depending on the number of nodes: [1]_

    .. math::
        \mathbf{y}=\mathcal{L}(\mathbf{x};\mathbf{w},\mathbf{b})=
        \begin{cases}
            \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b}) + \mathbf{x}, & N_2=N_1 \\
            \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b}) + (\mathbf{x}, \mathbf{x}), & N_2 = 2N_1\\
            \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b}), & \text{otherwise} \\
        \end{cases}

    where :math:`\mathbf{x} \in \mathbb{R}^{N_1}` is the input vector and :math:`\mathbf{y} \in \mathbb{R}^{N_2}`
    is the output vector. :math:`\mathbf{w} \in \mathbb{R}^{N_1 \times N_2}` and
    :math:`\mathbf{b} \in \mathbb{R}^{N_2}` are weights and biases, respectively,
    both of which are trainable if `trainable` is `True`. :math:`\boldsymbol{\phi}`
    is the activation function.

    Parameters
    ----------
    xx : Tensor   
        Input tensor :math:`\mathbf{x}` of shape [-1,1]
    network_size: list of int
        Size of the embedding network. For example [16,32,64]
    precision: 
        Precision of network weights. For example, tf.float64
    activation_fn:
        Activation function :math:`\boldsymbol{\phi}`
    resnet_dt: boolean
        Using time-step in the ResNet construction
    name_suffix: str
        The name suffix appended to each variable.
    stddev: float
        Standard deviation of initializing network parameters
    bavg: float
        Mean of network initial bias
    seed: int
        Random seed for initializing network parameters
    trainable: boolean
        If the network is trainable
    uniform_seed : boolean
        Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed
    initial_variables : dict
        The input dict which stores the embedding net variables
    mixed_prec
        The input dict which stores the mixed precision setting for the embedding net


    References
    ----------
    .. [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Identity mappings
       in deep residual networks. In Computer Vision – ECCV 2016, pages 630–645.
       Springer International Publishing, 2016.
    """
    input_shape = xx.get_shape().as_list()
    outputs_size = [input_shape[1]] + network_size

    for ii in range(1, len(outputs_size)):
        w_initializer = tf.random_normal_initializer(
            stddev=stddev / np.sqrt(outputs_size[ii] + outputs_size[ii - 1]),
            seed=seed if (seed is None or uniform_seed) else seed + ii * 3 + 0)
        b_initializer = tf.random_normal_initializer(
            stddev=stddev,
            mean=bavg,
            seed=seed if (seed is None or uniform_seed) else seed + 3 * ii + 1)
        if initial_variables is not None:
            scope = tf.get_variable_scope().name
            w_initializer = tf.constant_initializer(
                initial_variables[scope + '/matrix_' + str(ii) + name_suffix])
            b_initializer = tf.constant_initializer(
                initial_variables[scope + '/bias_' + str(ii) + name_suffix])
        w = tf.get_variable('matrix_' + str(ii) + name_suffix,
                            [outputs_size[ii - 1], outputs_size[ii]],
                            precision,
                            w_initializer,
                            trainable=trainable)
        variable_summaries(w, 'matrix_' + str(ii) + name_suffix)

        b = tf.get_variable('bias_' + str(ii) + name_suffix,
                            [outputs_size[ii]],
                            precision,
                            b_initializer,
                            trainable=trainable)
        variable_summaries(b, 'bias_' + str(ii) + name_suffix)

        if mixed_prec is not None:
            xx = tf.cast(xx, get_precision(mixed_prec['compute_prec']))
            w = tf.cast(w, get_precision(mixed_prec['compute_prec']))
            b = tf.cast(b, get_precision(mixed_prec['compute_prec']))
        hidden = tf.reshape(activation_fn(tf.nn.bias_add(tf.matmul(xx, w), b)),
                            [-1, outputs_size[ii]])
        if resnet_dt:
            idt_initializer = tf.random_normal_initializer(
                stddev=0.001,
                mean=1.0,
                seed=seed if
                (seed is None or uniform_seed) else seed + 3 * ii + 2)
            if initial_variables is not None:
                scope = tf.get_variable_scope().name
                idt_initializer = tf.constant_initializer(
                    initial_variables[scope + '/idt_' + str(ii) + name_suffix])
            idt = tf.get_variable('idt_' + str(ii) + name_suffix,
                                  [1, outputs_size[ii]],
                                  precision,
                                  idt_initializer,
                                  trainable=trainable)
            variable_summaries(idt, 'idt_' + str(ii) + name_suffix)
            if mixed_prec is not None:
                idt = tf.cast(idt, get_precision(mixed_prec['compute_prec']))

        if outputs_size[ii] == outputs_size[ii - 1]:
            if resnet_dt:
                xx += hidden * idt
            else:
                xx += hidden
        elif outputs_size[ii] == outputs_size[ii - 1] * 2:
            if resnet_dt:
                xx = tf.concat([xx, xx], 1) + hidden * idt
            else:
                xx = tf.concat([xx, xx], 1) + hidden
        else:
            xx = hidden
    if mixed_prec is not None:
        xx = tf.cast(xx, get_precision(mixed_prec['output_prec']))
    return xx
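
A minimal NumPy sketch of the docstring's three-case layer rule, independent of the TF graph code above (same-width identity skip, doubled-width (x, x) skip, plain layer otherwise):

import numpy as np

def embedding_layer(x, w, b, activation=np.tanh):
    """One layer of the three-case rule from the docstring above."""
    n1, n2 = w.shape
    y = activation(x @ w + b)
    if n2 == n1:
        return y + x                                # identity skip
    if n2 == 2 * n1:
        return y + np.concatenate([x, x], axis=1)   # (x, x) skip
    return y                                        # plain layer

x = np.random.randn(7, 1)                           # input of shape [-1, 1]
for n2 in (16, 32, 64):                             # network_size = [16, 32, 64]
    w = np.random.randn(x.shape[1], n2)
    b = np.random.randn(n2)
    x = embedding_layer(x, w, b)
print(x.shape)   # (7, 64)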
Example #16
    def build (self, 
               input_d : tf.Tensor,
               rot_mat : tf.Tensor,
               natoms : tf.Tensor,
               reuse : bool = None,
               suffix : str = '') :
        """
        Build the computational graph for fitting net
        
        Parameters
        ----------
        input_d
                The input descriptor
        rot_mat
                The rotation matrix from the descriptor.
        natoms
                The number of atoms. This tensor has length Ntypes + 2:
                natoms[0]: number of local atoms
                natoms[1]: total number of atoms held by this processor
                natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
        reuse
                The weights in the networks should be reused when getting the variables.
        suffix
                Name suffix to identify this descriptor

        Returns
        -------
        atomic_polar
                The atomic polarizability        
        """
        start_index = 0
        inputs = tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]])
        rot_mat = tf.reshape(rot_mat, [-1, self.dim_rot_mat * natoms[0]])

        count = 0
        outs_list = []
        for type_i in range(self.ntypes):
            # cut-out inputs
            inputs_i = tf.slice (inputs,
                                 [ 0, start_index*      self.dim_descrpt],
                                 [-1, natoms[2+type_i]* self.dim_descrpt] )
            inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
            rot_mat_i = tf.slice (rot_mat,
                                  [ 0, start_index*      self.dim_rot_mat],
                                  [-1, natoms[2+type_i]* self.dim_rot_mat] )
            rot_mat_i = tf.reshape(rot_mat_i, [-1, self.dim_rot_mat_1, 3])
            start_index += natoms[2+type_i]
            if type_i not in self.sel_type :
                continue
            layer = inputs_i
            for ii in range(0,len(self.n_neuron)) :
                if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] :
                    layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, uniform_seed = self.uniform_seed, initial_variables = self.fitting_net_variables, mixed_prec = self.mixed_prec)
                else :
                    layer = one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, uniform_seed = self.uniform_seed, initial_variables = self.fitting_net_variables, mixed_prec = self.mixed_prec)
                if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift
            if self.fit_diag :
                bavg = np.zeros(self.dim_rot_mat_1)
                # bavg[0] = self.avgeig[0]
                # bavg[1] = self.avgeig[1]
                # bavg[2] = self.avgeig[2]
                # (nframes x natoms) x naxis
                final_layer = one_layer(layer, self.dim_rot_mat_1, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, bavg = bavg, precision = self.fitting_precision, uniform_seed = self.uniform_seed, initial_variables = self.fitting_net_variables, mixed_prec = self.mixed_prec, final_layer = True)
                if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift
                # (nframes x natoms) x naxis
                final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], self.dim_rot_mat_1])
                # (nframes x natoms) x naxis x naxis
                final_layer = tf.matrix_diag(final_layer)                
            else :
                bavg = np.zeros(self.dim_rot_mat_1*self.dim_rot_mat_1)
                # bavg[0*self.dim_rot_mat_1+0] = self.avgeig[0]
                # bavg[1*self.dim_rot_mat_1+1] = self.avgeig[1]
                # bavg[2*self.dim_rot_mat_1+2] = self.avgeig[2]
                # (nframes x natoms) x (naxis x naxis)
                final_layer = one_layer(layer, self.dim_rot_mat_1*self.dim_rot_mat_1, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, bavg = bavg, precision = self.fitting_precision, uniform_seed = self.uniform_seed, initial_variables = self.fitting_net_variables, mixed_prec = self.mixed_prec, final_layer = True)
                if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift
                # (nframes x natoms) x naxis x naxis
                final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], self.dim_rot_mat_1, self.dim_rot_mat_1])
                # (nframes x natoms) x naxis x naxis
                final_layer = final_layer + tf.transpose(final_layer, perm = [0,2,1])
            # (nframes x natoms) x naxis x 3(coord)
            final_layer = tf.matmul(final_layer, rot_mat_i)
            # (nframes x natoms) x 3(coord) x 3(coord)
            final_layer = tf.matmul(rot_mat_i, final_layer, transpose_a = True)
            # nframes x natoms x 3 x 3
            final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[2+type_i], 3, 3])
            # shift and scale
            sel_type_idx = self.sel_type.index(type_i)
            final_layer = final_layer * self.scale[sel_type_idx]
            final_layer = final_layer + self.constant_matrix[sel_type_idx] * tf.eye(3, batch_shape=[tf.shape(inputs)[0], natoms[2+type_i]], dtype = GLOBAL_TF_FLOAT_PRECISION)

            # concat the results
            outs_list.append(final_layer)
            count += 1
        outs = tf.concat(outs_list, axis = 1)
        
        tf.summary.histogram('fitting_net_output', outs)
        return tf.reshape(outs, [-1])
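
In the `fit_diag` branch the network predicts only the naxis diagonal entries; the sandwich R^T diag(d) R then yields a symmetric 3x3 polarizability, which is finally scaled and shifted by a multiple of the identity. A NumPy sketch of that branch (`scale` and `const` are hypothetical stand-ins for `self.scale` / `self.constant_matrix`):

import numpy as np

nat, naxis = 4, 3
d = np.random.randn(nat, naxis)              # per-atom diagonal from the network
R = np.random.randn(nat, naxis, 3)           # rot_mat_i

A = np.zeros((nat, naxis, naxis))
A[:, np.arange(naxis), np.arange(naxis)] = d     # tf.matrix_diag equivalent

T = np.transpose(R, (0, 2, 1)) @ (A @ R)     # R^T diag(d) R, symmetric by construction
scale, const = 1.5, 0.2                      # hypothetical per-type values
T = T * scale + const * np.eye(3)            # the shift-and-scale step
assert np.allclose(T, np.transpose(T, (0, 2, 1)))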
Example #17
def one_layer(inputs, 
              outputs_size, 
              activation_fn=tf.nn.tanh, 
              precision = GLOBAL_TF_FLOAT_PRECISION, 
              stddev=1.0,
              bavg=0.0,
              name='linear', 
              reuse=None,
              seed=None, 
              use_timestep = False, 
              trainable = True,
              useBN = False, 
              uniform_seed = False,
              initial_variables = None):
    with tf.variable_scope(name, reuse=reuse):
        shape = inputs.get_shape().as_list()
        w_initializer  = tf.random_normal_initializer(
                            stddev=stddev / np.sqrt(shape[1] + outputs_size),
                            seed=seed if (seed is None or uniform_seed) else seed + 0)
        b_initializer  = tf.random_normal_initializer(
                            stddev=stddev,
                            mean=bavg,
                            seed=seed if (seed is None or uniform_seed) else seed + 1)
        if initial_variables is not None:
            w_initializer = tf.constant_initializer(initial_variables[name + '/matrix'])
            b_initializer = tf.constant_initializer(initial_variables[name + '/bias'])
        w = tf.get_variable('matrix', 
                            [shape[1], outputs_size], 
                            precision,
                            w_initializer, 
                            trainable = trainable)
        variable_summaries(w, 'matrix')
        b = tf.get_variable('bias', 
                            [outputs_size], 
                            precision,
                            b_initializer, 
                            trainable = trainable)
        variable_summaries(b, 'bias')
        hidden = tf.matmul(inputs, w) + b
        if activation_fn is not None and use_timestep :
            idt_initializer = tf.random_normal_initializer(
                                    stddev=0.001,
                                    mean=0.1,
                                    seed=seed if (seed is None or uniform_seed) else seed + 2)
            if initial_variables is not None:
                idt_initializer = tf.constant_initializer(initial_variables[name + '/idt'])
            idt = tf.get_variable('idt',
                                  [outputs_size],
                                  precision,
                                  idt_initializer, 
                                  trainable = trainable)
            variable_summaries(idt, 'idt')
        if activation_fn is not None:
            if useBN:
                pass
                # hidden_bn = self._batch_norm(hidden, name=name+'_normalization', reuse=reuse)   
                # return activation_fn(hidden_bn)
            else:
                if use_timestep :
                    return tf.reshape(activation_fn(hidden), [-1, outputs_size]) * idt
                else :
                    return tf.reshape(activation_fn(hidden), [-1, outputs_size])                    
        else:
            if useBN:
                pass
                # return self._batch_norm(hidden, name=name+'_normalization', reuse=reuse)
            else:
                return hidden
Example #18
    def _ebd_filter(self,
                    inputs,
                    atype,
                    natoms,
                    input_dict,
                    activation_fn=tf.nn.tanh,
                    stddev=1.0,
                    bavg=0.0,
                    name='linear',
                    reuse=None,
                    seed=None,
                    trainable=True):
        outputs_size = self.filter_neuron[-1]
        outputs_size_2 = self.n_axis_neuron
        # nf x natom x (nei x 4)
        nframes = tf.shape(inputs)[0]
        shape = tf.reshape(inputs, [-1, self.ndescrpt]).get_shape().as_list()

        # nf x natom x nei x outputs_size
        mat_g = self._embedding_net(inputs,
                                    natoms,
                                    self.filter_neuron,
                                    activation_fn=activation_fn,
                                    stddev=stddev,
                                    bavg=bavg,
                                    name=name,
                                    reuse=reuse,
                                    seed=seed,
                                    trainable=trainable)
        # nf x natom x nei x outputs_size
        mat_g = tf.reshape(mat_g,
                           [nframes, natoms[0], self.nnei, outputs_size])

        # (nf x natom) x nei x outputs_size
        if self.type_one_side:
            if self.numb_aparam > 0:
                aparam = input_dict['aparam']
                xyz_scatter \
                    = self._type_embedding_net_one_side_aparam(mat_g,
                                                               atype,
                                                               natoms,
                                                               aparam,
                                                               name = name,
                                                               reuse = reuse,
                                                               seed = seed,
                                                               trainable = trainable)
            else:
                xyz_scatter \
                    = self._type_embedding_net_one_side(mat_g,
                                                        atype,
                                                        natoms,
                                                        name = name,
                                                        reuse = reuse,
                                                        seed = seed,
                                                        trainable = trainable)
        else:
            xyz_scatter \
                = self._type_embedding_net_two_sides(mat_g,
                                                     atype,
                                                     natoms,
                                                     name = name,
                                                     reuse = reuse,
                                                     seed = seed,
                                                     trainable = trainable)

        # natom x nei x 4
        inputs_reshape = tf.reshape(inputs, [-1, shape[1] // 4, 4])
        # natom x 4 x outputs_size
        xyz_scatter_1 = tf.matmul(inputs_reshape,
                                  xyz_scatter,
                                  transpose_a=True)
        xyz_scatter_1 = xyz_scatter_1 * (4.0 / shape[1])
        # natom x 4 x outputs_size_2
        xyz_scatter_2 = tf.slice(xyz_scatter_1, [0, 0, 0],
                                 [-1, -1, outputs_size_2])
        # # natom x 3 x outputs_size_2
        # qmat = tf.slice(xyz_scatter_2, [0,1,0], [-1, 3, -1])
        # natom x 3 x outputs_size_1
        qmat = tf.slice(xyz_scatter_1, [0, 1, 0], [-1, 3, -1])
        # natom x outputs_size x 3
        qmat = tf.transpose(qmat, perm=[0, 2, 1])
        # natom x outputs_size x outputs_size_2
        result = tf.matmul(xyz_scatter_1, xyz_scatter_2, transpose_a=True)
        # natom x (outputs_size x outputs_size_2)
        result = tf.reshape(result, [-1, outputs_size_2 * outputs_size])

        return result, qmat
Example #19
def one_layer(inputs,
              outputs_size,
              activation_fn=tf.nn.tanh,
              precision=GLOBAL_TF_FLOAT_PRECISION,
              stddev=1.0,
              bavg=0.0,
              name='linear',
              reuse=None,
              seed=None,
              use_timestep=False,
              trainable=True,
              useBN=False,
              uniform_seed=False,
              initial_variables=None,
              mixed_prec=None,
              final_layer=False):
    # For good accuracy, the last layer of the fitting network is computed at higher precision.
    if mixed_prec is not None and final_layer:
        inputs = tf.cast(inputs, get_precision(mixed_prec['output_prec']))
    with tf.variable_scope(name, reuse=reuse):
        shape = inputs.get_shape().as_list()
        w_initializer = tf.random_normal_initializer(
            stddev=stddev / np.sqrt(shape[1] + outputs_size),
            seed=seed if (seed is None or uniform_seed) else seed + 0)
        b_initializer = tf.random_normal_initializer(
            stddev=stddev,
            mean=bavg,
            seed=seed if (seed is None or uniform_seed) else seed + 1)
        if initial_variables is not None:
            w_initializer = tf.constant_initializer(
                initial_variables[name + '/matrix'])
            b_initializer = tf.constant_initializer(initial_variables[name +
                                                                      '/bias'])
        w = tf.get_variable('matrix', [shape[1], outputs_size],
                            precision,
                            w_initializer,
                            trainable=trainable)
        variable_summaries(w, 'matrix')
        b = tf.get_variable('bias', [outputs_size],
                            precision,
                            b_initializer,
                            trainable=trainable)
        variable_summaries(b, 'bias')

        if mixed_prec is not None and not final_layer:
            inputs = tf.cast(inputs, get_precision(mixed_prec['compute_prec']))
            w = tf.cast(w, get_precision(mixed_prec['compute_prec']))
            b = tf.cast(b, get_precision(mixed_prec['compute_prec']))

        hidden = tf.nn.bias_add(tf.matmul(inputs, w), b)
        if activation_fn is not None and use_timestep:
            idt_initializer = tf.random_normal_initializer(
                stddev=0.001,
                mean=0.1,
                seed=seed if (seed is None or uniform_seed) else seed + 2)
            if initial_variables is not None:
                idt_initializer = tf.constant_initializer(
                    initial_variables[name + '/idt'])
            idt = tf.get_variable('idt', [outputs_size],
                                  precision,
                                  idt_initializer,
                                  trainable=trainable)
            variable_summaries(idt, 'idt')
        if activation_fn is not None:
            if useBN:
                pass
                # hidden_bn = self._batch_norm(hidden, name=name+'_normalization', reuse=reuse)
                # return activation_fn(hidden_bn)
            else:
                if use_timestep:
                    if mixed_prec is not None and not final_layer:
                        idt = tf.cast(
                            idt, get_precision(mixed_prec['compute_prec']))
                    hidden = tf.reshape(activation_fn(hidden),
                                        [-1, outputs_size]) * idt
                else:
                    hidden = tf.reshape(activation_fn(hidden),
                                        [-1, outputs_size])

        if mixed_prec is not None:
            hidden = tf.cast(hidden, get_precision(mixed_prec['output_prec']))
        return hidden
Example #20
    def build(self,
              input_d: tf.Tensor,
              rot_mat: tf.Tensor,
              natoms: tf.Tensor,
              reuse: bool = None,
              suffix: str = '') -> tf.Tensor:
        """
        Build the computational graph for fitting net
        
        Parameters
        ----------
        input_d
                The input descriptor
        rot_mat
                The rotation matrix from the descriptor.
        natoms
                The number of atoms. This tensor has length Ntypes + 2:
                natoms[0]: number of local atoms
                natoms[1]: total number of atoms held by this processor
                natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
        reuse
                The weights in the networks should be reused when getting the variables.
        suffix
                Name suffix to identify this descriptor

        Returns
        -------
        dipole
                The atomic dipole.
        """
        start_index = 0
        inputs = tf.cast(
            tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]),
            self.fitting_precision)
        rot_mat = tf.reshape(rot_mat, [-1, self.dim_rot_mat * natoms[0]])

        count = 0
        for type_i in range(self.ntypes):
            # cut-out inputs
            inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt],
                                [-1, natoms[2 + type_i] * self.dim_descrpt])
            inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
            rot_mat_i = tf.slice(rot_mat, [0, start_index * self.dim_rot_mat],
                                 [-1, natoms[2 + type_i] * self.dim_rot_mat])
            rot_mat_i = tf.reshape(rot_mat_i, [-1, self.dim_rot_mat_1, 3])
            start_index += natoms[2 + type_i]
            if type_i not in self.sel_type:
                continue
            layer = inputs_i
            for ii in range(0, len(self.n_neuron)):
                if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
                    layer += one_layer(
                        layer,
                        self.n_neuron[ii],
                        name='layer_' + str(ii) + '_type_' + str(type_i) +
                        suffix,
                        reuse=reuse,
                        seed=self.seed,
                        use_timestep=self.resnet_dt,
                        activation_fn=self.fitting_activation_fn,
                        precision=self.fitting_precision,
                        uniform_seed=self.uniform_seed)
                else:
                    layer = one_layer(layer,
                                      self.n_neuron[ii],
                                      name='layer_' + str(ii) + '_type_' +
                                      str(type_i) + suffix,
                                      reuse=reuse,
                                      seed=self.seed,
                                      activation_fn=self.fitting_activation_fn,
                                      precision=self.fitting_precision,
                                      uniform_seed=self.uniform_seed)
                if (not self.uniform_seed) and (self.seed is not None):
                    self.seed += self.seed_shift
            # (nframes x natoms) x naxis
            final_layer = one_layer(layer,
                                    self.dim_rot_mat_1,
                                    activation_fn=None,
                                    name='final_layer_type_' + str(type_i) +
                                    suffix,
                                    reuse=reuse,
                                    seed=self.seed,
                                    precision=self.fitting_precision,
                                    uniform_seed=self.uniform_seed)
            if (not self.uniform_seed) and (self.seed is not None):
                self.seed += self.seed_shift
            # (nframes x natoms) x 1 x naxis
            final_layer = tf.reshape(final_layer, [
                tf.shape(inputs)[0] * natoms[2 + type_i], 1, self.dim_rot_mat_1
            ])
            # (nframes x natoms) x 1 x 3(coord)
            final_layer = tf.matmul(final_layer, rot_mat_i)
            # nframes x natoms x 3
            final_layer = tf.reshape(
                final_layer, [tf.shape(inputs)[0], natoms[2 + type_i], 3])

            # concat the results
            if count == 0:
                outs = final_layer
            else:
                outs = tf.concat([outs, final_layer], axis=1)
            count += 1

        tf.summary.histogram('fitting_net_output', outs)
        return tf.cast(tf.reshape(outs, [-1]), GLOBAL_TF_FLOAT_PRECISION)
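
Since `build` flattens the atomic dipoles to a rank-1 tensor, a caller usually restores the per-atom layout. A minimal sketch, assuming the fitting object is `dipole_fitting` and `natoms_sel` is the total count of atoms whose types are in `sel_type` (both names hypothetical):

# dipole_flat has shape [nframes * natoms_sel * 3]
dipole_flat = dipole_fitting.build(input_d, rot_mat, natoms, suffix='_dipole')
# restore nframes x natoms_sel x 3
dipole = tf.reshape(dipole_flat, [nframes, natoms_sel, 3])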
Example no. 21
    def _build_fv_graph_inner(self):
        self.t_ef = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None],
                                   name='t_ef')
        # -1 lets TF infer the leading dimension at run time:
        # nf = nframes, nfxnas = nframes x natoms_sel, nfxna = nframes x natoms
        nf = -1
        nfxnas = -1
        nfxna = -1
        self.t_box_reshape = tf.reshape(self.t_box, [-1, 9])
        t_nframes = tf.shape(self.t_box_reshape)[0]
        # (nframes x natoms_sel) x 1 x 3
        self.t_ef_reshape = tf.reshape(self.t_ef, [nfxnas, 1, 3])
        # (nframes x natoms) x ndescrpt
        self.descrpt = self.graph.get_tensor_by_name(
            os.path.join(self.modifier_prefix, 'o_rmat:0'))
        self.descrpt_deriv = self.graph.get_tensor_by_name(
            os.path.join(self.modifier_prefix, 'o_rmat_deriv:0'))
        self.nlist = self.graph.get_tensor_by_name(
            os.path.join(self.modifier_prefix, 'o_nlist:0'))
        self.rij = self.graph.get_tensor_by_name(
            os.path.join(self.modifier_prefix, 'o_rij:0'))
        # self.descrpt_reshape = tf.reshape(self.descrpt, [nf, 192 * self.ndescrpt])
        # self.descrpt_deriv = tf.reshape(self.descrpt_deriv, [nf, 192 * self.ndescrpt * 3])

        # nframes x (natoms_sel x 3)
        self.t_tensor_reshape = tf.reshape(self.t_tensor, [t_nframes, -1])
        # nframes x (natoms x 3)
        self.t_tensor_reshape = self._enrich(self.t_tensor_reshape, dof=3)
        # (nframes x natoms) x 3
        self.t_tensor_reshape = tf.reshape(self.t_tensor_reshape, [nfxna, 3])
        # (nframes x natoms) x 1
        self.t_dipole_x = tf.slice(self.t_tensor_reshape, [0, 0], [nfxna, 1])
        self.t_dipole_y = tf.slice(self.t_tensor_reshape, [0, 1], [nfxna, 1])
        self.t_dipole_z = tf.slice(self.t_tensor_reshape, [0, 2], [nfxna, 1])
        # (nframes x natoms) x ndescrpt
        [self.t_dipole_x_d] = tf.gradients(self.t_dipole_x, self.descrpt)
        [self.t_dipole_y_d] = tf.gradients(self.t_dipole_y, self.descrpt)
        [self.t_dipole_z_d] = tf.gradients(self.t_dipole_z, self.descrpt)
        # nframes x (natoms x ndescrpt)
        self.t_dipole_x_d = tf.reshape(self.t_dipole_x_d,
                                       [-1, self.t_natoms[0] * self.ndescrpt])
        self.t_dipole_y_d = tf.reshape(self.t_dipole_y_d,
                                       [-1, self.t_natoms[0] * self.ndescrpt])
        self.t_dipole_z_d = tf.reshape(self.t_dipole_z_d,
                                       [-1, self.t_natoms[0] * self.ndescrpt])
        # nframes x (natoms_sel x ndescrpt)
        self.t_dipole_x_d = self._slice_descrpt_deriv(self.t_dipole_x_d)
        self.t_dipole_y_d = self._slice_descrpt_deriv(self.t_dipole_y_d)
        self.t_dipole_z_d = self._slice_descrpt_deriv(self.t_dipole_z_d)
        # (nframes x natoms_sel) x ndescrpt
        self.t_dipole_x_d = tf.reshape(self.t_dipole_x_d,
                                       [nfxnas, self.ndescrpt])
        self.t_dipole_y_d = tf.reshape(self.t_dipole_y_d,
                                       [nfxnas, self.ndescrpt])
        self.t_dipole_z_d = tf.reshape(self.t_dipole_z_d,
                                       [nfxnas, self.ndescrpt])
        # (nframes x natoms_sel) x (3 x ndescrpt)
        self.t_dipole_d = tf.concat(
            [self.t_dipole_x_d, self.t_dipole_y_d, self.t_dipole_z_d], axis=1)
        # (nframes x natoms_sel) x 3 x ndescrpt
        self.t_dipole_d = tf.reshape(self.t_dipole_d, [-1, 3, self.ndescrpt])
        # (nframes x natoms_sel) x 1 x ndescrpt
        self.t_ef_d = tf.matmul(self.t_ef_reshape, self.t_dipole_d)
        # nframes x (natoms_sel x ndescrpt)
        self.t_ef_d = tf.reshape(self.t_ef_d, [t_nframes, -1])
        # nframes x (natoms x ndescrpt)
        self.t_ef_d = self._enrich(self.t_ef_d, dof=self.ndescrpt)
        self.t_ef_d = tf.reshape(self.t_ef_d,
                                 [nf, self.t_natoms[0] * self.ndescrpt])
        # t_ef_d plays the role of a force (it already carries the -1 sign),
        # while prod_force takes the derivative, so we pass the opposite sign
        self.t_ef_d_oppo = -self.t_ef_d

        force = op_module.prod_force_se_a(self.t_ef_d_oppo,
                                          self.descrpt_deriv,
                                          self.nlist,
                                          self.t_natoms,
                                          n_a_sel=self.nnei_a,
                                          n_r_sel=self.nnei_r)
        virial, atom_virial \
            = op_module.prod_virial_se_a (self.t_ef_d_oppo,
                                          self.descrpt_deriv,
                                          self.rij,
                                          self.nlist,
                                          self.t_natoms,
                                          n_a_sel = self.nnei_a,
                                          n_r_sel = self.nnei_r)
        force = tf.identity(force, name='o_dm_force')
        virial = tf.identity(virial, name='o_dm_virial')
        atom_virial = tf.identity(atom_virial, name='o_dm_av')
        return force, virial, atom_virial
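
The key step above is contracting the external field with the dipole's descriptor derivatives: a batched matmul of a [N, 1, 3] field against a [N, 3, ndescrpt] Jacobian, where N = nframes x natoms_sel. A minimal NumPy shape check, with illustrative sizes:

import numpy as np

N, ndescrpt = 5, 7                          # illustrative sizes
ef = np.random.randn(N, 1, 3)               # external field per selected atom
dipole_d = np.random.randn(N, 3, ndescrpt)  # d(dipole)/d(descriptor)
ef_d = ef @ dipole_d                        # batched matmul -> [N, 1, ndescrpt]
assert ef_d.shape == (N, 1, ndescrpt)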
Example no. 22
 def _filter_lower(
     self,
     type_i,
     type_input,
     start_index,
     incrs_index,
     inputs,
     nframes,
     natoms,
     type_embedding=None,
     is_exclude=False,
     activation_fn=None,
     bavg=0.0,
     stddev=1.0,
     trainable=True,
     suffix='',
 ):
     """
      Input: the environment matrix; returns R.G.
     """
     outputs_size = [1] + self.filter_neuron
     # cut-out inputs
     # with natom x (nei_type_i x 4)
     inputs_i = tf.slice(inputs, [0, start_index * 4],
                         [-1, incrs_index * 4])
     shape_i = inputs_i.get_shape().as_list()
     natom = tf.shape(inputs_i)[0]
     # with (natom x nei_type_i) x 4
     inputs_reshape = tf.reshape(inputs_i, [-1, 4])
     # with (natom x nei_type_i) x 1
     xyz_scatter = tf.reshape(tf.slice(inputs_reshape, [0, 0], [-1, 1]),
                              [-1, 1])
     if type_embedding is not None:
         xyz_scatter = self._concat_type_embedding(xyz_scatter, nframes,
                                                   natoms, type_embedding)
         if self.compress:
             raise RuntimeError(
                 'compression of type embedded descriptor is not supported at the moment'
             )
     # natom x 4 x outputs_size
     if self.compress and (not is_exclude):
         info = [
             self.lower, self.upper, self.upper * self.table_config[0],
             self.table_config[1], self.table_config[2],
             self.table_config[3]
         ]
         if self.type_one_side:
             net = 'filter_-1_net_' + str(type_i)
         else:
             net = 'filter_' + str(type_input) + '_net_' + str(type_i)
         return op_module.tabulate_fusion_se_a(
             tf.cast(self.table.data[net], self.filter_precision),
             info,
             xyz_scatter,
             tf.reshape(inputs_i, [natom, shape_i[1] // 4, 4]),
             last_layer_size=outputs_size[-1])
     else:
         if (not is_exclude):
             # with (natom x nei_type_i) x out_size
             xyz_scatter = embedding_net(
                 xyz_scatter,
                 self.filter_neuron,
                 self.filter_precision,
                 activation_fn=activation_fn,
                 resnet_dt=self.filter_resnet_dt,
                 name_suffix=suffix,
                 stddev=stddev,
                 bavg=bavg,
                 seed=self.seed,
                 trainable=trainable,
                 uniform_seed=self.uniform_seed,
                 initial_variables=self.embedding_net_variables,
                 mixed_prec=self.mixed_prec)
             if (not self.uniform_seed) and (self.seed is not None):
                 self.seed += self.seed_shift
         else:
             # we can safely return the final xyz_scatter filled with zero directly
             return tf.cast(tf.fill((natom, 4, outputs_size[-1]), 0.),
                            self.filter_precision)
         # natom x nei_type_i x out_size
         xyz_scatter = tf.reshape(xyz_scatter,
                                  (-1, shape_i[1] // 4, outputs_size[-1]))
          # When using tf.reshape(inputs_i, [-1, shape_i[1]//4, 4]) below:
          # [588, 24] -> [588, 6, 4] is correct,
          # but if sel is zero,
          # [588, 0] -> [147, 0, 4] is incorrect; the correct shape is [588, 0, 4].
          # So we must explicitly set the first dimension to
          # tf.shape(inputs_i)[0] instead of -1.
         # natom x 4 x outputs_size
         return tf.matmul(tf.reshape(inputs_i, [natom, shape_i[1] // 4, 4]),
                          xyz_scatter,
                          transpose_a=True)
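
The comment about `sel == 0` deserves a concrete check. In NumPy the ambiguity of -1 next to a zero-sized dimension surfaces as an error rather than a silently wrong shape, but the cure is the same: pin the leading dimension explicitly. A minimal sketch:

import numpy as np

natom = 588
empty = np.zeros((natom, 0))        # a neighbor type with sel == 0
try:
    empty.reshape(-1, 0, 4)         # -1 is ambiguous when another dim is 0
except ValueError as err:
    print(err)
ok = empty.reshape(natom, 0, 4)     # explicit leading dim is unambiguous
assert ok.shape == (natom, 0, 4)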
Example no. 23
    def _filter(self,
                inputs,
                type_input,
                natoms,
                type_embedding=None,
                activation_fn=tf.nn.tanh,
                stddev=1.0,
                bavg=0.0,
                name='linear',
                reuse=None,
                trainable=True):
        nframes = tf.shape(tf.reshape(inputs,
                                      [-1, natoms[0], self.ndescrpt]))[0]
        # natom x (nei x 4)
        shape = inputs.get_shape().as_list()
        outputs_size = [1] + self.filter_neuron
        outputs_size_2 = self.n_axis_neuron
        all_excluded = all([(type_input, type_i) in self.exclude_types
                            for type_i in range(self.ntypes)])
        if all_excluded:
            # all types are excluded so result and qmat should be zeros
            # we can safely return a zero matrix...
            # See also https://stackoverflow.com/a/34725458/9567349
            # result: natom x outputs_size x outputs_size_2
            # qmat: natom x outputs_size x 3
            natom = tf.shape(inputs)[0]
            result = tf.cast(
                tf.fill((natom, outputs_size_2, outputs_size[-1]), 0.),
                GLOBAL_TF_FLOAT_PRECISION)
            qmat = tf.cast(tf.fill((natom, outputs_size[-1], 3), 0.),
                           GLOBAL_TF_FLOAT_PRECISION)
            return result, qmat

        with tf.variable_scope(name, reuse=reuse):
            start_index = 0
            type_i = 0
            # natom x 4 x outputs_size
            if type_embedding is None:
                rets = []
                for type_i in range(self.ntypes):
                    ret = self._filter_lower(type_i,
                                             type_input,
                                             start_index,
                                             self.sel_a[type_i],
                                             inputs,
                                             nframes,
                                             natoms,
                                             type_embedding=type_embedding,
                                             is_exclude=(type_input, type_i)
                                             in self.exclude_types,
                                             activation_fn=activation_fn,
                                             stddev=stddev,
                                             bavg=bavg,
                                             trainable=trainable,
                                             suffix="_" + str(type_i))
                    if (type_input, type_i) not in self.exclude_types:
                        # adding zero is meaningless; skip it
                        rets.append(ret)
                    start_index += self.sel_a[type_i]
                # faster to use accumulate_n than multiple add
                xyz_scatter_1 = tf.accumulate_n(rets)
            else:
                xyz_scatter_1 = self._filter_lower(
                    type_i,
                    type_input,
                    start_index,
                    np.cumsum(self.sel_a)[-1],
                    inputs,
                    nframes,
                    natoms,
                    type_embedding=type_embedding,
                    is_exclude=False,
                    activation_fn=activation_fn,
                    stddev=stddev,
                    bavg=bavg,
                    trainable=trainable)
            # natom x nei x outputs_size
            # xyz_scatter = tf.concat(xyz_scatter_total, axis=1)
            # natom x nei x 4
            # inputs_reshape = tf.reshape(inputs, [-1, shape[1]//4, 4])
            # natom x 4 x outputs_size
            # xyz_scatter_1 = tf.matmul(inputs_reshape, xyz_scatter, transpose_a = True)
            if self.original_sel is None:
                # shape[1] = nnei * 4
                nnei = shape[1] / 4
            else:
                nnei = tf.cast(
                    tf.Variable(np.sum(self.original_sel),
                                dtype=tf.int32,
                                trainable=False,
                                name="nnei"), self.filter_precision)
            xyz_scatter_1 = xyz_scatter_1 / nnei
            # natom x 4 x outputs_size_2
            xyz_scatter_2 = tf.slice(xyz_scatter_1, [0, 0, 0],
                                     [-1, -1, outputs_size_2])
            # # natom x 3 x outputs_size_2
            # qmat = tf.slice(xyz_scatter_2, [0,1,0], [-1, 3, -1])
            # natom x 3 x outputs_size_1
            qmat = tf.slice(xyz_scatter_1, [0, 1, 0], [-1, 3, -1])
            # natom x outputs_size_1 x 3
            qmat = tf.transpose(qmat, perm=[0, 2, 1])
            # natom x outputs_size x outputs_size_2
            result = tf.matmul(xyz_scatter_1, xyz_scatter_2, transpose_a=True)
            # natom x (outputs_size x outputs_size_2)
            result = tf.reshape(result,
                                [-1, outputs_size_2 * outputs_size[-1]])

        return result, qmat
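
`_filter` ends by contracting the embedded neighbor matrix with its first `n_axis_neuron` columns, i.e. result = (Rᵀ·G1)ᵀ(Rᵀ·G2) in the DeepPot-SE construction, which makes the output invariant under rotations. A minimal NumPy shape sketch with illustrative sizes:

import numpy as np

natom, M1, M2 = 6, 100, 16                     # illustrative sizes
xyz_scatter_1 = np.random.randn(natom, 4, M1)  # plays the role of R^T G1
xyz_scatter_2 = xyz_scatter_1[:, :, :M2]       # first n_axis_neuron columns
# natom x M1 x M2, matching tf.matmul(..., transpose_a=True) above
result = np.transpose(xyz_scatter_1, (0, 2, 1)) @ xyz_scatter_2
assert result.shape == (natom, M1, M2)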
Example no. 24
    def build(self, input_d, rot_mat, natoms, reuse=None, suffix=''):
        start_index = 0
        inputs = tf.cast(
            tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]),
            self.fitting_precision)
        rot_mat = tf.reshape(rot_mat, [-1, self.dim_rot_mat * natoms[0]])

        count = 0
        for type_i in range(self.ntypes):
            # cut-out inputs
            inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt],
                                [-1, natoms[2 + type_i] * self.dim_descrpt])
            inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
            rot_mat_i = tf.slice(rot_mat, [0, start_index * self.dim_rot_mat],
                                 [-1, natoms[2 + type_i] * self.dim_rot_mat])
            rot_mat_i = tf.reshape(rot_mat_i, [-1, self.dim_rot_mat_1, 3])
            start_index += natoms[2 + type_i]
            if type_i not in self.sel_type:
                continue
            layer = inputs_i
            for ii in range(0, len(self.n_neuron)):
                if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
                    layer += one_layer(
                        layer,
                        self.n_neuron[ii],
                        name='layer_' + str(ii) + '_type_' + str(type_i) +
                        suffix,
                        reuse=reuse,
                        seed=self.seed,
                        use_timestep=self.resnet_dt,
                        activation_fn=self.fitting_activation_fn,
                        precision=self.fitting_precision)
                else:
                    layer = one_layer(layer,
                                      self.n_neuron[ii],
                                      name='layer_' + str(ii) + '_type_' +
                                      str(type_i) + suffix,
                                      reuse=reuse,
                                      seed=self.seed,
                                      activation_fn=self.fitting_activation_fn,
                                      precision=self.fitting_precision)
            if self.fit_diag:
                bavg = np.zeros(self.dim_rot_mat_1)
                # bavg[0] = self.avgeig[0]
                # bavg[1] = self.avgeig[1]
                # bavg[2] = self.avgeig[2]
                # (nframes x natoms) x naxis
                final_layer = one_layer(layer,
                                        self.dim_rot_mat_1,
                                        activation_fn=None,
                                        name='final_layer_type_' +
                                        str(type_i) + suffix,
                                        reuse=reuse,
                                        seed=self.seed,
                                        bavg=bavg,
                                        precision=self.fitting_precision)
                # (nframes x natoms) x naxis
                final_layer = tf.reshape(final_layer, [
                    tf.shape(inputs)[0] * natoms[2 + type_i],
                    self.dim_rot_mat_1
                ])
                # (nframes x natoms) x naxis x naxis
                final_layer = tf.matrix_diag(final_layer)
            else:
                bavg = np.zeros(self.dim_rot_mat_1 * self.dim_rot_mat_1)
                # bavg[0*self.dim_rot_mat_1+0] = self.avgeig[0]
                # bavg[1*self.dim_rot_mat_1+1] = self.avgeig[1]
                # bavg[2*self.dim_rot_mat_1+2] = self.avgeig[2]
                # (nframes x natoms) x (naxis x naxis)
                final_layer = one_layer(
                    layer,
                    self.dim_rot_mat_1 * self.dim_rot_mat_1,
                    activation_fn=None,
                    name='final_layer_type_' + str(type_i) + suffix,
                    reuse=reuse,
                    seed=self.seed,
                    bavg=bavg,
                    precision=self.fitting_precision)
                # (nframes x natoms) x naxis x naxis
                final_layer = tf.reshape(final_layer, [
                    tf.shape(inputs)[0] * natoms[2 + type_i],
                    self.dim_rot_mat_1, self.dim_rot_mat_1
                ])
                # (nframes x natoms) x naxis x naxis
                final_layer = final_layer + tf.transpose(final_layer,
                                                         perm=[0, 2, 1])
            # (nframes x natoms) x naxis x 3(coord)
            final_layer = tf.matmul(final_layer, rot_mat_i)
            # (nframes x natoms) x 3(coord) x 3(coord)
            final_layer = tf.matmul(rot_mat_i, final_layer, transpose_a=True)
            # nframes x natoms x 3 x 3
            final_layer = tf.reshape(
                final_layer, [tf.shape(inputs)[0], natoms[2 + type_i], 3, 3])
            # shift and scale
            sel_type_idx = self.sel_type.index(type_i)
            final_layer = final_layer * self.scale[sel_type_idx]
            final_layer = final_layer + self.diag_shift[sel_type_idx] * tf.eye(
                3,
                batch_shape=[tf.shape(inputs)[0], natoms[2 + type_i]],
                dtype=GLOBAL_TF_FLOAT_PRECISION)

            # concat the results
            if count == 0:
                outs = final_layer
            else:
                outs = tf.concat([outs, final_layer], axis=1)
            count += 1

        return tf.cast(tf.reshape(outs, [-1]), GLOBAL_TF_FLOAT_PRECISION)
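
Both branches above build a symmetric naxis x naxis matrix per atom (diagonal under `fit_diag`, otherwise A + Aᵀ), and the sandwich rot_matᵀ·A·rot_mat preserves that symmetry, as a polarizability tensor requires. A minimal NumPy check with illustrative sizes:

import numpy as np

naxis = 4                         # illustrative
A = np.random.randn(naxis, naxis)
A = A + A.T                       # symmetrize, as in the non-diagonal branch
R = np.random.randn(naxis, 3)     # plays the role of rot_mat_i
P = R.T @ A @ R                   # 3 x 3 output tensor
assert np.allclose(P, P.T)        # symmetry survives the rotation sandwich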
Example no. 25
    def _filter_type_ext(self,
                         inputs,
                         natoms,
                         activation_fn=tf.nn.tanh,
                         stddev=1.0,
                         bavg=0.0,
                         name='linear',
                         reuse=None,
                         seed=None,
                         trainable=True):
        # natom x (nei x 4)
        outputs_size = [1] + self.filter_neuron
        outputs_size_2 = self.n_axis_neuron
        with tf.variable_scope(name, reuse=reuse):
            start_index = 0
            result_all = []
            xyz_scatter_1_all = []
            xyz_scatter_2_all = []
            for type_i in range(self.ntypes):
                # cut-out inputs
                # with natom x (nei_type_i x 4)
                inputs_i = tf.slice(inputs, [0, start_index * 4],
                                    [-1, self.sel_a[type_i] * 4])
                start_index += self.sel_a[type_i]
                shape_i = inputs_i.get_shape().as_list()
                # with (natom x nei_type_i) x 4
                inputs_reshape = tf.reshape(inputs_i, [-1, 4])
                xyz_scatter = tf.reshape(
                    tf.slice(inputs_reshape, [0, 0], [-1, 1]), [-1, 1])
                for ii in range(1, len(outputs_size)):
                    w = tf.get_variable(
                        'matrix_' + str(ii) + '_' + str(type_i),
                        [outputs_size[ii - 1], outputs_size[ii]],
                        self.filter_precision,
                        tf.random_normal_initializer(
                            stddev=stddev /
                            np.sqrt(outputs_size[ii] + outputs_size[ii - 1]),
                            seed=seed),
                        trainable=trainable)
                    b = tf.get_variable('bias_' + str(ii) + '_' + str(type_i),
                                        [1, outputs_size[ii]],
                                        self.filter_precision,
                                        tf.random_normal_initializer(
                                            stddev=stddev,
                                            mean=bavg,
                                            seed=seed),
                                        trainable=trainable)
                    if self.filter_resnet_dt:
                        idt = tf.get_variable(
                            'idt_' + str(ii) + '_' + str(type_i),
                            [1, outputs_size[ii]],
                            self.filter_precision,
                            tf.random_normal_initializer(stddev=0.001,
                                                         mean=1.0,
                                                         seed=seed),
                            trainable=trainable)
                    if outputs_size[ii] == outputs_size[ii - 1]:
                        if self.filter_resnet_dt:
                            xyz_scatter += activation_fn(
                                tf.matmul(xyz_scatter, w) + b) * idt
                        else:
                            xyz_scatter += activation_fn(
                                tf.matmul(xyz_scatter, w) + b)
                    elif outputs_size[ii] == outputs_size[ii - 1] * 2:
                        if self.filter_resnet_dt:
                            xyz_scatter = tf.concat(
                                [xyz_scatter, xyz_scatter], 1) + activation_fn(
                                    tf.matmul(xyz_scatter, w) + b) * idt
                        else:
                            xyz_scatter = tf.concat(
                                [xyz_scatter, xyz_scatter], 1) + activation_fn(
                                    tf.matmul(xyz_scatter, w) + b)
                    else:
                        xyz_scatter = activation_fn(
                            tf.matmul(xyz_scatter, w) + b)
                # natom x nei_type_i x out_size
                xyz_scatter = tf.reshape(
                    xyz_scatter, (-1, shape_i[1] // 4, outputs_size[-1]))
                # natom x nei_type_i x 4
                inputs_i_reshape = tf.reshape(inputs_i,
                                              [-1, shape_i[1] // 4, 4])
                # natom x 4 x outputs_size
                xyz_scatter_1 = tf.matmul(inputs_i_reshape,
                                          xyz_scatter,
                                          transpose_a=True)
                xyz_scatter_1 = xyz_scatter_1 * (4.0 / shape_i[1])
                # natom x 4 x outputs_size_2
                xyz_scatter_2 = tf.slice(xyz_scatter_1, [0, 0, 0],
                                         [-1, -1, outputs_size_2])
                xyz_scatter_1_all.append(xyz_scatter_1)
                xyz_scatter_2_all.append(xyz_scatter_2)

            # for type_i in range(self.ntypes):
            #   for type_j in range(type_i, self.ntypes):
            #     # natom x outputs_size x outputs_size_2
            #     result = tf.matmul(xyz_scatter_1_all[type_i], xyz_scatter_2_all[type_j], transpose_a = True)
            #     # natom x (outputs_size x outputs_size_2)
            #     result = tf.reshape(result, [-1, outputs_size_2 * outputs_size[-1]])
            #     result_all.append(tf.identity(result))
            xyz_scatter_2_coll = tf.concat(xyz_scatter_2_all, axis=2)
            for type_i in range(self.ntypes):
                # natom x outputs_size x (outputs_size_2 x ntypes)
                result = tf.matmul(xyz_scatter_1_all[type_i],
                                   xyz_scatter_2_coll,
                                   transpose_a=True)
                # natom x (outputs_size x outputs_size_2 x ntypes)
                result = tf.reshape(
                    result,
                    [-1, outputs_size_2 * self.ntypes * outputs_size[-1]])
                result_all.append(tf.identity(result))

            # natom x (ntypes x outputs_size x outputs_size_2 x ntypes)
            result_all = tf.concat(result_all, axis=1)

        return result_all
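
The concat trick above replaces the commented-out double loop: multiplying each xyz_scatter_1[type_i] against the concatenation of all xyz_scatter_2 blocks computes every (type_i, type_j) product in a single matmul. A minimal NumPy shape sketch with illustrative sizes:

import numpy as np

natom, M1, M2, ntypes = 6, 100, 16, 2   # illustrative sizes
s1 = [np.random.randn(natom, 4, M1) for _ in range(ntypes)]
s2 = [x[:, :, :M2] for x in s1]
s2_coll = np.concatenate(s2, axis=2)    # natom x 4 x (M2 * ntypes)
# one matmul per type_i replaces the ntypes**2 pairwise products
result_i = np.transpose(s1[0], (0, 2, 1)) @ s2_coll
assert result_i.shape == (natom, M1, M2 * ntypes)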
Example no. 26
    def build(self, input_d, rot_mat, natoms, reuse=None, suffix=''):
        start_index = 0
        inputs = tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]])
        rot_mat = tf.reshape(rot_mat, [-1, self.dim_rot_mat * natoms[0]])
        shape = inputs.get_shape().as_list()

        count = 0
        for type_i in range(self.ntypes):
            # cut-out inputs
            inputs_i = tf.slice(inputs, [0, start_index * self.dim_descrpt],
                                [-1, natoms[2 + type_i] * self.dim_descrpt])
            inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
            rot_mat_i = tf.slice(rot_mat, [0, start_index * self.dim_rot_mat],
                                 [-1, natoms[2 + type_i] * self.dim_rot_mat])
            rot_mat_i = tf.reshape(rot_mat_i, [-1, self.dim_rot_mat_1, 3])
            start_index += natoms[2 + type_i]
            if type_i not in self.sel_type:
                continue
            layer = inputs_i
            for ii in range(0, len(self.n_neuron)):
                if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
                    layer += one_layer(layer,
                                       self.n_neuron[ii],
                                       name='layer_' + str(ii) + '_type_' +
                                       str(type_i) + suffix,
                                       reuse=reuse,
                                       seed=self.seed,
                                       use_timestep=self.resnet_dt)
                else:
                    layer = one_layer(layer,
                                      self.n_neuron[ii],
                                      name='layer_' + str(ii) + '_type_' +
                                      str(type_i) + suffix,
                                      reuse=reuse,
                                      seed=self.seed)
            # (nframes x natoms) x (naxis x naxis)
            final_layer = one_layer(layer,
                                    self.dim_rot_mat_1 * self.dim_rot_mat_1,
                                    activation_fn=None,
                                    name='final_layer_type_' + str(type_i) +
                                    suffix,
                                    reuse=reuse,
                                    seed=self.seed)
            # (nframes x natoms) x naxis x naxis
            final_layer = tf.reshape(final_layer, [
                tf.shape(inputs)[0] * natoms[2 + type_i], self.dim_rot_mat_1,
                self.dim_rot_mat_1
            ])
            # (nframes x natoms) x naxis x naxis
            final_layer = final_layer + tf.transpose(final_layer,
                                                     perm=[0, 2, 1])
            # (nframes x natoms) x naxis x 3(coord)
            final_layer = tf.matmul(final_layer, rot_mat_i)
            # (nframes x natoms) x 3(coord) x 3(coord)
            final_layer = tf.matmul(rot_mat_i, final_layer, transpose_a=True)
            # nframes x natoms x 3 x 3
            final_layer = tf.reshape(
                final_layer, [tf.shape(inputs)[0], natoms[2 + type_i], 3, 3])

            # concat the results
            if count == 0:
                outs = final_layer
            else:
                outs = tf.concat([outs, final_layer], axis=1)
            count += 1

        return tf.reshape(outs, [-1])