Example #1
0
    def _filter_r(self,
                  inputs,
                  type_input,
                  natoms,
                  activation_fn=tf.nn.tanh,
                  stddev=1.0,
                  bavg=0.0,
                  name='linear',
                  reuse=None,
                  trainable=True):
        """Build the radial part of the filter network.

        Slices the radial environment matrix (natom x nei) per neighbor
        type, feeds each slice through a per-type embedding net (excluded
        (type_input, type) pairs contribute zeros instead), concatenates
        the per-type embeddings and averages them over all neighbors.
        Returns a natom x outputs_size tensor scaled by 1/5.
        """
        out_sizes = [1] + self.filter_neuron
        with tf.variable_scope(name, reuse=reuse):
            offset = 0
            per_type = []
            for ti in range(self.ntypes):
                sel_i = self.sel_r[ti]
                # columns of neighbor type ti: natom x nei_type_i
                neigh_i = tf.slice(inputs, [0, offset], [-1, sel_i])
                offset += sel_i
                neigh_shape = neigh_i.get_shape().as_list()
                if (type_input, ti) in self.exclude_types:
                    # excluded pair: contribute a zero block of the same shape
                    natom = tf.shape(inputs)[0]
                    embedded = tf.cast(
                        tf.fill((natom, neigh_shape[1], out_sizes[-1]), 0.),
                        GLOBAL_TF_FLOAT_PRECISION)
                else:
                    # flatten to (natom x nei_type_i) x 1 for the embedding net
                    flat = tf.reshape(neigh_i, [-1, 1])
                    embedded = embedding_net(
                        flat,
                        self.filter_neuron,
                        self.filter_precision,
                        activation_fn=activation_fn,
                        resnet_dt=self.filter_resnet_dt,
                        name_suffix="_" + str(ti),
                        stddev=stddev,
                        bavg=bavg,
                        seed=self.seed,
                        trainable=trainable,
                        uniform_seed=self.uniform_seed,
                        initial_variables=self.embedding_net_variables,
                    )
                    # advance the seed so the next net gets fresh randomness
                    if (not self.uniform_seed) and (self.seed is not None):
                        self.seed += self.seed_shift
                    # back to natom x nei_type_i x out_size
                    embedded = tf.reshape(
                        embedded, (-1, neigh_shape[1], out_sizes[-1]))
                per_type.append(embedded)

            # natom x nei x outputs_size
            stacked = tf.concat(per_type, axis=1)
            # mean over neighbors, rescaled: natom x outputs_size
            result = tf.reduce_mean(stacked, axis=1) * (1. / 5.)

        return result
Example #2
0
    def _filter(self,
                inputs,
                type_input,
                natoms,
                type_embedding=None,
                activation_fn=tf.nn.tanh,
                stddev=1.0,
                bavg=0.0,
                name='linear',
                reuse=None,
                trainable=True):
        """Embed the environment matrix and contract it into the descriptor.

        Parameters
        ----------
        inputs
            Flattened environment matrix, natom x (nei x 4).
        type_input
            Type of the central atom; selects embedding nets and exclusions.
        natoms
            Atom-count vector; natoms[0] is the number of local atoms.
        type_embedding
            Optional type-embedding tensor. When given, a single shared
            embedding net handles all neighbor types at once.
        activation_fn, stddev, bavg, name, reuse, trainable
            Network-construction options forwarded to ``_filter_lower``.

        Returns
        -------
        result
            natom x (outputs_size_2 * outputs_size[-1]) descriptor.
        qmat
            natom x outputs_size[-1] x 3 rotation matrix.
        """
        nframes = tf.shape(tf.reshape(inputs,
                                      [-1, natoms[0], self.ndescrpt]))[0]
        # natom x (nei x 4)
        shape = inputs.get_shape().as_list()
        outputs_size = [1] + self.filter_neuron
        outputs_size_2 = self.n_axis_neuron
        all_excluded = all((type_input, type_i) in self.exclude_types
                           for type_i in range(self.ntypes))
        if all_excluded:
            # All neighbor types are excluded, so result and qmat are zeros;
            # we can safely return zero matrices directly.
            # See also https://stackoverflow.com/a/34725458/9567349
            natom = tf.shape(inputs)[0]
            # Fill with the same 2-D shape the normal path returns
            # (natom x (outputs_size_2 * outputs_size[-1])); the original
            # 3-D fill was rank-inconsistent with the reshape below.
            result = tf.cast(
                tf.fill((natom, outputs_size_2 * outputs_size[-1]), 0.),
                GLOBAL_TF_FLOAT_PRECISION)
            # qmat: natom x outputs_size[-1] x 3
            qmat = tf.cast(tf.fill((natom, outputs_size[-1], 3), 0.),
                           GLOBAL_TF_FLOAT_PRECISION)
            return result, qmat

        with tf.variable_scope(name, reuse=reuse):
            start_index = 0
            type_i = 0
            # natom x 4 x outputs_size
            if type_embedding is None:
                rets = []
                for type_i in range(self.ntypes):
                    ret = self._filter_lower(type_i,
                                             type_input,
                                             start_index,
                                             self.sel_a[type_i],
                                             inputs,
                                             nframes,
                                             natoms,
                                             type_embedding=type_embedding,
                                             is_exclude=(type_input, type_i)
                                             in self.exclude_types,
                                             activation_fn=activation_fn,
                                             stddev=stddev,
                                             bavg=bavg,
                                             trainable=trainable,
                                             suffix="_" + str(type_i))
                    if (type_input, type_i) not in self.exclude_types:
                        # adding zeros is meaningless; skip excluded pairs
                        rets.append(ret)
                    start_index += self.sel_a[type_i]
                # faster to use accumulate_n than multiple adds
                xyz_scatter_1 = tf.accumulate_n(rets)
            else:
                # one shared net over all neighbor types
                xyz_scatter_1 = self._filter_lower(
                    type_i,
                    type_input,
                    start_index,
                    np.cumsum(self.sel_a)[-1],
                    inputs,
                    nframes,
                    natoms,
                    type_embedding=type_embedding,
                    is_exclude=False,
                    activation_fn=activation_fn,
                    stddev=stddev,
                    bavg=bavg,
                    trainable=trainable)
            if self.original_sel is None:
                # shape[1] = nnei * 4
                nnei = shape[1] / 4
            else:
                # keep the pre-truncation neighbor count as a constant
                # variable so the normalization survives model conversion
                nnei = tf.cast(
                    tf.Variable(np.sum(self.original_sel),
                                dtype=tf.int32,
                                trainable=False,
                                name="nnei"), self.filter_precision)
            # normalize by the neighbor count
            xyz_scatter_1 = xyz_scatter_1 / nnei
            # first outputs_size_2 columns: natom x 4 x outputs_size_2
            xyz_scatter_2 = tf.slice(xyz_scatter_1, [0, 0, 0],
                                     [-1, -1, outputs_size_2])
            # drop the radial row: natom x 3 x outputs_size_1
            qmat = tf.slice(xyz_scatter_1, [0, 1, 0], [-1, 3, -1])
            # natom x outputs_size_1 x 3
            qmat = tf.transpose(qmat, perm=[0, 2, 1])
            # natom x outputs_size x outputs_size_2
            result = tf.matmul(xyz_scatter_1, xyz_scatter_2, transpose_a=True)
            # natom x (outputs_size x outputs_size_2)
            result = tf.reshape(result,
                                [-1, outputs_size_2 * outputs_size[-1]])

        return result, qmat
Example #3
0
 def _filter_lower(
     self,
     type_i,
     type_input,
     start_index,
     incrs_index,
     inputs,
     nframes,
     natoms,
     type_embedding=None,
     is_exclude=False,
     activation_fn=None,
     bavg=0.0,
     stddev=1.0,
     trainable=True,
     suffix='',
 ):
     """Embed one neighbor-type slice of the environment matrix (R.G).

     Cuts ``incrs_index`` neighbors starting at column ``start_index * 4``
     out of the flattened env matrix ``inputs`` (natom x (nei x 4)),
     embeds the radial component (first of the 4 per-neighbor values)
     through the embedding net — or a tabulated lookup when compression
     is enabled — and returns the contraction R^T.G with shape
     natom x 4 x outputs_size[-1]. Excluded pairs return zeros of the
     same shape.
     """
     outputs_size = [1] + self.filter_neuron
     # cut-out inputs
     # with natom x (nei_type_i x 4)
     inputs_i = tf.slice(inputs, [0, start_index * 4],
                         [-1, incrs_index * 4])
     shape_i = inputs_i.get_shape().as_list()
     natom = tf.shape(inputs_i)[0]
     # with (natom x nei_type_i) x 4
     inputs_reshape = tf.reshape(inputs_i, [-1, 4])
     # radial component only: (natom x nei_type_i) x 1
     xyz_scatter = tf.reshape(tf.slice(inputs_reshape, [0, 0], [-1, 1]),
                              [-1, 1])
     if type_embedding is not None:
         # append the type embedding to each radial entry
         xyz_scatter = self._concat_type_embedding(xyz_scatter, nframes,
                                                   natoms, type_embedding)
         if self.compress:
             raise RuntimeError(
                 'compression of type embedded descriptor is not supported at the moment'
             )
     # natom x 4 x outputs_size
     if self.compress and (not is_exclude):
         # compressed path: replace the embedding net with a tabulated
         # spline lookup (op_module.tabulate_fusion_se_a).
         # info packs the table bounds/strides; exact semantics of
         # table_config entries come from the compression setup —
         # NOTE(review): not visible here, confirm against the table builder.
         info = [
             self.lower, self.upper, self.upper * self.table_config[0],
             self.table_config[1], self.table_config[2],
             self.table_config[3]
         ]
         # pick the table trained for this (center, neighbor) type pair;
         # type_one_side tables ignore the center type (-1)
         if self.type_one_side:
             net = 'filter_-1_net_' + str(type_i)
         else:
             net = 'filter_' + str(type_input) + '_net_' + str(type_i)
         return op_module.tabulate_fusion_se_a(
             tf.cast(self.table.data[net], self.filter_precision),
             info,
             xyz_scatter,
             tf.reshape(inputs_i, [natom, shape_i[1] // 4, 4]),
             last_layer_size=outputs_size[-1])
     else:
         if (not is_exclude):
             # with (natom x nei_type_i) x out_size
             xyz_scatter = embedding_net(
                 xyz_scatter,
                 self.filter_neuron,
                 self.filter_precision,
                 activation_fn=activation_fn,
                 resnet_dt=self.filter_resnet_dt,
                 name_suffix=suffix,
                 stddev=stddev,
                 bavg=bavg,
                 seed=self.seed,
                 trainable=trainable,
                 uniform_seed=self.uniform_seed,
                 initial_variables=self.embedding_net_variables,
                 mixed_prec=self.mixed_prec)
             # advance the seed so subsequent nets get fresh randomness
             if (not self.uniform_seed) and (self.seed is not None):
                 self.seed += self.seed_shift
         else:
             # we can safely return the final xyz_scatter filled with zero directly
             return tf.cast(tf.fill((natom, 4, outputs_size[-1]), 0.),
                            self.filter_precision)
         # natom x nei_type_i x out_size
         xyz_scatter = tf.reshape(xyz_scatter,
                                  (-1, shape_i[1] // 4, outputs_size[-1]))
         # When using tf.reshape(inputs_i, [-1, shape_i[1]//4, 4]) below
         # [588 24] -> [588 6 4] correct
         # but if sel is zero
         # [588 0] -> [147 0 4] incorrect; the correct one is [588 0 4]
         # So we need to explicitly assign the shape to tf.shape(inputs_i)[0] instead of -1
         # natom x 4 x outputs_size
         return tf.matmul(tf.reshape(inputs_i, [natom, shape_i[1] // 4, 4]),
                          xyz_scatter,
                          transpose_a=True)