Example #1
 def __init__(self, jdata, descrpt):
     if not isinstance(descrpt, DescrptSeA):
         raise RuntimeError('PolarFittingSeA only supports DescrptSeA')
     self.ntypes = descrpt.get_ntypes()
     self.dim_descrpt = descrpt.get_dim_out()
     args = ClassArg()\
            .add('neuron',           list,   default = [120,120,120], alias = 'n_neuron')\
            .add('resnet_dt',        bool,   default = True)\
            .add('fit_diag',         bool,   default = True)\
            .add('diag_shift',       [list,float], default = [0.0 for ii in range(self.ntypes)])\
            .add('scale',            [list,float], default = [1.0 for ii in range(self.ntypes)])\
            .add('sel_type',         [list,int],   default = [ii for ii in range(self.ntypes)], alias = 'pol_type')\
            .add('seed',             int)\
            .add("activation_function", str ,   default = "tanh")\
            .add('precision',           str,    default = "default")
     class_data = args.parse(jdata)
     self.n_neuron = class_data['neuron']
     self.resnet_dt = class_data['resnet_dt']
     self.sel_type = class_data['sel_type']
     self.fit_diag = class_data['fit_diag']
     self.seed = class_data['seed']
     self.diag_shift = class_data['diag_shift']
     self.scale = class_data['scale']
     self.fitting_activation_fn = get_activation_func(
         class_data["activation_function"])
     self.fitting_precision = get_precision(class_data['precision'])
     if type(self.sel_type) is not list:
         self.sel_type = [self.sel_type]
     if type(self.diag_shift) is not list:
         self.diag_shift = [self.diag_shift]
     if type(self.scale) is not list:
         self.scale = [self.scale]
     self.dim_rot_mat_1 = descrpt.get_dim_rot_mat_1()
     self.dim_rot_mat = self.dim_rot_mat_1 * 3
     self.useBN = False
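The chained ClassArg pattern above (also used in Examples #2, #3, and #8) validates a JSON-style dict against typed, defaulted, optionally aliased keys. Below is a minimal sketch of that idea in plain Python; it is hypothetical and assumes nothing about the real deepmd ClassArg beyond what the calls above show.

# Minimal sketch of a ClassArg-like chained parser (hypothetical, not the
# deepmd implementation): .add() registers a key with allowed types, an
# optional default, and an optional alias; .parse() validates a jdata dict.
class MiniClassArg:
    def __init__(self):
        self._args = {}

    def add(self, key, types, default=None, alias=None, must=False):
        if not isinstance(types, list):
            types = [types]
        aliases = [alias] if alias is not None else []
        self._args[key] = (types, default, aliases, must)
        return self  # returning self enables the chained .add() style above

    def parse(self, jdata):
        out = {}
        for key, (types, default, aliases, must) in self._args.items():
            for name in [key] + aliases:
                if name in jdata:
                    val = jdata[name]
                    break
            else:
                if must:
                    raise KeyError('missing required key: %s' % key)
                val = default
            if val is not None and not any(isinstance(val, t) for t in types):
                raise TypeError('%s should be one of %s' % (key, types))
            out[key] = val
        return out

# usage mirroring the snippet above
args = (MiniClassArg()
        .add('neuron', list, default=[120, 120, 120], alias='n_neuron')
        .add('resnet_dt', bool, default=True))
print(args.parse({'n_neuron': [60, 60]}))  # {'neuron': [60, 60], 'resnet_dt': True}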
Example #2
 def __init__ (self, jdata, descrpt):
     if not isinstance(descrpt, DescrptLocFrame) :
         raise RuntimeError('WFC only supports DescrptLocFrame')
     self.ntypes = descrpt.get_ntypes()
     self.dim_descrpt = descrpt.get_dim_out()
     args = ClassArg()\
            .add('neuron',           list,   default = [120,120,120], alias = 'n_neuron')\
            .add('resnet_dt',        bool,   default = True)\
            .add('wfc_numb',         int,    must = True)\
            .add('sel_type',         [list,int],   default = [ii for ii in range(self.ntypes)], alias = 'wfc_type')\
            .add('seed',             int)\
            .add("activation_function", str, default = "tanh")\
            .add('precision',           str,    default = "default")\
            .add('uniform_seed',     bool, default = False)
     class_data = args.parse(jdata)
     self.n_neuron = class_data['neuron']
     self.resnet_dt = class_data['resnet_dt']
     self.wfc_numb = class_data['wfc_numb']
     self.sel_type = class_data['sel_type']
     self.seed = class_data['seed']
     self.uniform_seed = class_data['uniform_seed']
     self.seed_shift = one_layer_rand_seed_shift()
     self.fitting_activation_fn = get_activation_func(class_data["activation_function"])
     self.fitting_precision = get_precision(class_data['precision'])
     self.useBN = False
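Examples #2 and #6 keep a uniform_seed flag next to seed and record a seed_shift from one_layer_rand_seed_shift(). In one_layer and embedding_net further down, each variable of layer ii draws its own derived seed (seed + 3*ii + k for k = 0, 1, 2), unless uniform_seed restores the legacy behavior of reusing the raw seed everywhere. A toy illustration of that derivation rule:

# Toy restatement of the derived-seed rule visible in one_layer and
# embedding_net below: variable k of layer ii is seeded with seed + 3*ii + k;
# uniform_seed (or no seed at all) falls back to the raw seed.
def derived_seed(seed, layer, offset, uniform_seed=False):
    if seed is None or uniform_seed:
        return seed
    return seed + 3 * layer + offset

print(derived_seed(7, layer=0, offset=1))                    # 8  (bias of layer 0)
print(derived_seed(7, layer=2, offset=0))                    # 13 (matrix of layer 2)
print(derived_seed(7, layer=2, offset=0, uniform_seed=True)) # 7  (legacy behavior)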
Example #3
 def __init__(self, jdata, descrpt):
     # model param
     self.ntypes = descrpt.get_ntypes()
     self.dim_descrpt = descrpt.get_dim_out()
     args = ClassArg()\
            .add('numb_fparam',      int,    default = 0)\
            .add('numb_aparam',      int,    default = 0)\
            .add('neuron',           list,   default = [120,120,120], alias = 'n_neuron')\
            .add('resnet_dt',        bool,   default = True)\
            .add('rcond',            float,  default = 1e-3) \
            .add('seed',             int)               \
            .add('atom_ener',        list,   default = [])\
            .add("activation_function", str,    default = "tanh")\
            .add("precision",           str, default = "default")\
            .add("trainable",        [list, bool], default = True)
     class_data = args.parse(jdata)
     self.numb_fparam = class_data['numb_fparam']
     self.numb_aparam = class_data['numb_aparam']
     self.n_neuron = class_data['neuron']
     self.resnet_dt = class_data['resnet_dt']
     self.rcond = class_data['rcond']
     self.seed = class_data['seed']
     self.fitting_activation_fn = get_activation_func(
         class_data["activation_function"])
     self.fitting_precision = get_precision(class_data['precision'])
     self.trainable = class_data['trainable']
     if type(self.trainable) is bool:
         self.trainable = [self.trainable] * (len(self.n_neuron) + 1)
     assert (len(self.trainable) == len(self.n_neuron) +
             1), 'length of trainable should be that of n_neuron + 1'
     self.atom_ener = []
     for at, ae in enumerate(class_data['atom_ener']):
         if ae is not None:
             self.atom_ener.append(
                 tf.constant(ae,
                             global_tf_float_precision,
                             name="atom_%d_ener" % at))
         else:
             self.atom_ener.append(None)
     self.useBN = False
     self.bias_atom_e = None
     # data requirement
     if self.numb_fparam > 0:
         add_data_requirement('fparam',
                              self.numb_fparam,
                              atomic=False,
                              must=True,
                              high_prec=False)
         self.fparam_avg = None
         self.fparam_std = None
         self.fparam_inv_std = None
     if self.numb_aparam > 0:
         add_data_requirement('aparam',
                              self.numb_aparam,
                              atomic=True,
                              must=True,
                              high_prec=False)
         self.aparam_avg = None
         self.aparam_std = None
         self.aparam_inv_std = None
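The trainable handling in Example #3 broadcasts a single bool to one flag per layer: the hidden layers plus the final output layer, hence len(n_neuron) + 1 entries. That normalization, isolated so it can be run on its own:

# Standalone restatement of the `trainable` broadcast in Example #3.
def normalize_trainable(trainable, n_neuron):
    if isinstance(trainable, bool):
        trainable = [trainable] * (len(n_neuron) + 1)
    assert len(trainable) == len(n_neuron) + 1, \
        'length of trainable should be that of n_neuron + 1'
    return trainable

print(normalize_trainable(True, [120, 120, 120]))  # [True, True, True, True]
print(normalize_trainable([True, True, False, True], [120, 120, 120]))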
Example #4
    def enable_mixed_precision(self, mixed_prec: dict = None) -> None:
        """
        Receive the mixed precision setting.

        Parameters
        ----------
        mixed_prec
                The mixed precision setting used in the embedding net
        """
        self.mixed_prec = mixed_prec
        self.fitting_precision = get_precision(mixed_prec['output_prec'])
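enable_mixed_precision stores a dict that the layer builders further down (one_layer, embedding_net) read through two keys: 'compute_prec' for the inner matrix products and 'output_prec' for the final cast. A hedged sketch of the dict shape and the method's effect; the concrete precision strings are assumptions, not verified against get_precision.

# Hedged sketch of the mixed_prec dict implied by this page; the string
# values 'float16'/'float32' are assumptions.
mixed_prec = {'compute_prec': 'float16', 'output_prec': 'float32'}

class MixedPrecDemo:
    def enable_mixed_precision(self, mixed_prec=None):
        # the real method additionally refreshes self.fitting_precision via
        # get_precision(mixed_prec['output_prec'])
        self.mixed_prec = mixed_prec

demo = MixedPrecDemo()
demo.enable_mixed_precision(mixed_prec)
print(demo.mixed_prec['output_prec'])  # float32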
Example #5
 def __init__(
     self,
     neuron: List[int] = [],
     resnet_dt: bool = False,
     activation_function: str = 'tanh',
     precision: str = 'default',
     trainable: bool = True,
     seed: int = None,
     uniform_seed: bool = False,
 ) -> None:
     """
     Constructor
     """
     self.neuron = neuron
     self.seed = seed
     self.filter_resnet_dt = resnet_dt
     self.filter_precision = get_precision(precision)
     self.filter_activation_fn = get_activation_func(activation_function)
     self.trainable = trainable
     self.uniform_seed = uniform_seed
Example #6
 def __init__ (self, 
               descrpt : tf.Tensor,
               neuron : List[int] = [120,120,120], 
               resnet_dt : bool = True,
               sel_type : List[int] = None,
               seed : int = None,
               activation_function : str = 'tanh',
               precision : str = 'default',
               uniform_seed: bool = False
 ) -> None:
     """
     Constructor
     """
     if not isinstance(descrpt, DescrptSeA) :
         raise RuntimeError('DipoleFittingSeA only supports DescrptSeA')
     self.ntypes = descrpt.get_ntypes()
     self.dim_descrpt = descrpt.get_dim_out()
     # args = ClassArg()\
     #        .add('neuron',           list,   default = [120,120,120], alias = 'n_neuron')\
     #        .add('resnet_dt',        bool,   default = True)\
     #        .add('sel_type',         [list,int],   default = [ii for ii in range(self.ntypes)], alias = 'dipole_type')\
     #        .add('seed',             int)\
     #        .add("activation_function", str, default = "tanh")\
     #        .add('precision',           str,    default = "default")
     # class_data = args.parse(jdata)
     self.n_neuron = neuron
     self.resnet_dt = resnet_dt
     self.sel_type = sel_type
     if self.sel_type is None:
         self.sel_type = [ii for ii in range(self.ntypes)]
     self.seed = seed
     self.uniform_seed = uniform_seed
     self.seed_shift = one_layer_rand_seed_shift()
     self.fitting_activation_fn = get_activation_func(activation_function)
     self.fitting_precision = get_precision(precision)
     self.dim_rot_mat_1 = descrpt.get_dim_rot_mat_1()
     self.dim_rot_mat = self.dim_rot_mat_1 * 3
     self.useBN = False
     self.fitting_net_variables = None
     self.mixed_prec = None
Example #7
    def _build_network(self, data):
        self.place_holders = {}
        if self.is_compress:
            for kk in ['coord', 'box']:
                self.place_holders[kk] = tf.placeholder(
                    GLOBAL_TF_FLOAT_PRECISION, [None], 't_' + kk)
            self._get_place_horders(data_requirement)
        else:
            self._get_place_horders(data.get_data_dict())

        self.place_holders['type'] = tf.placeholder(tf.int32, [None],
                                                    name='t_type')
        self.place_holders['natoms_vec'] = tf.placeholder(tf.int32,
                                                          [self.ntypes + 2],
                                                          name='t_natoms')
        self.place_holders['default_mesh'] = tf.placeholder(tf.int32, [None],
                                                            name='t_mesh')
        self.place_holders['is_training'] = tf.placeholder(tf.bool)
        self.model_pred\
            = self.model.build (self.place_holders['coord'],
                                self.place_holders['type'],
                                self.place_holders['natoms_vec'],
                                self.place_holders['box'],
                                self.place_holders['default_mesh'],
                                self.place_holders,
                                self.frz_model,
                                suffix = "",
                                reuse = False)

        self.l2_l, self.l2_more\
            = self.loss.build (self.learning_rate,
                               self.place_holders['natoms_vec'],
                               self.model_pred,
                               self.place_holders,
                               suffix = "test")

        if self.mixed_prec is not None:
            self.l2_l = tf.cast(self.l2_l,
                                get_precision(self.mixed_prec['output_prec']))
        log.info("built network")
Example #8
    def __init__(self, jdata):
        args = ClassArg()\
               .add('sel',      list,   must = True) \
               .add('rcut',     float,  default = 6.0) \
               .add('rcut_smth',float,  default = 5.5) \
               .add('neuron',   list,   default = [10, 20, 40]) \
               .add('axis_neuron', int, default = 4, alias = 'n_axis_neuron') \
               .add('resnet_dt',bool,   default = False) \
               .add('trainable',bool,   default = True) \
               .add('seed',     int) \
               .add('exclude_types', list, default = []) \
               .add('set_davg_zero', bool, default = False) \
               .add('activation_function', str,    default = 'tanh') \
               .add('precision', str, default = "default")
        class_data = args.parse(jdata)
        self.sel_a = class_data['sel']
        self.rcut_r = class_data['rcut']
        self.rcut_r_smth = class_data['rcut_smth']
        self.filter_neuron = class_data['neuron']
        self.n_axis_neuron = class_data['axis_neuron']
        self.filter_resnet_dt = class_data['resnet_dt']
        self.seed = class_data['seed']
        self.trainable = class_data['trainable']
        self.filter_activation_fn = get_activation_func(
            class_data['activation_function'])
        self.filter_precision = get_precision(class_data['precision'])
        exclude_types = class_data['exclude_types']
        self.exclude_types = set()
        for tt in exclude_types:
            assert (len(tt) == 2)
            self.exclude_types.add((tt[0], tt[1]))
            self.exclude_types.add((tt[1], tt[0]))
        self.set_davg_zero = class_data['set_davg_zero']

        # descrpt config
        self.sel_r = [0 for ii in range(len(self.sel_a))]
        self.ntypes = len(self.sel_a)
        assert (self.ntypes == len(self.sel_r))
        self.rcut_a = -1
        # numb of neighbors and numb of descrptors
        self.nnei_a = np.cumsum(self.sel_a)[-1]
        self.nnei_r = np.cumsum(self.sel_r)[-1]
        self.nnei = self.nnei_a + self.nnei_r
        self.ndescrpt_a = self.nnei_a * 4
        self.ndescrpt_r = self.nnei_r * 1
        self.ndescrpt = self.ndescrpt_a + self.ndescrpt_r
        self.useBN = False
        self.dstd = None
        self.davg = None

        self.place_holders = {}
        avg_zero = np.zeros([self.ntypes,
                             self.ndescrpt]).astype(global_np_float_precision)
        std_ones = np.ones([self.ntypes,
                            self.ndescrpt]).astype(global_np_float_precision)
        sub_graph = tf.Graph()
        with sub_graph.as_default():
            name_pfx = 'd_sea_'
            for ii in ['coord', 'box']:
                self.place_holders[ii] = tf.placeholder(
                    global_np_float_precision, [None, None],
                    name=name_pfx + 't_' + ii)
            self.place_holders['type'] = tf.placeholder(tf.int32, [None, None],
                                                        name=name_pfx +
                                                        't_type')
            self.place_holders['natoms_vec'] = tf.placeholder(
                tf.int32, [self.ntypes + 2], name=name_pfx + 't_natoms')
            self.place_holders['default_mesh'] = tf.placeholder(
                tf.int32, [None], name=name_pfx + 't_mesh')
            self.stat_descrpt, descrpt_deriv, rij, nlist \
                = op_module.descrpt_se_a(self.place_holders['coord'],
                                         self.place_holders['type'],
                                         self.place_holders['natoms_vec'],
                                         self.place_holders['box'],
                                         self.place_holders['default_mesh'],
                                         tf.constant(avg_zero),
                                         tf.constant(std_ones),
                                         rcut_a = self.rcut_a,
                                         rcut_r = self.rcut_r,
                                         rcut_r_smth = self.rcut_r_smth,
                                         sel_a = self.sel_a,
                                         sel_r = self.sel_r)
        self.sub_sess = tf.Session(graph=sub_graph,
                                   config=default_tf_session_config)
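The sizes computed in Example #8 follow directly from sel: each entry sel[i] is the maximum number of type-i neighbors, and each angular (se_a) neighbor contributes 4 descriptor components while a radial one contributes 1. A worked numeric check with arbitrary sel values:

import numpy as np

# Worked check of the descriptor-size arithmetic in Example #8 (se_a).
sel_a = [46, 92]                    # max neighbors per atom type (arbitrary)
sel_r = [0 for _ in sel_a]          # no purely radial neighbors here
nnei_a = np.cumsum(sel_a)[-1]       # 138 angular neighbors in total
nnei_r = np.cumsum(sel_r)[-1]       # 0
ndescrpt = nnei_a * 4 + nnei_r * 1  # 4 components per angular neighbor
print(nnei_a + nnei_r, ndescrpt)    # 138 552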
Example #9
    def __init__(self,
                 descrpt: tf.Tensor,
                 neuron: List[int] = [120, 120, 120],
                 resnet_dt: bool = True,
                 numb_fparam: int = 0,
                 numb_aparam: int = 0,
                 rcond: float = 1e-3,
                 tot_ener_zero: bool = False,
                 trainable: List[bool] = None,
                 seed: int = None,
                 atom_ener: List[float] = [],
                 activation_function: str = 'tanh',
                 precision: str = 'default',
                 uniform_seed: bool = False) -> None:
        """
        Constructor
        """
        # model param
        self.ntypes = descrpt.get_ntypes()
        self.dim_descrpt = descrpt.get_dim_out()
        # args = ()\
        #        .add('numb_fparam',      int,    default = 0)\
        #        .add('numb_aparam',      int,    default = 0)\
        #        .add('neuron',           list,   default = [120,120,120], alias = 'n_neuron')\
        #        .add('resnet_dt',        bool,   default = True)\
        #        .add('rcond',            float,  default = 1e-3) \
        #        .add('tot_ener_zero',    bool,   default = False) \
        #        .add('seed',             int)               \
        #        .add('atom_ener',        list,   default = [])\
        #        .add("activation_function", str,    default = "tanh")\
        #        .add("precision",           str, default = "default")\
        #        .add("trainable",        [list, bool], default = True)
        self.numb_fparam = numb_fparam
        self.numb_aparam = numb_aparam
        self.n_neuron = neuron
        self.resnet_dt = resnet_dt
        self.rcond = rcond
        self.seed = seed
        self.uniform_seed = uniform_seed
        self.seed_shift = one_layer_rand_seed_shift()
        self.tot_ener_zero = tot_ener_zero
        self.fitting_activation_fn = get_activation_func(activation_function)
        self.fitting_precision = get_precision(precision)
        self.trainable = trainable
        if self.trainable is None:
            self.trainable = [True for ii in range(len(self.n_neuron) + 1)]
        if type(self.trainable) is bool:
            self.trainable = [self.trainable] * (len(self.n_neuron) + 1)
        assert (len(self.trainable) == len(self.n_neuron) +
                1), 'length of trainable should be that of n_neuron + 1'
        self.atom_ener = []
        self.atom_ener_v = atom_ener
        for at, ae in enumerate(atom_ener):
            if ae is not None:
                self.atom_ener.append(
                    tf.constant(ae,
                                self.fitting_precision,
                                name="atom_%d_ener" % at))
            else:
                self.atom_ener.append(None)
        self.useBN = False
        self.bias_atom_e = np.zeros(self.ntypes, dtype=np.float64)
        # data requirement
        if self.numb_fparam > 0:
            add_data_requirement('fparam',
                                 self.numb_fparam,
                                 atomic=False,
                                 must=True,
                                 high_prec=False)
            self.fparam_avg = None
            self.fparam_std = None
            self.fparam_inv_std = None
        if self.numb_aparam > 0:
            add_data_requirement('aparam',
                                 self.numb_aparam,
                                 atomic=True,
                                 must=True,
                                 high_prec=False)
            self.aparam_avg = None
            self.aparam_std = None
            self.aparam_inv_std = None

        self.fitting_net_variables = None
        self.mixed_prec = None
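The atom_ener loop in Example #9 turns a per-type list that may contain None into a parallel list of tf constants and Nones, so later code can tell which types have a fixed atomic energy. Restated without TensorFlow (the numeric values are arbitrary placeholders):

# Pure-Python restatement of the atom_ener handling in Example #9.
atom_ener_in = [-93.57, None, -187.14]  # one entry per atom type (arbitrary)
atom_ener = []
for at, ae in enumerate(atom_ener_in):
    if ae is not None:
        # the real code wraps this in tf.constant(ae, precision,
        # name="atom_%d_ener" % at); a plain float stands in here
        atom_ener.append(float(ae))
    else:
        atom_ener.append(None)
print(atom_ener)  # [-93.57, None, -187.14]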
Example #10
    def __init__(self,
                 rcut: float,
                 rcut_smth: float,
                 sel: List[str],
                 neuron: List[int] = [24, 48, 96],
                 resnet_dt: bool = False,
                 trainable: bool = True,
                 seed: int = None,
                 type_one_side: bool = True,
                 exclude_types: List[List[int]] = [],
                 set_davg_zero: bool = False,
                 activation_function: str = 'tanh',
                 precision: str = 'default',
                 uniform_seed: bool = False) -> None:
        """
        Constructor
        """
        # args = ClassArg()\
        #        .add('sel',      list,   must = True) \
        #        .add('rcut',     float,  default = 6.0) \
        #        .add('rcut_smth',float,  default = 0.5) \
        #        .add('neuron',   list,   default = [10, 20, 40]) \
        #        .add('resnet_dt',bool,   default = False) \
        #        .add('trainable',bool,   default = True) \
        #        .add('seed',     int) \
        #        .add('type_one_side', bool, default = False) \
        #        .add('exclude_types', list, default = []) \
        #        .add('set_davg_zero', bool, default = False) \
        #        .add("activation_function", str, default = "tanh") \
        #        .add("precision",           str, default = "default")
        # class_data = args.parse(jdata)
        self.sel_r = sel
        self.rcut = rcut
        self.rcut_smth = rcut_smth
        self.filter_neuron = neuron
        self.filter_resnet_dt = resnet_dt
        self.seed = seed
        self.uniform_seed = uniform_seed
        self.seed_shift = embedding_net_rand_seed_shift(self.filter_neuron)
        self.trainable = trainable
        self.filter_activation_fn = get_activation_func(activation_function)
        self.filter_precision = get_precision(precision)
        self.exclude_types = set()
        for tt in exclude_types:
            assert (len(tt) == 2)
            self.exclude_types.add((tt[0], tt[1]))
            self.exclude_types.add((tt[1], tt[0]))
        self.set_davg_zero = set_davg_zero
        self.type_one_side = type_one_side

        # descrpt config
        self.sel_a = [0 for ii in range(len(self.sel_r))]
        self.ntypes = len(self.sel_r)
        # numb of neighbors and numb of descrptors
        self.nnei_a = np.cumsum(self.sel_a)[-1]
        self.nnei_r = np.cumsum(self.sel_r)[-1]
        self.nnei = self.nnei_a + self.nnei_r
        self.ndescrpt_a = self.nnei_a * 4
        self.ndescrpt_r = self.nnei_r * 1
        self.ndescrpt = self.ndescrpt_r
        self.useBN = False
        self.davg = None
        self.dstd = None
        self.embedding_net_variables = None

        self.place_holders = {}
        avg_zero = np.zeros([self.ntypes,
                             self.ndescrpt]).astype(GLOBAL_NP_FLOAT_PRECISION)
        std_ones = np.ones([self.ntypes,
                            self.ndescrpt]).astype(GLOBAL_NP_FLOAT_PRECISION)
        sub_graph = tf.Graph()
        with sub_graph.as_default():
            name_pfx = 'd_ser_'
            for ii in ['coord', 'box']:
                self.place_holders[ii] = tf.placeholder(
                    GLOBAL_NP_FLOAT_PRECISION, [None, None],
                    name=name_pfx + 't_' + ii)
            self.place_holders['type'] = tf.placeholder(tf.int32, [None, None],
                                                        name=name_pfx +
                                                        't_type')
            self.place_holders['natoms_vec'] = tf.placeholder(
                tf.int32, [self.ntypes + 2], name=name_pfx + 't_natoms')
            self.place_holders['default_mesh'] = tf.placeholder(
                tf.int32, [None], name=name_pfx + 't_mesh')
            self.stat_descrpt, descrpt_deriv, rij, nlist \
                = op_module.prod_env_mat_r(self.place_holders['coord'],
                                         self.place_holders['type'],
                                         self.place_holders['natoms_vec'],
                                         self.place_holders['box'],
                                         self.place_holders['default_mesh'],
                                         tf.constant(avg_zero),
                                         tf.constant(std_ones),
                                         rcut = self.rcut,
                                         rcut_smth = self.rcut_smth,
                                         sel = self.sel_r)
            self.sub_sess = tf.Session(graph=sub_graph,
                                       config=default_tf_session_config)
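Example #10 is the radial (se_r) counterpart of Example #8: sel fills sel_r instead of sel_a, and each neighbor contributes a single descriptor component, so ndescrpt equals nnei_r. The same numeric check as before, radial this time:

import numpy as np

# se_r sizing (Example #10): one descriptor component per radial neighbor.
sel_r = [46, 92]
nnei_r = np.cumsum(sel_r)[-1]  # 138
ndescrpt = nnei_r * 1          # vs. 4 * 138 = 552 for se_a in Example #8
print(nnei_r, ndescrpt)        # 138 138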
Example #11
def one_layer(inputs,
              outputs_size,
              activation_fn=tf.nn.tanh,
              precision=GLOBAL_TF_FLOAT_PRECISION,
              stddev=1.0,
              bavg=0.0,
              name='linear',
              reuse=None,
              seed=None,
              use_timestep=False,
              trainable=True,
              useBN=False,
              uniform_seed=False,
              initial_variables=None,
              mixed_prec=None,
              final_layer=False):
    # For accuracy, the last layer of the fitting network is kept at a higher
    # precision when mixed precision is enabled.
    if mixed_prec is not None and final_layer:
        inputs = tf.cast(inputs, get_precision(mixed_prec['output_prec']))
    with tf.variable_scope(name, reuse=reuse):
        shape = inputs.get_shape().as_list()
        w_initializer = tf.random_normal_initializer(
            stddev=stddev / np.sqrt(shape[1] + outputs_size),
            seed=seed if (seed is None or uniform_seed) else seed + 0)
        b_initializer = tf.random_normal_initializer(
            stddev=stddev,
            mean=bavg,
            seed=seed if (seed is None or uniform_seed) else seed + 1)
        if initial_variables is not None:
            w_initializer = tf.constant_initializer(
                initial_variables[name + '/matrix'])
            b_initializer = tf.constant_initializer(initial_variables[name +
                                                                      '/bias'])
        w = tf.get_variable('matrix', [shape[1], outputs_size],
                            precision,
                            w_initializer,
                            trainable=trainable)
        variable_summaries(w, 'matrix')
        b = tf.get_variable('bias', [outputs_size],
                            precision,
                            b_initializer,
                            trainable=trainable)
        variable_summaries(b, 'bias')

        if mixed_prec is not None and not final_layer:
            inputs = tf.cast(inputs, get_precision(mixed_prec['compute_prec']))
            w = tf.cast(w, get_precision(mixed_prec['compute_prec']))
            b = tf.cast(b, get_precision(mixed_prec['compute_prec']))

        hidden = tf.nn.bias_add(tf.matmul(inputs, w), b)
        if activation_fn is not None and use_timestep:
            idt_initializer = tf.random_normal_initializer(
                stddev=0.001,
                mean=0.1,
                seed=seed if (seed is None or uniform_seed) else seed + 2)
            if initial_variables is not None:
                idt_initializer = tf.constant_initializer(
                    initial_variables[name + '/idt'])
            idt = tf.get_variable('idt', [outputs_size],
                                  precision,
                                  idt_initializer,
                                  trainable=trainable)
            variable_summaries(idt, 'idt')
        if activation_fn is not None:
            if useBN:
                pass
                # hidden_bn = self._batch_norm(hidden, name=name+'_normalization', reuse=reuse)
                # return activation_fn(hidden_bn)
            else:
                if use_timestep:
                    if mixed_prec is not None and not final_layer:
                        idt = tf.cast(
                            idt, get_precision(mixed_prec['compute_prec']))
                    hidden = tf.reshape(activation_fn(hidden),
                                        [-1, outputs_size]) * idt
                else:
                    hidden = tf.reshape(activation_fn(hidden),
                                        [-1, outputs_size])

        if mixed_prec is not None:
            hidden = tf.cast(hidden, get_precision(mixed_prec['output_prec']))
        return hidden
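Stripped of variable scoping, summaries, and mixed precision, one_layer computes hidden = phi(x W + b), optionally multiplied elementwise by the trainable timestep vector idt when use_timestep is on. A NumPy sketch of just that math:

import numpy as np

# NumPy sketch of the math inside one_layer above.
rng = np.random.default_rng(0)
x = rng.normal(size=(5, 8))                     # batch of 5, input dim 8
W = rng.normal(scale=1.0 / np.sqrt(8 + 4), size=(8, 4))  # matches w_initializer
b = rng.normal(size=4)
idt = rng.normal(loc=0.1, scale=0.001, size=4)  # matches idt_initializer

hidden = np.tanh(x @ W + b)
y = hidden * idt                                # the use_timestep=True branch
print(y.shape)                                  # (5, 4)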
Example #12
def embedding_net(xx,
                  network_size,
                  precision,
                  activation_fn=tf.nn.tanh,
                  resnet_dt=False,
                  name_suffix='',
                  stddev=1.0,
                  bavg=0.0,
                  seed=None,
                  trainable=True,
                  uniform_seed=False,
                  initial_variables=None,
                  mixed_prec=None):
    r"""The embedding network.

    The embedding network function :math:`\mathcal{N}` is the composition of
    multiple layers :math:`\mathcal{L}^{(i)}`:

    .. math::
        \mathcal{N} = \mathcal{L}^{(n)} \circ \mathcal{L}^{(n-1)}
        \circ \cdots \circ \mathcal{L}^{(1)}

    A layer :math:`\mathcal{L}` is given by one of the following forms,
    depending on the number of nodes: [1]_

    .. math::
        \mathbf{y}=\mathcal{L}(\mathbf{x};\mathbf{w},\mathbf{b})=
        \begin{cases}
            \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b}) + \mathbf{x}, & N_2=N_1 \\
            \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b}) + (\mathbf{x}, \mathbf{x}), & N_2 = 2N_1\\
            \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b}), & \text{otherwise} \\
        \end{cases}

    where :math:`\mathbf{x} \in \mathbb{R}^{N_1}` is the input vector and :math:`\mathbf{y} \in \mathbb{R}^{N_2}`
    is the output vector. :math:`\mathbf{w} \in \mathbb{R}^{N_1 \times N_2}` and
    :math:`\mathbf{b} \in \mathbb{R}^{N_2}` are weights and biases, respectively,
    both of which are trainable if `trainable` is `True`. :math:`\boldsymbol{\phi}`
    is the activation function.

    Parameters
    ----------
    xx : Tensor   
        Input tensor :math:`\mathbf{x}` of shape [-1,1]
    network_size: list of int
        Size of the embedding network. For example [16,32,64]
    precision: 
        Precision of network weights. For example, tf.float64
    activation_fn:
        Activation function :math:`\boldsymbol{\phi}`
    resnet_dt: boolean
        Using time-step in the ResNet construction
    name_suffix: str
        The name suffix appended to each variable.
    stddev: float
        Standard deviation of initializing network parameters
    bavg: float
        Mean of the initial network bias
    seed: int
        Random seed for initializing network parameters
    trainable: boolean
        If the network is trainable
    uniform_seed : boolean
        Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed
    initial_variables : dict
        The input dict which stores the embedding net variables
    mixed_prec
        The input dict which stores the mixed precision setting for the embedding net


    References
    ----------
    .. [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Identity
       mappings in deep residual networks. In Computer Vision – ECCV 2016,
       pages 630–645. Springer International Publishing, 2016.
    """
    input_shape = xx.get_shape().as_list()
    outputs_size = [input_shape[1]] + network_size

    for ii in range(1, len(outputs_size)):
        w_initializer = tf.random_normal_initializer(
            stddev=stddev / np.sqrt(outputs_size[ii] + outputs_size[ii - 1]),
            seed=seed if (seed is None or uniform_seed) else seed + ii * 3 + 0)
        b_initializer = tf.random_normal_initializer(
            stddev=stddev,
            mean=bavg,
            seed=seed if (seed is None or uniform_seed) else seed + 3 * ii + 1)
        if initial_variables is not None:
            scope = tf.get_variable_scope().name
            w_initializer = tf.constant_initializer(
                initial_variables[scope + '/matrix_' + str(ii) + name_suffix])
            b_initializer = tf.constant_initializer(
                initial_variables[scope + '/bias_' + str(ii) + name_suffix])
        w = tf.get_variable('matrix_' + str(ii) + name_suffix,
                            [outputs_size[ii - 1], outputs_size[ii]],
                            precision,
                            w_initializer,
                            trainable=trainable)
        variable_summaries(w, 'matrix_' + str(ii) + name_suffix)

        b = tf.get_variable('bias_' + str(ii) + name_suffix,
                            [outputs_size[ii]],
                            precision,
                            b_initializer,
                            trainable=trainable)
        variable_summaries(b, 'bias_' + str(ii) + name_suffix)

        if mixed_prec is not None:
            xx = tf.cast(xx, get_precision(mixed_prec['compute_prec']))
            w = tf.cast(w, get_precision(mixed_prec['compute_prec']))
            b = tf.cast(b, get_precision(mixed_prec['compute_prec']))
        hidden = tf.reshape(activation_fn(tf.nn.bias_add(tf.matmul(xx, w), b)),
                            [-1, outputs_size[ii]])
        if resnet_dt:
            idt_initializer = tf.random_normal_initializer(
                stddev=0.001,
                mean=1.0,
                seed=seed if
                (seed is None or uniform_seed) else seed + 3 * ii + 2)
            if initial_variables is not None:
                scope = tf.get_variable_scope().name
                idt_initializer = tf.constant_initializer(
                    initial_variables[scope + '/idt_' + str(ii) + name_suffix])
            idt = tf.get_variable('idt_' + str(ii) + name_suffix,
                                  [1, outputs_size[ii]],
                                  precision,
                                  idt_initializer,
                                  trainable=trainable)
            variable_summaries(idt, 'idt_' + str(ii) + name_suffix)
            if mixed_prec is not None:
                idt = tf.cast(idt, get_precision(mixed_prec['compute_prec']))

        if outputs_size[ii] == outputs_size[ii - 1]:
            if resnet_dt:
                xx += hidden * idt
            else:
                xx += hidden
        elif outputs_size[ii] == outputs_size[ii - 1] * 2:
            if resnet_dt:
                xx = tf.concat([xx, xx], 1) + hidden * idt
            else:
                xx = tf.concat([xx, xx], 1) + hidden
        else:
            xx = hidden
    if mixed_prec is not None:
        xx = tf.cast(xx, get_precision(mixed_prec['output_prec']))
    return xx
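The layer rule in the docstring above (identity skip when N2 = N1, duplicated skip when N2 = 2 N1, plain layer otherwise) restates compactly in NumPy; this sketch follows the math only and skips initializers, idt, and mixed precision:

import numpy as np

# NumPy restatement of the embedding_net layer rule documented above.
def embedding_layer(x, w, b, phi=np.tanh):
    n1, n2 = w.shape
    hidden = phi(x @ w + b)
    if n2 == n1:
        return hidden + x                          # identity skip
    if n2 == 2 * n1:
        return hidden + np.concatenate([x, x], 1)  # duplicated skip
    return hidden                                  # plain layer

rng = np.random.default_rng(0)
x = rng.normal(size=(7, 1))                   # embedding_net input has shape [-1, 1]
for n1, n2 in [(1, 16), (16, 32), (32, 64)]:  # network_size = [16, 32, 64]
    x = embedding_layer(x, rng.normal(size=(n1, n2)), rng.normal(size=n2))
print(x.shape)                                # (7, 64)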
Example #13
    def __init__(self,
                 rcut: float,
                 rcut_smth: float,
                 sel: List[str],
                 neuron: List[int] = [24, 48, 96],
                 axis_neuron: int = 8,
                 resnet_dt: bool = False,
                 trainable: bool = True,
                 seed: int = None,
                 type_one_side: bool = True,
                 exclude_types: List[List[int]] = [],
                 set_davg_zero: bool = False,
                 activation_function: str = 'tanh',
                 precision: str = 'default',
                 uniform_seed: bool = False) -> None:
        """
        Constructor
        """
        if rcut < rcut_smth:
            raise RuntimeError(
                "rcut_smth (%f) should be no more than rcut (%f)!" %
                (rcut_smth, rcut))
        self.sel_a = sel
        self.rcut_r = rcut
        self.rcut_r_smth = rcut_smth
        self.filter_neuron = neuron
        self.n_axis_neuron = axis_neuron
        self.filter_resnet_dt = resnet_dt
        self.seed = seed
        self.uniform_seed = uniform_seed
        self.seed_shift = embedding_net_rand_seed_shift(self.filter_neuron)
        self.trainable = trainable
        self.compress_activation_fn = get_activation_func(activation_function)
        self.filter_activation_fn = get_activation_func(activation_function)
        self.filter_precision = get_precision(precision)
        self.exclude_types = set()
        for tt in exclude_types:
            assert (len(tt) == 2)
            self.exclude_types.add((tt[0], tt[1]))
            self.exclude_types.add((tt[1], tt[0]))
        self.set_davg_zero = set_davg_zero
        self.type_one_side = type_one_side

        # descrpt config
        self.sel_r = [0 for ii in range(len(self.sel_a))]
        self.ntypes = len(self.sel_a)
        assert (self.ntypes == len(self.sel_r))
        self.rcut_a = -1
        # numb of neighbors and numb of descrptors
        self.nnei_a = np.cumsum(self.sel_a)[-1]
        self.nnei_r = np.cumsum(self.sel_r)[-1]
        self.nnei = self.nnei_a + self.nnei_r
        self.ndescrpt_a = self.nnei_a * 4
        self.ndescrpt_r = self.nnei_r * 1
        self.ndescrpt = self.ndescrpt_a + self.ndescrpt_r
        self.useBN = False
        self.dstd = None
        self.davg = None
        self.compress = False
        self.embedding_net_variables = None
        self.mixed_prec = None
        self.place_holders = {}
        nei_type = np.array([])
        for ii in range(self.ntypes):
            nei_type = np.append(nei_type,
                                 ii * np.ones(self.sel_a[ii]))  # like a mask
        self.nei_type = tf.constant(nei_type, dtype=tf.int32)

        avg_zero = np.zeros([self.ntypes,
                             self.ndescrpt]).astype(GLOBAL_NP_FLOAT_PRECISION)
        std_ones = np.ones([self.ntypes,
                            self.ndescrpt]).astype(GLOBAL_NP_FLOAT_PRECISION)
        sub_graph = tf.Graph()
        with sub_graph.as_default():
            name_pfx = 'd_sea_'
            for ii in ['coord', 'box']:
                self.place_holders[ii] = tf.placeholder(
                    GLOBAL_NP_FLOAT_PRECISION, [None, None],
                    name=name_pfx + 't_' + ii)
            self.place_holders['type'] = tf.placeholder(tf.int32, [None, None],
                                                        name=name_pfx +
                                                        't_type')
            self.place_holders['natoms_vec'] = tf.placeholder(
                tf.int32, [self.ntypes + 2], name=name_pfx + 't_natoms')
            self.place_holders['default_mesh'] = tf.placeholder(
                tf.int32, [None], name=name_pfx + 't_mesh')
            self.stat_descrpt, descrpt_deriv, rij, nlist \
                = op_module.prod_env_mat_a(self.place_holders['coord'],
                                         self.place_holders['type'],
                                         self.place_holders['natoms_vec'],
                                         self.place_holders['box'],
                                         self.place_holders['default_mesh'],
                                         tf.constant(avg_zero),
                                         tf.constant(std_ones),
                                         rcut_a = self.rcut_a,
                                         rcut_r = self.rcut_r,
                                         rcut_r_smth = self.rcut_r_smth,
                                         sel_a = self.sel_a,
                                         sel_r = self.sel_r)
        self.sub_sess = tf.Session(graph=sub_graph,
                                   config=default_tf_session_config)
        self.original_sel = None
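The nei_type loop in Example #13 flattens sel_a into one type label per neighbor slot ("like a mask"), which downstream code can use to pick the embedding that matches each neighbor's type. Concretely:

import numpy as np

# What the nei_type loop in Example #13 produces for a small sel_a.
sel_a = [2, 3]
nei_type = np.array([])
for ii in range(len(sel_a)):
    nei_type = np.append(nei_type, ii * np.ones(sel_a[ii]))
print(nei_type)  # [0. 0. 1. 1. 1.]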
Example #14
    def __init__(self,
                 rcut: float,
                 rcut_smth: float,
                 sel: List[str],
                 neuron: List[int] = [24, 48, 96],
                 resnet_dt: bool = False,
                 trainable: bool = True,
                 seed: int = None,
                 set_davg_zero: bool = False,
                 activation_function: str = 'tanh',
                 precision: str = 'default',
                 uniform_seed: bool = False) -> None:
        """
        Constructor
        """
        self.sel_a = sel
        self.rcut_r = rcut
        self.rcut_r_smth = rcut_smth
        self.filter_neuron = neuron
        self.filter_resnet_dt = resnet_dt
        self.seed = seed
        self.uniform_seed = uniform_seed
        self.seed_shift = embedding_net_rand_seed_shift(self.filter_neuron)
        self.trainable = trainable
        self.filter_activation_fn = get_activation_func(activation_function)
        self.filter_precision = get_precision(precision)
        # self.exclude_types = set()
        # for tt in exclude_types:
        #     assert(len(tt) == 2)
        #     self.exclude_types.add((tt[0], tt[1]))
        #     self.exclude_types.add((tt[1], tt[0]))
        self.set_davg_zero = set_davg_zero

        # descrpt config
        self.sel_r = [0 for ii in range(len(self.sel_a))]
        self.ntypes = len(self.sel_a)
        assert (self.ntypes == len(self.sel_r))
        self.rcut_a = -1
        # numb of neighbors and numb of descrptors
        self.nnei_a = np.cumsum(self.sel_a)[-1]
        self.nnei_r = np.cumsum(self.sel_r)[-1]
        self.nnei = self.nnei_a + self.nnei_r
        self.ndescrpt_a = self.nnei_a * 4
        self.ndescrpt_r = self.nnei_r * 1
        self.ndescrpt = self.ndescrpt_a + self.ndescrpt_r
        self.useBN = False
        self.dstd = None
        self.davg = None

        self.place_holders = {}
        avg_zero = np.zeros([self.ntypes,
                             self.ndescrpt]).astype(GLOBAL_NP_FLOAT_PRECISION)
        std_ones = np.ones([self.ntypes,
                            self.ndescrpt]).astype(GLOBAL_NP_FLOAT_PRECISION)
        sub_graph = tf.Graph()
        with sub_graph.as_default():
            name_pfx = 'd_sea_'
            for ii in ['coord', 'box']:
                self.place_holders[ii] = tf.placeholder(
                    GLOBAL_NP_FLOAT_PRECISION, [None, None],
                    name=name_pfx + 't_' + ii)
            self.place_holders['type'] = tf.placeholder(tf.int32, [None, None],
                                                        name=name_pfx +
                                                        't_type')
            self.place_holders['natoms_vec'] = tf.placeholder(
                tf.int32, [self.ntypes + 2], name=name_pfx + 't_natoms')
            self.place_holders['default_mesh'] = tf.placeholder(
                tf.int32, [None], name=name_pfx + 't_mesh')
            self.stat_descrpt, descrpt_deriv, rij, nlist \
                = op_module.prod_env_mat_a(self.place_holders['coord'],
                                         self.place_holders['type'],
                                         self.place_holders['natoms_vec'],
                                         self.place_holders['box'],
                                         self.place_holders['default_mesh'],
                                         tf.constant(avg_zero),
                                         tf.constant(std_ones),
                                         rcut_a = self.rcut_a,
                                         rcut_r = self.rcut_r,
                                         rcut_r_smth = self.rcut_r_smth,
                                         sel_a = self.sel_a,
                                         sel_r = self.sel_r)
        self.sub_sess = tf.Session(graph=sub_graph,
                                   config=default_tf_session_config)
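Examples #8, #10, #13, and #14 all build their statistics op inside a private tf.Graph with a dedicated Session, so computing data statistics never touches the main training graph. A minimal sketch of that isolation pattern with tf.compat.v1:

# Minimal sketch of the private-graph pattern above: the statistics op lives
# in its own graph and session, isolated from the default (training) graph.
import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

sub_graph = tf.Graph()
with sub_graph.as_default():
    coord = tf.compat.v1.placeholder(tf.float64, [None, None], name='d_demo_t_coord')
    stat = tf.reduce_mean(coord)  # stand-in for the descriptor stat op

sub_sess = tf.compat.v1.Session(graph=sub_graph)
print(sub_sess.run(stat, feed_dict={coord: np.ones((2, 6))}))  # 1.0
sub_sess.close()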
Example #15
    def __init__ (self, 
                  descrpt : tf.Tensor,
                  neuron : List[int] = [120,120,120],
                  resnet_dt : bool = True,
                  sel_type : List[int] = None,
                  fit_diag : bool = True,
                  scale : List[float] = None,
                  shift_diag : bool = True,     # YWolfeee: let the user decide whether to apply the diagonal shift
                  #diag_shift : List[float] = None,  # YWolfeee: user-assigned diagonal shifts are no longer supported
                  seed : int = None,
                  activation_function : str = 'tanh',
                  precision : str = 'default',
                  uniform_seed: bool = False                  
    ) -> None:
        """
        Constructor

        Parameters
        ----------
        descrpt : tf.Tensor
                The descrptor
        neuron : List[int]
                Number of neurons in each hidden layer of the fitting net
        resnet_dt : bool
                Time-step `dt` in the resnet construction:
                y = x + dt * \phi (Wx + b)
        sel_type : List[int]
                The atom types selected to have an atomic polarizability prediction. If None, all atom types are selected.
        fit_diag : bool
                Fit the diagonal part of the rotational invariant polarizability matrix, which will be converted to normal polarizability matrix by contracting with the rotation matrix.
        scale : List[float]
                The output of the fitting net (polarizability matrix) for type i atom will be scaled by scale[i]
        diag_shift : List[float]
                The diagonal part of the polarizability matrix of type i will be shifted by diag_shift[i]. The shift operation is carried out after scale.        
        seed : int
                Random seed for initializing the network parameters.
        activation_function : str
                The activation function in the embedding net. Supported options are {0}
        precision : str
                The precision of the embedding net parameters. Supported options are {1}                
        uniform_seed
                Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed
        """
        if not isinstance(descrpt, DescrptSeA) :
            raise RuntimeError('PolarFittingSeA only supports DescrptSeA')
        self.ntypes = descrpt.get_ntypes()
        self.dim_descrpt = descrpt.get_dim_out()
        # args = ClassArg()\
        #        .add('neuron',           list,   default = [120,120,120], alias = 'n_neuron')\
        #        .add('resnet_dt',        bool,   default = True)\
        #        .add('fit_diag',         bool,   default = True)\
        #        .add('diag_shift',       [list,float], default = [0.0 for ii in range(self.ntypes)])\
        #        .add('scale',            [list,float], default = [1.0 for ii in range(self.ntypes)])\
        #        .add('sel_type',         [list,int],   default = [ii for ii in range(self.ntypes)], alias = 'pol_type')\
        #        .add('seed',             int)\
        #        .add("activation_function", str ,   default = "tanh")\
        #        .add('precision',           str,    default = "default")
        # class_data = args.parse(jdata)
        self.n_neuron = neuron
        self.resnet_dt = resnet_dt
        self.sel_type = sel_type
        self.fit_diag = fit_diag
        self.seed = seed
        self.uniform_seed = uniform_seed
        self.seed_shift = one_layer_rand_seed_shift()
        #self.diag_shift = diag_shift
        self.shift_diag = shift_diag
        self.scale = scale
        self.fitting_activation_fn = get_activation_func(activation_function)
        self.fitting_precision = get_precision(precision)
        if self.sel_type is None:
            self.sel_type = [ii for ii in range(self.ntypes)]
        if self.scale is None:
            self.scale = [1.0 for ii in range(self.ntypes)]
        #if self.diag_shift is None:
        #    self.diag_shift = [0.0 for ii in range(self.ntypes)]
        if type(self.sel_type) is not list:
            self.sel_type = [self.sel_type]
        self.constant_matrix = np.zeros(len(self.sel_type)) # len(sel_type) x 1, store the average diagonal value
        #if type(self.diag_shift) is not list:
        #    self.diag_shift = [self.diag_shift]
        if type(self.scale) is not list:
            self.scale = [self.scale]
        self.dim_rot_mat_1 = descrpt.get_dim_rot_mat_1()
        self.dim_rot_mat = self.dim_rot_mat_1 * 3
        self.useBN = False
        self.fitting_net_variables = None
        self.mixed_prec = None
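Example #15 (like Example #1) accepts sel_type and scale either as scalars or lists and fills None with "all types" / "no scaling" defaults. That normalization, isolated:

# The sel_type/scale defaulting in Example #15, isolated: None selects all
# types (scale 1.0), and a bare scalar is wrapped into a one-element list.
def normalize(sel_type, scale, ntypes):
    if sel_type is None:
        sel_type = list(range(ntypes))
    if scale is None:
        scale = [1.0] * ntypes
    if not isinstance(sel_type, list):
        sel_type = [sel_type]
    if not isinstance(scale, list):
        scale = [scale]
    return sel_type, scale

print(normalize(None, None, 3))  # ([0, 1, 2], [1.0, 1.0, 1.0])
print(normalize(1, 2.0, 3))      # ([1], [2.0])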