# Example 1 (score: 0)
 def __init__(self, jdata, descrpt):
     """Build the energy fitting net from the json config and a descriptor."""
     # model parameters are taken from the descriptor
     self.ntypes = descrpt.get_ntypes()
     self.dim_descrpt = descrpt.get_dim_out()
     spec = (ClassArg()
             .add('numb_fparam', int, default=0)
             .add('numb_aparam', int, default=0)
             .add('neuron', list, default=[120, 120, 120], alias='n_neuron')
             .add('resnet_dt', bool, default=True)
             .add('rcond', float, default=1e-3)
             .add('seed', int)
             .add('atom_ener', list, default=[])
             .add("activation_function", str, default="tanh")
             .add("precision", str, default="default")
             .add("trainable", [list, bool], default=True))
     cfg = spec.parse(jdata)
     self.numb_fparam = cfg['numb_fparam']
     self.numb_aparam = cfg['numb_aparam']
     self.n_neuron = cfg['neuron']
     self.resnet_dt = cfg['resnet_dt']
     self.rcond = cfg['rcond']
     self.seed = cfg['seed']
     self.fitting_activation_fn = get_activation_func(cfg["activation_function"])
     self.fitting_precision = get_precision(cfg['precision'])
     self.trainable = cfg['trainable']
     n_layer = len(self.n_neuron) + 1
     if isinstance(self.trainable, bool):
         # a single flag applies to every layer (hidden layers + output)
         self.trainable = [self.trainable] * n_layer
     assert len(self.trainable) == n_layer, \
         'length of trainable should be that of n_neuron + 1'
     # per-type constant energies; None entries are left free
     self.atom_ener = []
     for at, ae in enumerate(cfg['atom_ener']):
         self.atom_ener.append(
             None if ae is None else
             tf.constant(ae, global_tf_float_precision,
                         name="atom_%d_ener" % at))
     self.useBN = False
     self.bias_atom_e = None
     # register optional frame/atom parameter inputs and init their stats
     if self.numb_fparam > 0:
         add_data_requirement('fparam', self.numb_fparam,
                              atomic=False, must=True, high_prec=False)
         self.fparam_avg = None
         self.fparam_std = None
         self.fparam_inv_std = None
     if self.numb_aparam > 0:
         add_data_requirement('aparam', self.numb_aparam,
                              atomic=True, must=True, high_prec=False)
         self.aparam_avg = None
         self.aparam_std = None
         self.aparam_inv_std = None
# Example 2 (score: 0)
 def __init__(self, jdata, **kwarg):
     """Energy/force/virial loss configured from the json dict."""
     self.starter_learning_rate = kwarg['starter_learning_rate']
     # (key, default) pairs for all start/limit prefactors
     pref_defaults = [
         ('start_pref_e', 0.02), ('limit_pref_e', 1.00),
         ('start_pref_f', 1000), ('limit_pref_f', 1.00),
         ('start_pref_v', 0), ('limit_pref_v', 0),
         ('start_pref_ae', 0), ('limit_pref_ae', 0),
         ('start_pref_pf', 0), ('limit_pref_pf', 0),
     ]
     spec = ClassArg()
     for key, dflt in pref_defaults:
         spec.add(key, float, default=dflt)
     spec.add('relative_f', float)
     cfg = spec.parse(jdata)
     for key, _ in pref_defaults:
         setattr(self, key, cfg[key])
     self.relative_f = cfg['relative_f']
     # a loss term is active when either its start or limit prefactor is nonzero
     for term in ('e', 'f', 'v', 'ae', 'pf'):
         active = (getattr(self, 'start_pref_' + term) != 0
                   or getattr(self, 'limit_pref_' + term) != 0)
         setattr(self, 'has_' + term, active)
     # data required
     add_data_requirement('energy', 1,
                          atomic=False, must=False, high_prec=True)
     add_data_requirement('force', 3,
                          atomic=True, must=False, high_prec=False)
     add_data_requirement('virial', 9,
                          atomic=False, must=False, high_prec=False)
     add_data_requirement('atom_ener', 1,
                          atomic=True, must=False, high_prec=False)
     add_data_requirement('atom_pref', 1,
                          atomic=True, must=False, high_prec=False, repeat=3)
# Example 3 (score: 0)
 def __init__(self, jdata, **kwarg):
     """WFC loss: registers the mandatory per-atom 'wfc' label data."""
     wfc_model = kwarg['model']
     # 3 Cartesian components per Wannier centre
     add_data_requirement('wfc',
                          3 * wfc_model.get_wfc_numb(),
                          atomic=True,
                          must=True,
                          high_prec=False,
                          type_sel=wfc_model.get_sel_type())
# Example 4 (score: 0)
 def __init__(
     self,
     rcut: float,
     rcut_smth: float,
     sel: List[str],
     neuron: List[int] = [24, 48, 96],
     axis_neuron: int = 8,
     resnet_dt: bool = False,
     trainable: bool = True,
     seed: int = None,
     type_one_side: bool = True,
     type_nchanl: int = 2,
     type_nlayer: int = 1,
     numb_aparam: int = 0,
     set_davg_zero: bool = False,
     activation_function: str = 'tanh',
     precision: str = 'default',
     exclude_types: List[List[int]] = [],
 ) -> None:
     """
     Constructor: se_a descriptor extended with type-embedding channels.
     """
     # delegate the common se_a setup to the parent class
     # NOTE(review): `exclude_types` is accepted but never forwarded or
     # stored here — confirm whether that is intentional.
     DescrptSeA.__init__(self,
                         rcut,
                         rcut_smth,
                         sel,
                         neuron=neuron,
                         axis_neuron=axis_neuron,
                         resnet_dt=resnet_dt,
                         trainable=trainable,
                         seed=seed,
                         type_one_side=type_one_side,
                         set_davg_zero=set_davg_zero,
                         activation_function=activation_function,
                         precision=precision)
     # type-embedding specific knobs
     self.type_one_side = type_one_side
     self.type_nchanl = type_nchanl
     self.type_nlayer = type_nlayer
     self.numb_aparam = numb_aparam
     if self.numb_aparam > 0:
         # NOTE(review): the data width is hard-coded to 3 rather than
         # self.numb_aparam — confirm this is intentional.
         add_data_requirement('aparam', 3,
                              atomic=True, must=True, high_prec=False)
# Example 5 (score: 0)
 def __init__(self, jdata, **kwarg):
     """Tensor-label loss: registers the required per-atom label data.

     Parameters
     ----------
     jdata : dict
         Loss section of the json config (not read here).
     kwarg : dict
         Must contain 'tensor_name', 'tensor_size' and 'label_name'.
         May contain 'model'; its selected types restrict the label data.
     """
     try:
         model = kwarg['model']
         type_sel = model.get_sel_type()
     except (KeyError, AttributeError):
         # no model supplied (or it has no selection): no type restriction
         type_sel = None
     self.tensor_name = kwarg['tensor_name']
     self.tensor_size = kwarg['tensor_size']
     self.label_name = kwarg['label_name']
     # data required
     # BUGFIX: pass the guarded `type_sel` instead of calling
     # model.get_sel_type() directly, which raised NameError whenever no
     # 'model' was provided — defeating the try/except fallback above.
     add_data_requirement(self.label_name,
                          self.tensor_size,
                          atomic=True,
                          must=True,
                          high_prec=False,
                          type_sel=type_sel)
# Example 6 (score: 0)
    def __init__(self, jdata, **kwarg):
        """Loss combining a global tensor term with a per-atom (local) term.

        Parameters
        ----------
        jdata : dict
            Loss config; must provide 'pref' (global weight) and
            'pref_atomic' (local weight); may provide 'scale'.
        kwarg : dict
            Must contain 'tensor_name', 'tensor_size' and 'label_name';
            may contain 'model' for type selection.

        Raises
        ------
        AssertionError
            If jdata is missing, a weight is absent or negative, or both
            weights are zero.
        """
        model = kwarg.get('model', None)
        if model is not None:
            self.type_sel = model.get_sel_type()
        else:
            self.type_sel = None
        self.tensor_name = kwarg['tensor_name']
        self.tensor_size = kwarg['tensor_size']
        self.label_name = kwarg['label_name']
        if jdata is not None:
            self.scale = jdata.get('scale', 1.0)
        else:
            self.scale = 1.0

        # YHT: added for global / local dipole combination
        assert jdata is not None, "Please provide loss parameters!"
        # YWolfeee: modify, use pref / pref_atomic, instead of pref_weight / pref_atomic_weight
        self.local_weight = jdata.get('pref_atomic', None)
        self.global_weight = jdata.get('pref', None)

        assert (self.local_weight is not None
                and self.global_weight is not None
                ), "Both `pref` and `pref_atomic` should be provided."
        assert self.local_weight >= 0.0 and self.global_weight >= 0.0, "Can not assign negative weight to `pref` and `pref_atomic`"
        # FIX: the message was wrapped in a redundant AssertionError() (which
        # made the raised message an exception repr) and had the typo "assian".
        assert (self.local_weight > 0.0) or (self.global_weight > 0.0), \
            'Can not assign zero weight both to `pref` and `pref_atomic`'

        # data required: the per-atom tensor plus its global counterpart
        add_data_requirement("atomic_" + self.label_name,
                             self.tensor_size,
                             atomic=True,
                             must=False,
                             high_prec=False,
                             type_sel=self.type_sel)
        add_data_requirement(self.label_name,
                             self.tensor_size,
                             atomic=False,
                             must=False,
                             high_prec=False,
                             type_sel=self.type_sel)
# Example 7 (score: 0)
 def __init__(self, jdata, **kwarg):
     """Generic tensor loss: registers the labelled tensor as required data.

     Parameters
     ----------
     jdata : dict or None
         Loss config; 'scale' is read with a default of 1.0.
     kwarg : dict
         Must contain 'tensor_name', 'tensor_size' and 'label_name';
         may contain 'model' (for type selection) and 'atomic'.
     """
     # FIX: the original bare `except:` also swallowed SystemExit and
     # KeyboardInterrupt; only a missing model / missing selection method
     # should fall back to "no type restriction".
     try:
         model = kwarg['model']
         type_sel = model.get_sel_type()
     except (KeyError, AttributeError):
         type_sel = None
     self.tensor_name = kwarg['tensor_name']
     self.tensor_size = kwarg['tensor_size']
     self.label_name = kwarg['label_name']
     # whether the label is a per-atom quantity (default) or a global one
     self.atomic = kwarg.get('atomic', True)
     if jdata is not None:
         self.scale = jdata.get('scale', 1.0)
     else:
         self.scale = 1.0
     # data required
     add_data_requirement(self.label_name,
                          self.tensor_size,
                          atomic=self.atomic,
                          must=True,
                          high_prec=False,
                          type_sel=type_sel)
# Example 8 (score: 0)
 def __init__ (self, 
               starter_learning_rate : float,
               start_pref_e : float = 0.1,
               limit_pref_e : float = 1.0,
               start_pref_ed : float = 1.0,
               limit_pref_ed : float = 1.0
 ) -> None :
     """Energy + energy-dipole loss.

     Parameters
     ----------
     starter_learning_rate : float
         Learning rate at the start of training (used for pref scheduling).
     start_pref_e, limit_pref_e : float
         Start/limit prefactors of the energy term.
     start_pref_ed, limit_pref_ed : float
         Start/limit prefactors of the energy-dipole term.
     """
     # BUGFIX: the body referenced the undefined names `kwarg` and `jdata`
     # (leftovers from the old ClassArg-based signature) and raised
     # NameError on every call; use the keyword parameters directly.
     self.starter_learning_rate = starter_learning_rate
     self.start_pref_e = start_pref_e
     self.limit_pref_e = limit_pref_e
     self.start_pref_ed = start_pref_ed
     self.limit_pref_ed = limit_pref_ed
     # data required
     add_data_requirement('energy', 1, atomic=False, must=True, high_prec=True)
     add_data_requirement('energy_dipole', 3, atomic=False, must=True, high_prec=False)
# Example 9 (score: 0)
 def __init__(self, jdata, descrpt):
     """Build a fitting net: hyper-parameters come from the json config."""
     # model parameters are taken from the descriptor
     self.ntypes = descrpt.get_ntypes()
     self.dim_descrpt = descrpt.get_dim_out()
     cfg = (ClassArg()
            .add('numb_fparam', int, default=0)
            .add('neuron', list, default=[120, 120, 120], alias='n_neuron')
            .add('resnet_dt', bool, default=True)
            .add('seed', int)
            .parse(jdata))
     self.numb_fparam = cfg['numb_fparam']
     self.n_neuron = cfg['neuron']
     self.resnet_dt = cfg['resnet_dt']
     self.seed = cfg['seed']
     self.useBN = False
     # frame parameters become (optional) training data when requested
     if self.numb_fparam > 0:
         add_data_requirement('fparam', self.numb_fparam,
                              atomic=False, must=False, high_prec=False)
# Example 10 (score: 0)
 def __init__ (self, 
               starter_learning_rate : float, 
               start_pref_e : float = 0.02,
               limit_pref_e : float = 1.00,
               start_pref_f : float = 1000,
               limit_pref_f : float = 1.00,
               start_pref_v : float = 0.0,
               limit_pref_v : float = 0.0,
               start_pref_ae : float = 0.0,
               limit_pref_ae : float = 0.0,
               start_pref_pf : float = 0.0,
               limit_pref_pf : float = 0.0,
               relative_f : float = None 
 ) -> None:
     """Energy/force/virial loss with explicit keyword arguments."""
     self.starter_learning_rate = starter_learning_rate
     # start/limit prefactor pairs, keyed by loss-term suffix
     prefs = {
         'e': (start_pref_e, limit_pref_e),
         'f': (start_pref_f, limit_pref_f),
         'v': (start_pref_v, limit_pref_v),
         'ae': (start_pref_ae, limit_pref_ae),
         'pf': (start_pref_pf, limit_pref_pf),
     }
     for term, (start, limit) in prefs.items():
         setattr(self, 'start_pref_' + term, start)
         setattr(self, 'limit_pref_' + term, limit)
         # a term participates in the loss when either prefactor is nonzero
         setattr(self, 'has_' + term, start != 0.0 or limit != 0.0)
     self.relative_f = relative_f
     # data required
     add_data_requirement('energy', 1, atomic=False, must=False, high_prec=True)
     add_data_requirement('force',  3, atomic=True,  must=False, high_prec=False)
     add_data_requirement('virial', 9, atomic=False, must=False, high_prec=False)
     add_data_requirement('atom_ener', 1, atomic=True, must=False, high_prec=False)
     add_data_requirement('atom_pref', 1, atomic=True, must=False, high_prec=False, repeat=3)
# Example 11 (score: 0)
    def __init__(self,
                 descrpt: tf.Tensor,
                 neuron: List[int] = [120, 120, 120],
                 resnet_dt: bool = True,
                 numb_fparam: int = 0,
                 numb_aparam: int = 0,
                 rcond: float = 1e-3,
                 tot_ener_zero: bool = False,
                 trainable: List[bool] = None,
                 seed: int = None,
                 atom_ener: List[float] = [],
                 activation_function: str = 'tanh',
                 precision: str = 'default',
                 uniform_seed: bool = False) -> None:
        """
        Constructor of the energy fitting net.
        """
        # sizes inherited from the descriptor
        self.ntypes = descrpt.get_ntypes()
        self.dim_descrpt = descrpt.get_dim_out()
        # network hyper-parameters
        self.numb_fparam = numb_fparam
        self.numb_aparam = numb_aparam
        self.n_neuron = neuron
        self.resnet_dt = resnet_dt
        self.rcond = rcond
        # randomness control
        self.seed = seed
        self.uniform_seed = uniform_seed
        self.seed_shift = one_layer_rand_seed_shift()
        self.tot_ener_zero = tot_ener_zero
        self.fitting_activation_fn = get_activation_func(activation_function)
        self.fitting_precision = get_precision(precision)
        # normalize `trainable` to one flag per layer (hidden layers + output)
        n_layer = len(self.n_neuron) + 1
        if trainable is None:
            trainable = [True] * n_layer
        if type(trainable) is bool:
            trainable = [trainable] * n_layer
        self.trainable = trainable
        assert len(self.trainable) == n_layer, \
            'length of trainable should be that of n_neuron + 1'
        # per-type constant energies; None entries are left free
        self.atom_ener_v = atom_ener
        self.atom_ener = [
            None if ae is None else
            tf.constant(ae, self.fitting_precision, name="atom_%d_ener" % at)
            for at, ae in enumerate(atom_ener)
        ]
        self.useBN = False
        self.bias_atom_e = np.zeros(self.ntypes, dtype=np.float64)
        # register optional frame/atom parameter inputs and init their stats
        if self.numb_fparam > 0:
            add_data_requirement('fparam', self.numb_fparam,
                                 atomic=False, must=True, high_prec=False)
            self.fparam_avg = None
            self.fparam_std = None
            self.fparam_inv_std = None
        if self.numb_aparam > 0:
            add_data_requirement('aparam', self.numb_aparam,
                                 atomic=True, must=True, high_prec=False)
            self.aparam_avg = None
            self.aparam_std = None
            self.aparam_inv_std = None

        self.fitting_net_variables = None
        self.mixed_prec = None
# Example 12 (score: 0)
    def __init__(
        self,
        op,
        rcut: float,
        rcut_smth: float,
        sel: List[str],
        neuron: List[int] = [24, 48, 96],
        axis_neuron: int = 8,
        resnet_dt: bool = False,
        trainable: bool = True,
        seed: int = None,
        type_one_side: bool = True,
        exclude_types: List[List[int]] = [],
        set_davg_zero: bool = False,
        activation_function: str = 'tanh',
        precision: str = 'default',
        uniform_seed: bool = False,
    ) -> None:
        """
        Constructor of an electric-field-aware descriptor built on DescrptSeA.

        Parameters
        ----------
        op
            The TensorFlow descriptor op; it is called below to build the
            statistics sub-graph and must accept coord/type/natoms/box/mesh/
            efield placeholders plus avg/std constants and cutoff kwargs.
        rcut, rcut_smth, sel, neuron, axis_neuron, resnet_dt, trainable,
        seed, type_one_side, exclude_types, set_davg_zero,
        activation_function, precision, uniform_seed
            Standard se_a hyper-parameters, forwarded to DescrptSeA.

        Side effects: registers the per-atom 'efield' data requirement and
        creates a private TF session holding the statistics sub-graph.
        """
        DescrptSeA.__init__(self, rcut, rcut_smth, sel, neuron, axis_neuron,
                            resnet_dt, trainable, seed, type_one_side,
                            exclude_types, set_davg_zero, activation_function,
                            precision, uniform_seed)
        # keep local aliases of the se_a hyper-parameters
        self.sel_a = sel
        self.rcut_r = rcut
        self.rcut_r_smth = rcut_smth
        self.filter_neuron = neuron
        self.n_axis_neuron = axis_neuron
        self.filter_resnet_dt = resnet_dt
        self.seed = seed
        self.trainable = trainable
        self.op = op

        # descrpt config: sel_r is fixed to all zeros (only sel_a is used)
        self.sel_r = [0 for ii in range(len(self.sel_a))]
        self.ntypes = len(self.sel_a)
        assert (self.ntypes == len(self.sel_r))
        self.rcut_a = -1
        # numb of neighbors and numb of descrptors
        # (4 components per sel_a neighbor, 1 per sel_r neighbor)
        self.nnei_a = np.cumsum(self.sel_a)[-1]
        self.nnei_r = np.cumsum(self.sel_r)[-1]
        self.nnei = self.nnei_a + self.nnei_r
        self.ndescrpt_a = self.nnei_a * 4
        self.ndescrpt_r = self.nnei_r * 1
        self.ndescrpt = self.ndescrpt_a + self.ndescrpt_r
        self.useBN = False
        # descriptor statistics; filled later (None until computed)
        self.dstd = None
        self.davg = None

        # the external electric field is mandatory per-atom input data
        add_data_requirement('efield',
                             3,
                             atomic=True,
                             must=True,
                             high_prec=False)

        # build a private sub-graph (and session) used only for computing
        # descriptor statistics, so it does not pollute the default graph
        self.place_holders = {}
        avg_zero = np.zeros([self.ntypes,
                             self.ndescrpt]).astype(GLOBAL_NP_FLOAT_PRECISION)
        std_ones = np.ones([self.ntypes,
                            self.ndescrpt]).astype(GLOBAL_NP_FLOAT_PRECISION)
        sub_graph = tf.Graph()
        with sub_graph.as_default():
            name_pfx = 'd_sea_ef_'
            for ii in ['coord', 'box']:
                self.place_holders[ii] = tf.placeholder(
                    GLOBAL_NP_FLOAT_PRECISION, [None, None],
                    name=name_pfx + 't_' + ii)
            self.place_holders['type'] = tf.placeholder(tf.int32, [None, None],
                                                        name=name_pfx +
                                                        't_type')
            self.place_holders['natoms_vec'] = tf.placeholder(
                tf.int32, [self.ntypes + 2], name=name_pfx + 't_natoms')
            self.place_holders['default_mesh'] = tf.placeholder(
                tf.int32, [None], name=name_pfx + 't_mesh')
            self.place_holders['efield'] = tf.placeholder(
                GLOBAL_NP_FLOAT_PRECISION, [None, None],
                name=name_pfx + 't_efield')
            # evaluate the op with zero average / unit std so that raw
            # (unnormalized) descriptor statistics can be collected later
            self.stat_descrpt, descrpt_deriv, rij, nlist \
                = self.op(self.place_holders['coord'],
                          self.place_holders['type'],
                          self.place_holders['natoms_vec'],
                          self.place_holders['box'],
                          self.place_holders['default_mesh'],
                          self.place_holders['efield'],
                          tf.constant(avg_zero),
                          tf.constant(std_ones),
                          rcut_a = self.rcut_a,
                          rcut_r = self.rcut_r,
                          rcut_r_smth = self.rcut_r_smth,
                          sel_a = self.sel_a,
                          sel_r = self.sel_r)
        self.sub_sess = tf.Session(graph=sub_graph,
                                   config=default_tf_session_config)