Example #1
    def test_json_yaml_equal(self):

        inputs = ("water_v1", "water_se_a_v1")

        for i in inputs:
            jdata = j_loader(os.path.join('yaml_inputs', f'{i}.json'))
            ydata = j_loader(os.path.join('yaml_inputs', f'{i}.yaml'))
            self.assertEqual(jdata, ydata)

        with self.assertRaises(TypeError):
            j_loader("path_with_wrong.extension")
Example #2
def compute_efv(jfile):
    jdata = j_loader(jfile)
    run_opt = RunOptions(None)
    systems = j_must_have(jdata, 'systems')
    set_pfx = j_must_have(jdata, 'set_prefix')
    batch_size = j_must_have(jdata, 'batch_size')
    test_size = j_must_have(jdata, 'numb_test')
    batch_size = 1
    test_size = 1
    rcut = j_must_have(jdata, 'rcut')

    data = DataSystem(systems, set_pfx, batch_size, test_size, rcut, run_opt)

    tot_numb_batches = sum(data.get_nbatches())
    lr = LearingRate(jdata, tot_numb_batches)

    model = NNPModel(jdata, run_opt=run_opt)
    model.build(data, lr)

    test_data = data.get_test()

    feed_dict_test = {
        model.t_prop_c: test_data["prop_c"],
        model.t_energy: test_data["energy"][:model.numb_test],
        model.t_force: np.reshape(test_data["force"][:model.numb_test, :], [-1]),
        model.t_virial: np.reshape(test_data["virial"][:model.numb_test, :], [-1]),
        model.t_atom_ener: np.reshape(test_data["atom_ener"][:model.numb_test, :], [-1]),
        model.t_atom_pref: np.reshape(test_data["atom_pref"][:model.numb_test, :], [-1]),
        model.t_coord: np.reshape(test_data["coord"][:model.numb_test, :], [-1]),
        model.t_box: test_data["box"][:model.numb_test, :],
        model.t_type: np.reshape(test_data["type"][:model.numb_test, :], [-1]),
        model.t_natoms: test_data["natoms_vec"],
        model.t_mesh: test_data["default_mesh"],
        model.t_fparam: np.reshape(test_data["fparam"][:model.numb_test, :], [-1]),
        model.is_training: False
    }

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    [e, f, v] = sess.run([model.energy, model.force, model.virial],
                         feed_dict=feed_dict_test)
    return e, f, v
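A minimal call of the helper above might be as follows; the control file name, and the expectation that it defines systems, set_prefix, batch_size, numb_test and rcut, are assumptions:

# illustrative usage of compute_efv(); the control file name is an assumption
e, f, v = compute_efv('water_se_a.json')
print('energy', e.shape, 'force', f.shape, 'virial', v.shape)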
Example #3
    def test(self):
        jdata = j_loader(input_json)
        jdata = jdata['model']
        descrpt = DescrptSeA(jdata['descriptor'])
        fitting = EnerFitting(jdata['fitting_net'], descrpt)
        avgs = [0, 10]
        stds = [2, 0.4]
        sys_natoms = [10, 100]
        sys_nframes = [5, 2]
        all_data = _make_fake_data(sys_natoms, sys_nframes, avgs, stds)
        frefa, frefs = _brute_fparam(all_data, len(avgs))
        arefa, arefs = _brute_aparam(all_data, len(avgs))
        fitting.compute_input_stats(all_data, protection=1e-2)
        # print(frefa, frefs)
        for ii in range(len(avgs)):
            self.assertAlmostEqual(frefa[ii], fitting.fparam_avg[ii])
            self.assertAlmostEqual(frefs[ii], fitting.fparam_std[ii])
            self.assertAlmostEqual(arefa[ii], fitting.aparam_avg[ii])
            self.assertAlmostEqual(arefs[ii], fitting.aparam_std[ii])
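The fixtures _make_fake_data, _brute_fparam and _brute_aparam are not shown on this page. A plausible sketch of the brute-force frame-parameter statistics the assertions compare against (an assumption about those helpers, not the actual test code) is:

import numpy as np

def brute_fparam_stats(all_data, ndim):
    """Sketch: pool 'fparam' over all systems and frames, return per-dimension mean and std."""
    pooled = np.concatenate(
        [np.reshape(sys_data['fparam'], [-1, ndim]) for sys_data in all_data], axis=0)
    return np.mean(pooled, axis=0), np.std(pooled, axis=0)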
Example #4
    def _setUp(self):
        args = Args()
        run_opt = RunOptions(args, False)
        jdata = j_loader(args.INPUT)

        # init model
        model = NNPTrainer (jdata, run_opt = run_opt)
        rcut = model.model.get_rcut()

        # init data system
        systems = j_must_have(jdata['training'], 'systems')
        set_pfx = j_must_have(jdata['training'], 'set_prefix')
        batch_size = j_must_have(jdata['training'], 'batch_size')
        test_size = j_must_have(jdata['training'], 'numb_test')    
        data = DeepmdDataSystem(systems, 
                                batch_size, 
                                test_size, 
                                rcut, 
                                set_prefix=set_pfx)
        data.add_dict(data_requirement)

        # clear the default graph
        tf.reset_default_graph()

        # build the model with stats from the first system
        model.build (data)
        
        # freeze the graph
        with tf.Session() as sess:
            init_op = tf.global_variables_initializer()
            sess.run(init_op)
            graph = tf.get_default_graph()
            input_graph_def = graph.as_graph_def()
            nodes = "o_dipole,o_rmat,o_rmat_deriv,o_nlist,o_rij,descrpt_attr/rcut,descrpt_attr/ntypes,descrpt_attr/sel,descrpt_attr/ndescrpt,model_attr/tmap,model_attr/sel_type,model_attr/model_type"
            output_graph_def = tf.graph_util.convert_variables_to_constants(
                sess,
                input_graph_def,
                nodes.split(",") 
            )
            output_graph = os.path.join(modifier_datapath, 'dipole.pb')
            with tf.gfile.GFile(output_graph, "wb") as f:
                f.write(output_graph_def.SerializeToString())
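Once dipole.pb has been written, the frozen graph can be loaded back with the same TF1-style API used above. A short sketch follows; the specific tensor name queried is an assumption based on the nodes list above:

# illustrative reload of the frozen graph written above
with tf.gfile.GFile(os.path.join(modifier_datapath, 'dipole.pb'), 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    rcut_t = graph.get_tensor_by_name('descrpt_attr/rcut:0')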
Example #5
def train(args):
    # load json database
    jdata = j_loader(args.INPUT)

    if 'model' not in jdata:
        jdata = convert_input_v0_v1(jdata,
                                    warning=True,
                                    dump='input_v1_compat.json')

    jdata = normalize(jdata)
    with open(args.output, 'w') as fp:
        json.dump(jdata, fp, indent=4)

    # run options
    with_distrib = False
    if 'with_distrib' in jdata:
        with_distrib = jdata['with_distrib']
    run_opt = RunOptions(args, with_distrib)
    run_opt.print_welcome()
    run_opt.print_citation()
    run_opt.print_summary()

    if run_opt.is_distrib:
        # distributed training
        if run_opt.my_job_name == "ps":
            queue = create_done_queue(run_opt.cluster_spec,
                                      run_opt.my_task_index)
            wait_done_queue(run_opt.cluster_spec, run_opt.server, queue,
                            run_opt.my_task_index)
            #server.join()
        elif run_opt.my_job_name == "worker":
            done_ops = connect_done_queue(run_opt.cluster_spec,
                                          run_opt.my_task_index)
            _do_work(jdata, run_opt)
            fill_done_queue(run_opt.cluster_spec, run_opt.server, done_ops,
                            run_opt.my_task_index)
        else:
            raise RuntimeError("unknown job name")
    else:
        # serial training
        _do_work(jdata, run_opt)
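As a side note on the run-options block above, the three-line lookup of 'with_distrib' is equivalent to a single dict.get call:

# equivalent to the 'with_distrib' lookup in train() above
with_distrib = jdata.get('with_distrib', False)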
Example #6
    def test_model(self):
        jfile = 'water_se_a.json'
        jdata = j_loader(jfile)

        run_opt = RunOptions(None) 
        systems = j_must_have(jdata, 'systems')
        set_pfx = j_must_have(jdata, 'set_prefix')
        batch_size = j_must_have(jdata, 'batch_size')
        test_size = j_must_have(jdata, 'numb_test')
        batch_size = 1
        test_size = 1
        stop_batch = j_must_have(jdata, 'stop_batch')
        rcut = j_must_have (jdata['model']['descriptor'], 'rcut')
        
        data = DataSystem(systems, set_pfx, batch_size, test_size, rcut, run_opt = None)
        
        test_data = data.get_test ()
        numb_test = 1
        
        descrpt = DescrptSeA(jdata['model']['descriptor'])
        fitting = EnerFitting(jdata['model']['fitting_net'], descrpt)
        model = Model(jdata['model'], descrpt, fitting)

        # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']])
        input_data = {'coord' : [test_data['coord']], 
                      'box': [test_data['box']], 
                      'type': [test_data['type']],
                      'natoms_vec' : [test_data['natoms_vec']],
                      'default_mesh' : [test_data['default_mesh']]
        }
        model._compute_input_stat(input_data)
        model.descrpt.bias_atom_e = data.compute_energy_shift()

        t_prop_c           = tf.placeholder(tf.float32, [5],    name='t_prop_c')
        t_energy           = tf.placeholder(global_ener_float_precision, [None], name='t_energy')
        t_force            = tf.placeholder(global_tf_float_precision, [None], name='t_force')
        t_virial           = tf.placeholder(global_tf_float_precision, [None], name='t_virial')
        t_atom_ener        = tf.placeholder(global_tf_float_precision, [None], name='t_atom_ener')
        t_coord            = tf.placeholder(global_tf_float_precision, [None], name='i_coord')
        t_type             = tf.placeholder(tf.int32,   [None], name='i_type')
        t_natoms           = tf.placeholder(tf.int32,   [model.ntypes+2], name='i_natoms')
        t_box              = tf.placeholder(global_tf_float_precision, [None, 9], name='i_box')
        t_mesh             = tf.placeholder(tf.int32,   [None], name='i_mesh')
        is_training        = tf.placeholder(tf.bool)
        t_fparam = None

        model_pred\
            = model.build (t_coord, 
                           t_type, 
                           t_natoms, 
                           t_box, 
                           t_mesh,
                           t_fparam,
                           suffix = "se_a_srtab", 
                           reuse = False)
        energy = model_pred['energy']
        force  = model_pred['force']
        virial = model_pred['virial']
        atom_ener =  model_pred['atom_ener']

        feed_dict_test = {t_prop_c:        test_data['prop_c'],
                          t_energy:        test_data['energy']              [:numb_test],
                          t_force:         np.reshape(test_data['force']    [:numb_test, :], [-1]),
                          t_virial:        np.reshape(test_data['virial']   [:numb_test, :], [-1]),
                          t_atom_ener:     np.reshape(test_data['atom_ener'][:numb_test, :], [-1]),
                          t_coord:         np.reshape(test_data['coord']    [:numb_test, :], [-1]),
                          t_box:           test_data['box']                 [:numb_test, :],
                          t_type:          np.reshape(test_data['type']     [:numb_test, :], [-1]),
                          t_natoms:        test_data['natoms_vec'],
                          t_mesh:          test_data['default_mesh'],
                          is_training:     False}

        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        [e, f, v] = sess.run([energy, force, virial], 
                             feed_dict = feed_dict_test)

        e = e.reshape([-1])
        f = f.reshape([-1])
        v = v.reshape([-1])

        refe = [6.135449167779321300e+01]
        reff = [7.799691562262310585e-02,9.423098804815030483e-02,3.790560997388224204e-03,1.432522403799846578e-01,1.148392791403983204e-01,-1.321871172563671148e-02,-7.318966526325138000e-02,6.516069212737778116e-02,5.406418483320515412e-04,5.870713761026503247e-02,-1.605402669549013672e-01,-5.089516979826595386e-03,-2.554593467731766654e-01,3.092063507347833987e-02,1.510355029451411479e-02,4.869271842355533952e-02,-1.446113274345035005e-01,-1.126524434771078789e-03]
        refv = [-6.076776685178300053e-01,1.103174323630009418e-01,1.984250991380156690e-02,1.103174323630009557e-01,-3.319759402259439551e-01,-6.007404107650986258e-03,1.984250991380157036e-02,-6.007404107650981921e-03,-1.200076017439753642e-03]
        refe = np.reshape(refe, [-1])
        reff = np.reshape(reff, [-1])
        refv = np.reshape(refv, [-1])

        places = 10
        for ii in range(e.size) :
            self.assertAlmostEqual(e[ii], refe[ii], places = places)
        for ii in range(f.size) :
            self.assertAlmostEqual(f[ii], reff[ii], places = places)
        for ii in range(v.size) :
            self.assertAlmostEqual(v[ii], refv[ii], places = places)
Example #7
def compress(*, input: str, output: str, extrapolate: int, step: float,
             frequency: str, checkpoint_folder: str, training_script: str,
             mpi_log: str, log_path: Optional[str], log_level: int, **kwargs):
    """Compress model.

    The table is composed of fifth-order polynomial coefficients and is assembled from
    two sub-tables. The first table takes the step parameter as the domain's uniform step size,
    while the second table takes 10 * step as its uniform step size. The range of the
    first table is automatically detected by the code, while the second table ranges
    from the first table's upper boundary (upper) to extrapolate * upper.

    Parameters
    ----------
    input : str
        frozen model file to compress
    output : str
        compressed model filename
    extrapolate : int
        scale of model extrapolation
    step : float
        uniform step size of the tabulation's first table
    frequency : str
        frequency of tabulation overflow check
    checkpoint_folder : str
        training checkpoint folder for freezing
    training_script : str
        training script of the input frozen model
    mpi_log : str
        mpi logging mode for training
    log_path : Optional[str]
        if specified, log will be written to this file
    log_level : int
        logging level
    """
    try:
        t_jdata = get_tensor_by_name(input, 'train_attr/training_script')
        t_min_nbor_dist = get_tensor_by_name(input, 'train_attr/min_nbor_dist')
        jdata = json.loads(t_jdata)
    except GraphWithoutTensorError as e:
        if training_script is None:
            raise RuntimeError(
                "The input frozen model: %s has no training script or min_nbor_dist information, "
                "which is not supported by the model compression interface. "
                "Please consider using the --training-script command within the model compression interface to provide the training script of the input frozen model. "
                "Note that the input training script must contain the correct path to the training data."
                % input) from e
        elif not os.path.exists(training_script):
            raise RuntimeError(
                "The input training script %s (%s) does not exist! Please check the path of the training script. "
                % (training_script, os.path.abspath(training_script))) from e
        else:
            log.info("stage 0: compute the min_nbor_dist")
            jdata = j_loader(training_script)
            jdata = update_deepmd_input(jdata)
            t_min_nbor_dist = get_min_nbor_dist(jdata, get_rcut(jdata))

    _check_compress_type(input)

    tf.constant(t_min_nbor_dist,
                name='train_attr/min_nbor_dist',
                dtype=GLOBAL_ENER_FLOAT_PRECISION)
    jdata["model"]["compress"] = {}
    jdata["model"]["compress"]["model_file"] = input
    jdata["model"]["compress"]["min_nbor_dist"] = t_min_nbor_dist
    jdata["model"]["compress"]["table_config"] = [
        extrapolate,
        step,
        10 * step,
        int(frequency),
    ]
    jdata["training"]["save_ckpt"] = "model-compression/model.ckpt"
    jdata = update_deepmd_input(jdata)
    jdata = normalize(jdata)

    # check the descriptor info of the input file
    # move to the specific Descriptor class

    # stage 1: training or refining the model with tabulation
    log.info("\n\n")
    log.info("stage 1: compress the model")
    control_file = "compress.json"
    with open(control_file, "w") as fp:
        json.dump(jdata, fp, indent=4)
    try:
        train(
            INPUT=control_file,
            init_model=None,
            restart=None,
            init_frz_model=None,
            output=control_file,
            mpi_log=mpi_log,
            log_level=log_level,
            log_path=log_path,
            is_compress=True,
        )
    except GraphTooLargeError as e:
        raise RuntimeError(
            "The uniform step size of the tabulation's first table is %f, "
            "which is too small. This leads to a very large graph size, "
            "exceeding protobuf's limitation (2 GB). You should try to "
            "increase the step size." % step) from e

    # stage 2: freeze the model
    log.info("\n\n")
    log.info("stage 2: freeze the model")
    try:
        freeze(checkpoint_folder=checkpoint_folder,
               output=output,
               node_names=None)
    except GraphTooLargeError as e:
        raise RuntimeError(
            "The uniform step size of the tabulation's first table is %f, "
            "which is too small. This leads to a very large graph size, "
            "exceeding protobuf's limitation (2 GB). You should try to "
            "increase the step size." % step) from e
Example #8
    def test_model(self):
        jfile = 'water_se_r.json'
        jdata = j_loader(jfile)

        run_opt = RunOptions(None) 
        systems = j_must_have(jdata, 'systems')
        set_pfx = j_must_have(jdata, 'set_prefix')
        batch_size = j_must_have(jdata, 'batch_size')
        test_size = j_must_have(jdata, 'numb_test')
        batch_size = 1
        test_size = 1
        stop_batch = j_must_have(jdata, 'stop_batch')
        rcut = j_must_have (jdata['model']['descriptor'], 'rcut')
        
        data = DataSystem(systems, set_pfx, batch_size, test_size, rcut, run_opt = None)
        
        test_data = data.get_test ()
        numb_test = 1
        
        descrpt = DescrptSeR(jdata['model']['descriptor'])
        fitting = EnerFitting(jdata['model']['fitting_net'], descrpt)
        model = Model(jdata['model'], descrpt, fitting)

        # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']])
        input_data = {'coord' : [test_data['coord']], 
                      'box': [test_data['box']], 
                      'type': [test_data['type']],
                      'natoms_vec' : [test_data['natoms_vec']],
                      'default_mesh' : [test_data['default_mesh']]
        }
        model._compute_input_stat(input_data)
        model.descrpt.bias_atom_e = data.compute_energy_shift()

        t_prop_c           = tf.placeholder(tf.float32, [5],    name='t_prop_c')
        t_energy           = tf.placeholder(global_ener_float_precision, [None], name='t_energy')
        t_force            = tf.placeholder(global_tf_float_precision, [None], name='t_force')
        t_virial           = tf.placeholder(global_tf_float_precision, [None], name='t_virial')
        t_atom_ener        = tf.placeholder(global_tf_float_precision, [None], name='t_atom_ener')
        t_coord            = tf.placeholder(global_tf_float_precision, [None], name='i_coord')
        t_type             = tf.placeholder(tf.int32,   [None], name='i_type')
        t_natoms           = tf.placeholder(tf.int32,   [model.ntypes+2], name='i_natoms')
        t_box              = tf.placeholder(global_tf_float_precision, [None, 9], name='i_box')
        t_mesh             = tf.placeholder(tf.int32,   [None], name='i_mesh')
        is_training        = tf.placeholder(tf.bool)
        t_fparam = None

        model_pred\
            = model.build (t_coord, 
                           t_type, 
                           t_natoms, 
                           t_box, 
                           t_mesh,
                           t_fparam,
                           suffix = "se_r", 
                           reuse = False)
        energy = model_pred['energy']
        force  = model_pred['force']
        virial = model_pred['virial']
        atom_ener =  model_pred['atom_ener']

        feed_dict_test = {t_prop_c:        test_data['prop_c'],
                          t_energy:        test_data['energy']              [:numb_test],
                          t_force:         np.reshape(test_data['force']    [:numb_test, :], [-1]),
                          t_virial:        np.reshape(test_data['virial']   [:numb_test, :], [-1]),
                          t_atom_ener:     np.reshape(test_data['atom_ener'][:numb_test, :], [-1]),
                          t_coord:         np.reshape(test_data['coord']    [:numb_test, :], [-1]),
                          t_box:           test_data['box']                 [:numb_test, :],
                          t_type:          np.reshape(test_data['type']     [:numb_test, :], [-1]),
                          t_natoms:        test_data['natoms_vec'],
                          t_mesh:          test_data['default_mesh'],
                          is_training:     False}

        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        [e, f, v] = sess.run([energy, force, virial], 
                             feed_dict = feed_dict_test)

        e = e.reshape([-1])
        f = f.reshape([-1])
        v = v.reshape([-1])
        refe = [6.152085988309423925e+01]
        reff = [-1.714443151616400110e-04,-1.315836609370952051e-04,-5.584120460897444674e-06,-7.197863450669731334e-05,-1.384609799994930676e-04,8.856091902774708468e-06,1.120578238869146797e-04,-7.428703645877488470e-05,9.370560731488587317e-07,-1.048347129617610465e-04,1.977876923815685781e-04,7.522050342771599598e-06,2.361772659657814205e-04,-5.774651813388292487e-05,-1.233143271630744828e-05,2.257277740226381951e-08,2.042905031476775584e-04,6.003548585097267914e-07]
        refv = [1.035180911513190792e-03,-1.118982949050497126e-04,-2.383287813436022850e-05,-1.118982949050497126e-04,4.362023915782403281e-04,8.119543218224559240e-06,-2.383287813436022850e-05,8.119543218224559240e-06,1.201142938802945237e-06]
        refe = np.reshape(refe, [-1])
        reff = np.reshape(reff, [-1])
        refv = np.reshape(refv, [-1])

        places = 6
        for ii in range(e.size) :
            self.assertAlmostEqual(e[ii], refe[ii], places = places)
        for ii in range(f.size) :
            self.assertAlmostEqual(f[ii], reff[ii], places = places)
        for ii in range(v.size) :
            self.assertAlmostEqual(v[ii], refv[ii], places = places)
Example #9
    def test_model(self):
        jfile = 'water_se_a_fparam.json'
        jdata = j_loader(jfile)

        run_opt = RunOptions(None) 
        systems = j_must_have(jdata, 'systems')
        set_pfx = j_must_have(jdata, 'set_prefix')
        batch_size = j_must_have(jdata, 'batch_size')
        test_size = j_must_have(jdata, 'numb_test')
        batch_size = 1
        test_size = 1
        stop_batch = j_must_have(jdata, 'stop_batch')
        rcut = j_must_have (jdata['model']['descriptor'], 'rcut')
        
        data = DataSystem(systems, set_pfx, batch_size, test_size, rcut, run_opt = None)
        
        test_data = data.get_test ()
        numb_test = 1
        
        descrpt = DescrptSeA(jdata['model']['descriptor'])
        fitting = EnerFitting(jdata['model']['fitting_net'], descrpt)
        model = Model(jdata['model'], descrpt, fitting)

        # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']])
        input_data = {'coord' : [test_data['coord']], 
                      'box': [test_data['box']], 
                      'type': [test_data['type']],
                      'natoms_vec' : [test_data['natoms_vec']],
                      'default_mesh' : [test_data['default_mesh']],
                      'fparam': [test_data['fparam']],
        }
        model._compute_input_stat(input_data)
        model.descrpt.bias_atom_e = data.compute_energy_shift()

        t_prop_c           = tf.placeholder(tf.float32, [5],    name='t_prop_c')
        t_energy           = tf.placeholder(global_ener_float_precision, [None], name='t_energy')
        t_force            = tf.placeholder(global_tf_float_precision, [None], name='t_force')
        t_virial           = tf.placeholder(global_tf_float_precision, [None], name='t_virial')
        t_atom_ener        = tf.placeholder(global_tf_float_precision, [None], name='t_atom_ener')
        t_coord            = tf.placeholder(global_tf_float_precision, [None], name='i_coord')
        t_type             = tf.placeholder(tf.int32,   [None], name='i_type')
        t_natoms           = tf.placeholder(tf.int32,   [model.ntypes+2], name='i_natoms')
        t_box              = tf.placeholder(global_tf_float_precision, [None, 9], name='i_box')
        t_mesh             = tf.placeholder(tf.int32,   [None], name='i_mesh')
        t_fparam           = tf.placeholder(global_tf_float_precision, [None], name='i_fparam')
        is_training        = tf.placeholder(tf.bool)
        input_dict = {}
        input_dict['fparam'] = t_fparam

        model_pred\
            = model.build (t_coord, 
                           t_type, 
                           t_natoms, 
                           t_box, 
                           t_mesh,
                           input_dict,
                           suffix = "se_a_fparam", 
                           reuse = False)
        energy = model_pred['energy']
        force  = model_pred['force']
        virial = model_pred['virial']
        atom_ener =  model_pred['atom_ener']

        feed_dict_test = {t_prop_c:        test_data['prop_c'],
                          t_energy:        test_data['energy']              [:numb_test],
                          t_force:         np.reshape(test_data['force']    [:numb_test, :], [-1]),
                          t_virial:        np.reshape(test_data['virial']   [:numb_test, :], [-1]),
                          t_atom_ener:     np.reshape(test_data['atom_ener'][:numb_test, :], [-1]),
                          t_coord:         np.reshape(test_data['coord']    [:numb_test, :], [-1]),
                          t_box:           test_data['box']                 [:numb_test, :],
                          t_type:          np.reshape(test_data['type']     [:numb_test, :], [-1]),
                          t_natoms:        test_data['natoms_vec'],
                          t_mesh:          test_data['default_mesh'],
                          t_fparam:        np.reshape(test_data['fparam']   [:numb_test, :], [-1]),
                          is_training:     False}

        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        [e, f, v] = sess.run([energy, force, virial], 
                             feed_dict = feed_dict_test)

        e = e.reshape([-1])
        f = f.reshape([-1])
        v = v.reshape([-1])
        refe = [61.35473702079649]
        reff = [7.789591210641927388e-02,9.411176646369459609e-02,3.785806413688173194e-03,1.430830954178063386e-01,1.146964190520970150e-01,-1.320340288927138173e-02,-7.308720494747594776e-02,6.508269338140809657e-02,5.398739145542804643e-04,5.863268336973800898e-02,-1.603409523950408699e-01,-5.083084610994957619e-03,-2.551569799443983988e-01,3.087934885732580501e-02,1.508590526622844222e-02,4.863249399791078065e-02,-1.444292753594846324e-01,-1.125098094204559241e-03]
        refv = [-6.069498397488943819e-01,1.101778888191114192e-01,1.981907430646132409e-02,1.101778888191114608e-01,-3.315612988100872793e-01,-5.999739184898976799e-03,1.981907430646132756e-02,-5.999739184898974197e-03,-1.198656608172396325e-03]
        refe = np.reshape(refe, [-1])
        reff = np.reshape(reff, [-1])
        refv = np.reshape(refv, [-1])

        places = 10
        for ii in range(e.size) :
            self.assertAlmostEqual(e[ii], refe[ii], places = places)
        for ii in range(f.size) :
            self.assertAlmostEqual(f[ii], reff[ii], places = places)
        for ii in range(v.size) :
            self.assertAlmostEqual(v[ii], refv[ii], places = places)
Example #10
    def test_model(self):
        jfile = 'water.json'
        jdata = j_loader(jfile)
        run_opt = RunOptions(None)
        systems = j_must_have(jdata, 'systems')
        set_pfx = j_must_have(jdata, 'set_prefix')
        batch_size = j_must_have(jdata, 'batch_size')
        test_size = j_must_have(jdata, 'numb_test')
        batch_size = 1
        test_size = 1
        stop_batch = j_must_have(jdata, 'stop_batch')
        rcut = j_must_have(jdata['model']['descriptor'], 'rcut')

        data = DataSystem(systems,
                          set_pfx,
                          batch_size,
                          test_size,
                          rcut,
                          run_opt=None)

        test_data = data.get_test()
        numb_test = 1

        descrpt = DescrptLocFrame(jdata['model']['descriptor'])
        fitting = EnerFitting(jdata['model']['fitting_net'], descrpt)
        model = Model(jdata['model'], descrpt, fitting)

        # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']])
        input_data = {
            'coord': [test_data['coord']],
            'box': [test_data['box']],
            'type': [test_data['type']],
            'natoms_vec': [test_data['natoms_vec']],
            'default_mesh': [test_data['default_mesh']]
        }
        model._compute_input_stat(input_data)
        model.fitting.bias_atom_e = data.compute_energy_shift()

        t_prop_c = tf.placeholder(tf.float32, [5], name='t_prop_c')
        t_energy = tf.placeholder(global_ener_float_precision, [None],
                                  name='t_energy')
        t_force = tf.placeholder(global_tf_float_precision, [None],
                                 name='t_force')
        t_virial = tf.placeholder(global_tf_float_precision, [None],
                                  name='t_virial')
        t_atom_ener = tf.placeholder(global_tf_float_precision, [None],
                                     name='t_atom_ener')
        t_coord = tf.placeholder(global_tf_float_precision, [None],
                                 name='i_coord')
        t_type = tf.placeholder(tf.int32, [None], name='i_type')
        t_natoms = tf.placeholder(tf.int32, [model.ntypes + 2],
                                  name='i_natoms')
        t_box = tf.placeholder(global_tf_float_precision, [None, 9],
                               name='i_box')
        t_mesh = tf.placeholder(tf.int32, [None], name='i_mesh')
        is_training = tf.placeholder(tf.bool)
        t_fparam = None

        model_pred \
            = model.build (t_coord,
                           t_type,
                           t_natoms,
                           t_box,
                           t_mesh,
                           t_fparam,
                           suffix = "loc_frame",
                           reuse = False)
        energy = model_pred['energy']
        force = model_pred['force']
        virial = model_pred['virial']
        atom_ener = model_pred['atom_ener']

        feed_dict_test = {
            t_prop_c: test_data['prop_c'],
            t_energy: test_data['energy'][:numb_test],
            t_force: np.reshape(test_data['force'][:numb_test, :], [-1]),
            t_virial: np.reshape(test_data['virial'][:numb_test, :], [-1]),
            t_atom_ener: np.reshape(test_data['atom_ener'][:numb_test, :],
                                    [-1]),
            t_coord: np.reshape(test_data['coord'][:numb_test, :], [-1]),
            t_box: test_data['box'][:numb_test, :],
            t_type: np.reshape(test_data['type'][:numb_test, :], [-1]),
            t_natoms: test_data['natoms_vec'],
            t_mesh: test_data['default_mesh'],
            is_training: False
        }

        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test)

        e = e.reshape([-1])
        f = f.reshape([-1])
        v = v.reshape([-1])
        refe = [1.165945032784766511e+01]
        reff = [
            2.356319331246305437e-01, 1.772322096063349284e-01,
            1.455439548950788684e-02, 1.968599426000810226e-01,
            2.648214484898352983e-01, 7.595232354012236564e-02,
            -2.121321856338151401e-01, -2.463886119018566037e-03,
            -2.075636300914874069e-02, -9.360310077571798101e-03,
            -1.751965198776750943e-01, -2.046405309983102827e-02,
            -1.990194093283037535e-01, -1.828347741191920298e-02,
            -6.916374506995154325e-02, -1.197997068502068031e-02,
            -2.461097746875573200e-01, 1.987744214930105627e-02
        ]
        refv = [
            -4.998509978510510265e-01, -1.966169437179327711e-02,
            1.136130543869883977e-02, -1.966169437179334650e-02,
            -4.575353297894450555e-01, -2.668666556859019493e-03,
            1.136130543869887100e-02, -2.668666556859039876e-03,
            2.455466940358383508e-03
        ]
        refe = np.reshape(refe, [-1])
        reff = np.reshape(reff, [-1])
        refv = np.reshape(refv, [-1])

        places = 10
        for ii in range(e.size):
            self.assertAlmostEqual(e[ii], refe[ii], places=places)
        for ii in range(f.size):
            self.assertAlmostEqual(f[ii], reff[ii], places=places)
        for ii in range(v.size):
            self.assertAlmostEqual(v[ii], refv[ii], places=places)
Example #11
File: train.py  Project: y1xiaoc/deepmd-kit
def train(
    *,
    INPUT: str,
    init_model: Optional[str],
    restart: Optional[str],
    output: str,
    init_frz_model: str,
    mpi_log: str,
    log_level: int,
    log_path: Optional[str],
    is_compress: bool = False,
    skip_neighbor_stat: bool = False,
    **kwargs,
):
    """Run DeePMD model training.

    Parameters
    ----------
    INPUT : str
        json/yaml control file
    init_model : Optional[str]
        path to checkpoint folder or None
    restart : Optional[str]
        path to checkpoint folder or None
    output : str
        path for dump file with arguments
    init_frz_model : str
        path to frozen model or None
    mpi_log : str
        mpi logging mode
    log_level : int
        logging level defined by int 0-3
    log_path : Optional[str]
        logging file path or None if logs are to be output only to stdout
    is_compress: bool
        indicates whether in the model compress mode
    skip_neighbor_stat : bool, default=False
        skip checking neighbor statistics

    Raises
    ------
    RuntimeError
        if distributed training job name is wrong
    """
    run_opt = RunOptions(
        init_model=init_model,
        restart=restart,
        init_frz_model=init_frz_model,
        log_path=log_path,
        log_level=log_level,
        mpi_log=mpi_log
    )
    if run_opt.is_distrib and len(run_opt.gpus or []) > 1:
        # avoid conflict of visible gpus among multiple tf sessions in one process
        reset_default_tf_session_config(cpu_only=True)

    # load json database
    jdata = j_loader(INPUT)

    jdata = update_deepmd_input(jdata, warning=True, dump="input_v2_compat.json")

    jdata = normalize(jdata)

    if not is_compress and not skip_neighbor_stat:
        jdata = update_sel(jdata)

    with open(output, "w") as fp:
        json.dump(jdata, fp, indent=4)

    # save the training script into the graph
    tf.constant(json.dumps(jdata), name='train_attr/training_script', dtype=tf.string)

    for message in WELCOME + CITATION + BUILD:
        log.info(message)

    run_opt.print_resource_summary()
    _do_work(jdata, run_opt, is_compress)
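For completeness, a direct call of this train() entry point with its keyword-only arguments might look as follows; the values are illustrative assumptions (normally the dp train command line supplies them):

# illustrative values; the keyword names come from the signature above
train(
    INPUT='input.json',
    init_model=None,
    restart=None,
    output='out.json',
    init_frz_model=None,
    mpi_log='master',
    log_level=2,
    log_path=None,
    is_compress=False,
    skip_neighbor_stat=False,
)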
Example #12
    def test_convert_smth(self):
        jdata0 = j_loader(os.path.join('compat_inputs', 'water_se_a_v0.json'))
        jdata1 = j_loader(os.path.join('compat_inputs', 'water_se_a_v1.json'))
        jdata = convert_input_v0_v1(jdata0, warning=False, dump=None)
        self.assertEqual(jdata, jdata1)
Example #13
    def test_model(self):
        jfile = 'polar_se_a.json'
        jdata = j_loader(jfile)

        run_opt = RunOptions(None)
        systems = j_must_have(jdata, 'systems')
        set_pfx = j_must_have(jdata, 'set_prefix')
        batch_size = j_must_have(jdata, 'batch_size')
        test_size = j_must_have(jdata, 'numb_test')
        batch_size = 1
        test_size = 1
        stop_batch = j_must_have(jdata, 'stop_batch')
        rcut = j_must_have(jdata['model']['descriptor'], 'rcut')

        data = DataSystem(systems,
                          set_pfx,
                          batch_size,
                          test_size,
                          rcut,
                          run_opt=None)

        test_data = data.get_test()
        numb_test = 1

        descrpt = DescrptSeA(jdata['model']['descriptor'])
        fitting = PolarFittingSeA(jdata['model']['fitting_net'], descrpt)
        model = PolarModel(jdata['model'], descrpt, fitting)

        # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']])
        input_data = {
            'coord': [test_data['coord']],
            'box': [test_data['box']],
            'type': [test_data['type']],
            'natoms_vec': [test_data['natoms_vec']],
            'default_mesh': [test_data['default_mesh']],
            'fparam': [test_data['fparam']],
        }
        model._compute_input_stat(input_data)

        t_prop_c = tf.placeholder(tf.float32, [5], name='t_prop_c')
        t_energy = tf.placeholder(global_ener_float_precision, [None],
                                  name='t_energy')
        t_force = tf.placeholder(global_tf_float_precision, [None],
                                 name='t_force')
        t_virial = tf.placeholder(global_tf_float_precision, [None],
                                  name='t_virial')
        t_atom_ener = tf.placeholder(global_tf_float_precision, [None],
                                     name='t_atom_ener')
        t_coord = tf.placeholder(global_tf_float_precision, [None],
                                 name='i_coord')
        t_type = tf.placeholder(tf.int32, [None], name='i_type')
        t_natoms = tf.placeholder(tf.int32, [model.ntypes + 2],
                                  name='i_natoms')
        t_box = tf.placeholder(global_tf_float_precision, [None, 9],
                               name='i_box')
        t_mesh = tf.placeholder(tf.int32, [None], name='i_mesh')
        is_training = tf.placeholder(tf.bool)
        t_fparam = None

        model_pred \
            = model.build (t_coord,
                           t_type,
                           t_natoms,
                           t_box,
                           t_mesh,
                           t_fparam,
                           suffix = "polar_se_a",
                           reuse = False)
        polar = model_pred['polar']

        feed_dict_test = {
            t_prop_c: test_data['prop_c'],
            t_coord: np.reshape(test_data['coord'][:numb_test, :], [-1]),
            t_box: test_data['box'][:numb_test, :],
            t_type: np.reshape(test_data['type'][:numb_test, :], [-1]),
            t_natoms: test_data['natoms_vec'],
            t_mesh: test_data['default_mesh'],
            is_training: False
        }

        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        [p] = sess.run([polar], feed_dict=feed_dict_test)

        p = p.reshape([-1])
        refp = [
            3.39695248e+01, 2.16564043e+01, 8.18501479e-01, 2.16564043e+01,
            1.38211789e+01, 5.22775159e-01, 8.18501479e-01, 5.22775159e-01,
            1.97847218e-02, 8.08467431e-01, 3.42081126e+00, -2.01072261e-01,
            3.42081126e+00, 1.54924596e+01, -9.06153697e-01, -2.01072261e-01,
            -9.06153697e-01, 5.30193262e-02
        ]

        places = 6
        for ii in range(p.size):
            self.assertAlmostEqual(p[ii], refp[ii], places=places)
Example #14
    def test_model(self):
        jfile = 'wfc.json'
        jdata = j_loader(jfile)

        run_opt = RunOptions(None)
        systems = j_must_have(jdata, 'systems')
        set_pfx = j_must_have(jdata, 'set_prefix')
        batch_size = j_must_have(jdata, 'batch_size')
        test_size = j_must_have(jdata, 'numb_test')
        batch_size = 1
        test_size = 1
        stop_batch = j_must_have(jdata, 'stop_batch')
        rcut = j_must_have(jdata['model']['descriptor'], 'rcut')

        data = DataSystem(systems,
                          set_pfx,
                          batch_size,
                          test_size,
                          rcut,
                          run_opt=None)

        test_data = data.get_test()
        numb_test = 1

        descrpt = DescrptLocFrame(jdata['model']['descriptor'])
        fitting = WFCFitting(jdata['model']['fitting_net'], descrpt)
        model = WFCModel(jdata['model'], descrpt, fitting)

        input_data = {
            'coord': [test_data['coord']],
            'box': [test_data['box']],
            'type': [test_data['type']],
            'natoms_vec': [test_data['natoms_vec']],
            'default_mesh': [test_data['default_mesh']],
            'fparam': [test_data['fparam']],
        }
        model._compute_input_stat(input_data)

        t_prop_c = tf.placeholder(tf.float32, [5], name='t_prop_c')
        t_energy = tf.placeholder(global_ener_float_precision, [None],
                                  name='t_energy')
        t_force = tf.placeholder(global_tf_float_precision, [None],
                                 name='t_force')
        t_virial = tf.placeholder(global_tf_float_precision, [None],
                                  name='t_virial')
        t_atom_ener = tf.placeholder(global_tf_float_precision, [None],
                                     name='t_atom_ener')
        t_coord = tf.placeholder(global_tf_float_precision, [None],
                                 name='i_coord')
        t_type = tf.placeholder(tf.int32, [None], name='i_type')
        t_natoms = tf.placeholder(tf.int32, [model.ntypes + 2],
                                  name='i_natoms')
        t_box = tf.placeholder(global_tf_float_precision, [None, 9],
                               name='i_box')
        t_mesh = tf.placeholder(tf.int32, [None], name='i_mesh')
        is_training = tf.placeholder(tf.bool)
        t_fparam = None

        model_pred \
            = model.build (t_coord,
                           t_type,
                           t_natoms,
                           t_box,
                           t_mesh,
                           t_fparam,
                           suffix = "wfc",
                           reuse = False)
        wfc = model_pred['wfc']

        feed_dict_test = {
            t_prop_c: test_data['prop_c'],
            t_coord: np.reshape(test_data['coord'][:numb_test, :], [-1]),
            t_box: test_data['box'][:numb_test, :],
            t_type: np.reshape(test_data['type'][:numb_test, :], [-1]),
            t_natoms: test_data['natoms_vec'],
            t_mesh: test_data['default_mesh'],
            is_training: False
        }

        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        [p] = sess.run([wfc], feed_dict=feed_dict_test)

        p = p.reshape([-1])
        refp = [
            -9.105016838228578990e-01, 7.196284362034099935e-01,
            -9.548516928185298014e-02, 2.764615027095288724e+00,
            2.661319598995644520e-01, 7.579512949131941846e-02,
            -2.107409067376114997e+00, -1.299080016614967414e-01,
            -5.962778584850070285e-01, 2.913899917663253514e-01,
            -1.226917174638697094e+00, 1.829523069930876655e+00,
            1.015704024959750873e+00, -1.792333611099589386e-01,
            5.032898080485321834e-01, 1.808561721292949453e-01,
            2.468863482075112081e+00, -2.566442546384765100e-01,
            -1.467453783795173994e-01, -1.822963931552128658e+00,
            5.843600156865462747e-01, -1.493875280832117403e+00,
            1.693322352814763398e-01, -1.877325443995481624e+00
        ]

        places = 6
        for ii in range(p.size):
            self.assertAlmostEqual(p[ii], refp[ii], places=places)