def test_combined(self):
    """Normalize a dict argument with two optional children: defaults are
    filled in, aliased keys are canonicalized, and keys matching the trim
    pattern are dropped."""
    children = [
        Argument("sub1", int, optional=True, default=1, alias=["sub1a"]),
        Argument("sub2", str, optional=True, default="haha", alias=["sub2a"]),
    ]
    schema = Argument("base", dict, children)
    # An empty sub-dict gets populated with the declared defaults.
    empty_input = {"base": {}}
    expect_defaults = {"base": {"sub1": 1, "sub2": "haha"}}
    self.assertDictEqual(schema.normalize(empty_input), expect_defaults)
    self.assertDictEqual(
        schema.normalize_value(empty_input["base"]), expect_defaults["base"])
    # Aliased keys are renamed to their canonical form and "_*" keys trimmed.
    aliased_input = {"base": {"sub1a": 2, "sub2a": "hoho", "_comment": None}}
    expect_aliased = {"base": {"sub1": 2, "sub2": "hoho"}}
    self.assertDictEqual(
        schema.normalize(aliased_input, trim_pattern="_*"), expect_aliased)
    self.assertDictEqual(
        schema.normalize_value(aliased_input["base"], trim_pattern="_*"),
        expect_aliased["base"])
def normalize(data):
    """Normalize *data* against the combined argument schema (model,
    learning-rate, loss and training sections), trimming "_*" keys,
    then validate the result.

    Returns the normalized dict.
    """
    sections = [
        model_args(),
        learning_rate_args(),
        loss_args(),
        training_args(),
    ]
    schema = Argument("base", dict, sections)
    normalized = schema.normalize_value(data, trim_pattern="_*")
    schema.check_value(normalized)
    return normalized
def test_complicated(self):
    """Normalize a schema that combines a repeated list sub-argument with a
    variant selected by "vnt_flag": defaults, aliases (including variant tag
    aliases), trimming, and rejection of trim patterns that collide with
    real argument names."""
    repeated = Argument("sub2", list, [
        Argument("ss1", int, optional=True, default=21, alias=["ss1a"]),
    ], repeat=True, alias=["sub2a"])
    variant = Variant("vnt_flag", [
        Argument("type1", dict, [
            Argument("shared", int, optional=True, default=-1, alias=["shareda"]),
            Argument("vnt1", int, optional=True, default=111, alias=["vnt1a"]),
        ]),
        Argument("type2", dict, [
            Argument("shared", int, optional=True, default=-2, alias=["sharedb"]),
            Argument("vnt2", int, optional=True, default=222, alias=["vnt2a"]),
        ], alias=["type3"]),
    ], optional=True, default_tag="type1")
    schema = Argument("base", dict, [
        Argument("sub1", int, optional=True, default=1, alias=["sub1a"]),
        repeated,
    ], [variant])
    # Defaults only: the variant falls back to its default tag "type1".
    plain_in = {"base": {"sub2": [{}, {}]}}
    plain_out = {
        "base": {
            "sub1": 1,
            "sub2": [{"ss1": 21}, {"ss1": 21}],
            "vnt_flag": "type1",
            "shared": -1,
            "vnt1": 111,
        }
    }
    self.assertDictEqual(schema.normalize(plain_in), plain_out)
    self.assertDictEqual(
        schema.normalize_value(plain_in["base"]), plain_out["base"])
    # Aliases everywhere (including the variant tag "type3" -> "type2"),
    # with "_*" comment keys trimmed away.
    alias_in = {
        "base": {
            "sub1a": 2,
            "sub2a": [{"ss1a": 22}, {"_comment1": None}],
            "vnt_flag": "type3",
            "sharedb": -3,
            "vnt2a": 223,
            "_comment2": None,
        }
    }
    alias_out = {
        "base": {
            "sub1": 2,
            "sub2": [{"ss1": 22}, {"ss1": 21}],
            "vnt_flag": "type2",
            "shared": -3,
            "vnt2": 223,
        }
    }
    self.assertDictEqual(
        schema.normalize(alias_in, trim_pattern="_*"), alias_out)
    self.assertDictEqual(
        schema.normalize_value(alias_in["base"], trim_pattern="_*"),
        alias_out["base"])
    # A trim pattern that would swallow real arguments must be rejected.
    for bad_pattern in ("sub*", "vnt*"):
        with self.assertRaises(ValueError):
            schema.normalize(alias_in, trim_pattern=bad_pattern)
def gdi_main_loop(jdata, mdata, gdidata_dict, gdidata_cli=None, workflow=None):
    """Drive a Gibbs-Duhem integration: normalize the settings, integrate
    the Gibbs-Duhem function with an RK23 ODE solver, and write the
    resulting coexistence data to ``<output>/pb.out``.

    Parameters
    ----------
    jdata : dict
        Job settings; ``phase_i``/``phase_ii`` equilibrium configurations
        are read when ``if_water`` is set, and ``meam_model`` is forwarded.
    mdata : dict
        Machine settings, forwarded to ``GibbsDuhemFunc``.
    gdidata_dict : dict
        Base integration settings (schema given by ``gdiargs`` below).
    gdidata_cli : dict, optional
        Command-line settings; entries here override ``gdidata_dict``.
    workflow : optional
        Workflow handle forwarded to ``GibbsDuhemFunc``.

    Returns
    -------
    bool
        True on completion.
    """
    gdiargs = [
        Argument("begin", float, optional=False),
        Argument("end", float, optional=False),
        Argument("direction", str, optional=False),
        Argument("initial_value", float, optional=False),
        Argument("step_value", [list, float], optional=True, default=None),
        Argument("abs_tol", float, optional=True, default=10),
        Argument("rel_tol", float, optional=True, default=0.01),
        Argument("if_water", bool, optional=True, default=None),
        Argument("output", str, optional=True, default="new_job/"),
        Argument("first_step", float, optional=True, default=None),
        Argument("shift", list, optional=True, default=[0.0, 0.0]),
        Argument("verbose", bool, optional=True, default=True),
        Argument("if_meam", bool, optional=True, default=None),
    ]
    gdidata_format = Argument("gdidata", dict, gdiargs)
    # Avoid the mutable-default-argument pitfall: default to a fresh dict.
    if gdidata_cli is None:
        gdidata_cli = {}
    # Merge CLI overrides on top of the base settings, then normalize once.
    # (Previously the CLI dict was normalized and immediately discarded by
    # a second normalize of gdidata_dict, so CLI settings were ignored.)
    merged = dict(gdidata_dict)
    merged.update(gdidata_cli)
    gdidata = gdidata_format.normalize_value(merged)
    # Record the effective settings next to the output location.
    run_json = os.path.join(
        os.path.dirname(os.path.abspath(gdidata['output'])),
        'gdidata.run.json')
    with open(run_json, 'w') as f:
        json.dump(gdidata, f, indent=4)
    natoms = None
    if gdidata['if_water']:
        # For water, count molecules: each contributes 3 atoms.
        conf_0 = jdata['phase_i']['equi_conf']
        conf_1 = jdata['phase_ii']['equi_conf']
        natoms = [get_natoms(conf_0), get_natoms(conf_1)]
        natoms = [ii // 3 for ii in natoms]
    print('# natoms: ', natoms)
    print('# shifts: ', gdidata['shift'])
    meam_model = jdata.get('meam_model', None)
    gdf = GibbsDuhemFunc(
        jdata, mdata,
        task_path=gdidata['output'],
        inte_dir=gdidata['direction'],
        natoms=natoms,
        shift=gdidata['shift'],
        verbose=gdidata['verbose'],
        if_meam=gdidata['if_meam'],
        meam_model=meam_model,
        workflow=workflow)
    sol = solve_ivp(
        gdf,
        [gdidata['begin'], gdidata['end']],
        [gdidata['initial_value']],
        t_eval=gdidata['step_value'],
        method='RK23',
        atol=gdidata['abs_tol'],
        rtol=gdidata['rel_tol'],
        first_step=gdidata['first_step'])
    # Stack the independent and dependent variables into two rows; their
    # order depends on the integration direction ('t' puts sol.t first).
    if gdidata['direction'] == 't':
        tmp = np.concatenate([sol.t, sol.y[0]])
    else:
        tmp = np.concatenate([sol.y[0], sol.t])
    tmp = np.reshape(tmp, [2, -1])
    np.savetxt(os.path.join(gdidata['output'], 'pb.out'), tmp.T)
    return True
def make_task(iter_name, jdata, ens=None, temp=None, pres=None, if_dump_avg_posi=None, npt_dir=None):
    """Create an equilibration task directory under *iter_name*.

    Normalizes *jdata* (with the non-empty keyword overrides applied on
    top) against the ``equi`` schema, prepares the configuration and model
    files in the task directory, dumps the settings as JSON, and writes
    the generated LAMMPS input script.

    Parameters: *iter_name* is the task directory to create; *jdata* holds
    the base equilibration settings; *ens*, *temp*, *pres*,
    *if_dump_avg_posi* override the corresponding schema keys when not
    empty; *npt_dir*, when given, supplies a prior NPT run whose averaged
    configuration is used instead of ``equi_conf``.

    Returns the normalized ``equi_settings`` dict.
    """
    # Schema for the equilibration settings, including legacy aliases.
    equi_args = [
        Argument("equi_conf", str),
        Argument("mass_map", list, alias=['model_mass_map']),
        Argument("model", str),
        Argument("nsteps", int),
        Argument("timestep", float, alias=['dt']),
        Argument("ens", str),
        Argument("temp", int),
        Argument("pres", int),
        Argument("tau_t", float),
        Argument("tau_p", float),
        Argument("thermo_freq", int, alias=['stat_freq']),
        Argument("dump_freq", int),
        Argument("stat_skip", int),
        Argument("stat_bsize", int),
        Argument("if_dump_avg_posi", bool, optional=True, default=False),
        Argument("is_water", bool, optional=True, default=False, alias=['if_water']),
        Argument("if_meam", bool, optional=True, default=False),
        # NOTE(review): declared `list` with default=False, yet indexed
        # below as a mapping (meam_model['library']) — confirm the
        # intended type and default against callers.
        Argument("meam_model", list, optional=True, default=False),
    ]
    equi_format = Argument("equi", dict, equi_args)
    # Keyword overrides: empty/None values are dropped so they do not
    # clobber entries coming from jdata.
    equi_kwargs_settings = create_dict_not_empty_key(
        ens=ens, temp=temp, pres=pres,
        if_dump_avg_posi=if_dump_avg_posi,
        npt_dir=npt_dir)
    equi_pre_settings = jdata.copy()
    equi_pre_settings.update(equi_kwargs_settings)
    equi_settings = equi_format.normalize_value(equi_pre_settings)
    task_abs_dir = create_path(iter_name)
    if npt_dir is not None:
        # Start from the averaged configuration of a previous NPT run.
        npt_avg_conf_lmp = npt_equi_conf(npt_dir)
        with open(os.path.join(task_abs_dir, 'npt_avg.lmp'), 'w') as f:
            f.write(npt_avg_conf_lmp)
        equi_settings['equi_conf'] = 'npt_avg.lmp'
    else:
        # Link the user-supplied configuration into the task directory and
        # refer to it by its basename from now on.
        equi_conf = equi_settings['equi_conf']
        relative_link_file(equi_conf, task_abs_dir)
        equi_settings['equi_conf'] = os.path.basename(equi_conf)
    model = equi_settings['model']
    if model:
        relative_link_file(model, task_abs_dir)
        equi_settings['model'] = os.path.basename(model)
    if_meam = equi_settings.get('if_meam', None)
    meam_model = equi_settings.get('meam_model', None)
    if if_meam:
        # MEAM potentials need both their library and potential files
        # available inside the task directory.
        relative_link_file(meam_model['library'], task_abs_dir)
        relative_link_file(meam_model['potential'], task_abs_dir)
    # Persist the raw, override, and normalized settings for traceability.
    with open(os.path.join(task_abs_dir,
              'jdata.json'), 'w') as f:
        json.dump(jdata, f, indent=4)
    with open(os.path.join(task_abs_dir, 'equi_kwargs_setting.json'), 'w') as f:
        json.dump(equi_kwargs_settings, f, indent=4)
    with open(os.path.join(task_abs_dir, 'equi_settings.json'), 'w') as f:
        json.dump(equi_settings, f, indent=4)
    # Render and write the LAMMPS input script from the normalized settings.
    lmp_str = gen_equi_lammps_input(
        nsteps=equi_settings['nsteps'],
        thermo_freq=equi_settings['thermo_freq'],
        dump_freq=equi_settings['dump_freq'],
        mass_map=equi_settings['mass_map'],
        temp=equi_settings['temp'],
        tau_t=equi_settings['tau_t'],
        tau_p=equi_settings['tau_p'],
        equi_conf=equi_settings['equi_conf'],
        model=equi_settings['model'],
        timestep=equi_settings['timestep'],
        if_dump_avg_posi=equi_settings['if_dump_avg_posi'],
        ens=equi_settings['ens'],
        pres=equi_settings['pres'],
        if_meam=equi_settings['if_meam'],
        meam_model=equi_settings['meam_model'])
    with open(os.path.join(task_abs_dir, 'in.lammps'), 'w') as fp:
        fp.write(lmp_str)
    return equi_settings