def run_equi(task_type, jdata, mdata):
    """Dispatch the equilibrium (00.equi) relaxation jobs.

    Builds the transfer-file lists for the chosen backend ("vasp" or one of
    ``lammps_task_type``), collects unfinished tasks and submits them
    through a dispatcher.
    """
    work_path = util.make_work_path(jdata, '00.equi', False, False, False)
    all_task = glob.glob(os.path.join(work_path, '.'))

    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
        forward_files = ['INCAR', 'POTCAR', 'KPOINTS']
        # clustered-vasp support: flag the resource and ship the wrapper script
        if jdata.get('cvasp') == True:
            mdata['fp_resources']['cvasp'] = True
            forward_files.append('cvasp.py')
        backward_files = ['OUTCAR', task_type + '.out', 'CONTCAR', 'OSZICAR']
        common_files = ['POSCAR']
    # lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
        forward_files = ['conf.lmp', 'lammps.in']
        backward_files = ['dump.relax', 'log.lammps', task_type + '.out']
        lmp_params = jdata['lammps_params']
        model_dir = os.path.abspath(lmp_params['model_dir'])
        model_name = lmp_params['model_name']
        if model_name:
            models = [os.path.join(model_dir, name) for name in model_name]
        else:
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(mm) for mm in models]
        common_files = model_name
        # multi-model deepmd runs also produce a model-deviation file
        if task_type == 'deepmd' and len(model_name) > 1:
            backward_files.append('model_devi.out')
    else:
        raise RuntimeError("unknow task %s, something wrong" % task_type)

    run_tasks = util.collect_task(all_task, task_type)
    if not run_tasks:
        return
    machine, resources, command, group_size = util.get_machine_info(mdata, task_type)
    disp = make_dispatcher(machine, resources, work_path, run_tasks, group_size)
    disp.run_jobs(resources,
                  command,
                  work_path,
                  run_tasks,
                  group_size,
                  common_files,
                  forward_files,
                  backward_files,
                  outlog=task_type + '.out',
                  errlog=task_type + '.err')
def run_surf(task_type, jdata, mdata):
    """Dispatch surface-formation (05.surf) jobs for the chosen backend."""
    static = jdata['static-opt']
    work_path = util.make_work_path(jdata, '05.surf', False, static, False)
    all_task = glob.glob(os.path.join(work_path, 'struct-*'))

    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
        forward_files = ['INCAR', 'POSCAR', 'POTCAR', 'KPOINTS']
        backward_files = ['OUTCAR', task_type + '.out', 'OSZICAR']
        common_files = ['INCAR', 'POTCAR']
        # clustered-vasp support: flag the resource and ship the wrapper script
        if jdata.get('cvasp') == True:
            mdata['fp_resources']['cvasp'] = True
            forward_files.append('cvasp.py')
    # lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
        lmp_params = jdata['lammps_params']
        model_dir = os.path.abspath(lmp_params['model_dir'])
        model_name = lmp_params['model_name']
        if model_name:
            models = [os.path.join(model_dir, name) for name in model_name]
        else:
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(mm) for mm in models]
        forward_files = ['conf.lmp', 'lammps.in'] + model_name
        backward_files = ['log.lammps', task_type + '.out']
        common_files = ['lammps.in'] + model_name
        # multi-model deepmd runs also produce a model-deviation file
        if task_type == 'deepmd' and len(model_name) > 1:
            backward_files.append('model_devi.out')
    else:
        raise RuntimeError("unknow task %s, something wrong" % task_type)

    run_tasks = util.collect_task(all_task, task_type)
    if len(run_tasks) == 0:
        return
    else:
        # if anything is unfinished, every struct-* directory is (re)submitted
        run_tasks = [os.path.basename(tt) for tt in all_task]
    machine, resources, command, group_size = util.get_machine_info(mdata, task_type)
    disp = make_dispatcher(machine, resources, work_path, run_tasks, group_size)
    disp.run_jobs(resources,
                  command,
                  work_path,
                  run_tasks,
                  group_size,
                  common_files,
                  forward_files,
                  backward_files,
                  outlog=task_type + '.out',
                  errlog=task_type + '.err')
def run_equi(confs, inter_param, mdata):
    """Submit one relaxation (relax_task) job per configuration directory."""
    # expand the configuration patterns into concrete mp-xxx directories
    conf_dirs = []
    for pattern in confs:
        conf_dirs.extend(glob.glob(pattern))
    conf_dirs.sort()

    # one mp-xxx/relaxation work path, each holding a single relax_task
    work_path_list = [os.path.abspath(os.path.join(cc, 'relaxation'))
                      for cc in conf_dirs]
    all_task = [os.path.join(wp, 'relax_task') for wp in work_path_list]

    inter_type = inter_param['type']
    # pick the machine section matching the backend
    if inter_type == "vasp":
        mdata = decide_fp_machine(mdata)
    elif inter_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
    else:
        raise RuntimeError("unknown task %s, something wrong" % inter_type)

    # POSCAR is a placeholder: only the transfer-file lists are queried
    virtual_calculator = make_calculator(inter_param, "POSCAR")
    forward_files = virtual_calculator.forward_files()
    forward_common_files = virtual_calculator.forward_common_files()
    backward_files = virtual_calculator.backward_files()

    run_tasks = util.collect_task(all_task, inter_type)
    if len(run_tasks) == 0:
        return
    else:
        # if anything is unfinished, every relax_task is (re)submitted
        run_tasks = [os.path.basename(tt) for tt in all_task]
    machine, resources, command, group_size = util.get_machine_info(mdata, inter_type)
    print('%d tasks will be submited '%len(run_tasks))
    for idx, work_path in enumerate(work_path_list):
        disp = make_dispatcher(machine, resources, work_path,
                               [run_tasks[idx]], group_size)
        print("%s --> Runing... "%(work_path))
        disp.run_jobs(resources,
                      command,
                      work_path,
                      [run_tasks[idx]],
                      group_size,
                      forward_common_files,
                      forward_files,
                      backward_files,
                      outlog='outlog',
                      errlog='errlog')
def run_vacancy(task_type, jdata, mdata):
    """Dispatch vacancy-formation (03.vacancy) jobs.

    Parameters
    ----------
    task_type : str
        "vasp" or one of ``lammps_task_type``.
    jdata, mdata : dict
        Task settings and machine settings.
    """
    work_path = util.make_work_path(jdata, '03.vacancy', False, False, False)
    all_task = glob.glob(os.path.join(work_path, 'struct-*'))

    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
        forward_files = ['INCAR', 'POSCAR', 'POTCAR']
        backward_files = ['OUTCAR', 'autotest.out', 'OSZICAR']
        common_files = ['INCAR', 'POTCAR']
    # lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
        fp_params = jdata['lammps_params']
        model_dir = os.path.abspath(fp_params['model_dir'])
        model_name = fp_params['model_name']
        if not model_name:
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(ii) for ii in models]
        else:
            models = [os.path.join(model_dir, ii) for ii in model_name]
        forward_files = ['conf.lmp', 'lammps.in'] + model_name
        backward_files = ['log.lammps', 'autotest.out']
        # fixed: dropped the earlier `common_files = model_name` assignment,
        # which was dead code (immediately overwritten here)
        common_files = ['lammps.in'] + model_name
        if len(model_name) > 1 and task_type == 'deepmd':
            backward_files = backward_files + ['model_devi.out']
    else:
        raise RuntimeError("unknow task %s, something wrong" % task_type)

    run_tasks = util.collect_task(all_task, task_type)
    if len(run_tasks) == 0:
        return
    # fixed: util.get_machine_info returns 4 values in this version of the
    # code (see the sibling run_* functions) — the old 6-tuple unpack raised
    # ValueError; make_dispatcher takes the full argument list like the
    # other dispatcher-based run_* functions
    machine, resources, command, group_size = util.get_machine_info(mdata, task_type)
    disp = make_dispatcher(machine, resources, work_path, run_tasks, group_size)
    disp.run_jobs(resources,
                  command,
                  work_path,
                  run_tasks,
                  group_size,
                  common_files,
                  forward_files,
                  backward_files,
                  outlog='autotest.out',
                  errlog='autotest.err')
def run_equi(confs, inter_param, mdata):
    """Submit the relaxation jobs found under each configuration directory.

    Parameters
    ----------
    confs : str
        Glob pattern matching the configuration directories (mp-xxx).
    inter_param : dict
        Interaction settings; ``inter_param['type']`` selects the backend.
    mdata : dict
        Machine settings.
    """
    # find all POSCARs and their name like mp-xxx
    conf_dirs = glob.glob(confs)
    conf_dirs.sort()

    # generate a list of task names like mp-xxx/relaxation
    work_path_list = []
    for ii in conf_dirs:
        work_path_list.append(os.path.join(ii, 'relaxation'))
    all_task = []
    for ii in work_path_list:
        all_task.append(os.path.join(ii, '.'))

    inter_type = inter_param['type']
    # vasp
    if inter_type == "vasp":
        mdata = decide_fp_machine(mdata)
    elif inter_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
    else:
        # fixed: was "% task_type" — a NameError, since this function's
        # variable is inter_type
        raise RuntimeError("unknown task %s, something wrong" % inter_type)

    # dispatch the tasks
    forward_files, forward_common_files, backward_files = make_task_trans_files(
        inter_param)

    run_tasks = util.collect_task(all_task, inter_type)
    if len(run_tasks) == 0:
        return
    else:
        run_tasks = [os.path.basename(ii) for ii in all_task]
    machine, resources, command, group_size = util.get_machine_info(
        mdata, inter_type)
    for ii in range(len(work_path_list)):
        work_path = work_path_list[ii]
        # fixed: make_dispatcher/run_jobs expect a *list* of task names
        # (cf. the sibling run_equi); a bare string would be iterated
        # character by character
        disp = make_dispatcher(machine, resources, work_path,
                               [run_tasks[ii]], group_size)
        disp.run_jobs(resources,
                      command,
                      work_path,
                      [run_tasks[ii]],
                      group_size,
                      forward_common_files,
                      forward_files,
                      backward_files,
                      outlog=inter_type + '.out',
                      errlog=inter_type + '.err')
def run_equi(task_type, jdata, mdata):
    """Submit the 00.equi relaxation jobs through the legacy ``_run`` helper."""
    work_path = util.make_work_path(jdata, '00.equi', False, False, False)
    all_task = glob.glob(os.path.join(work_path, '.'))

    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
        forward_files = ['INCAR', 'POTCAR']
        backward_files = ['OUTCAR', 'CONTCAR', 'OSZICAR']
        common_files = ['POSCAR']
    # lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
        forward_files = ['conf.lmp', 'lammps.in']
        backward_files = ['dump.relax', 'log.lammps', 'model_devi.log']
        lmp_params = jdata['lammps_params']
        model_dir = os.path.abspath(lmp_params['model_dir'])
        model_name = lmp_params['model_name']
        if model_name:
            models = [os.path.join(model_dir, name) for name in model_name]
        else:
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(mm) for mm in models]
        common_files = model_name
        # multi-model deepmd runs also produce a model-deviation file
        if task_type == 'deepmd' and len(model_name) > 1:
            backward_files = backward_files + ['model_devi.out']
    else:
        raise RuntimeError("unknow task %s, something wrong" % task_type)

    run_tasks = util.collect_task(all_task, task_type)
    machine, machine_type, ssh_sess, resources, command, group_size = \
        util.get_machine_info(mdata, task_type)
    _run(machine, machine_type, ssh_sess, resources, command, work_path,
         run_tasks, group_size, common_files, forward_files, backward_files)
def run_property(confs, inter_param, property_list, mdata):
    """Submit every property calculation under each configuration directory.

    One worker process per property is created and jobs are submitted
    asynchronously through ``pool.apply_async``.

    Parameters
    ----------
    confs : list of str
        Glob patterns matching configuration directories (mp-xxx).
    inter_param : dict
        Default interaction settings; a property may override them via
        ``cal_setting['overwrite_interaction']``.
    property_list : list of dict
        Property descriptions (type, suffix/reproduce options, ...).
    mdata : dict
        Machine settings.
    """
    processes = len(property_list)
    pool = Pool(processes=processes)
    print("Submit job via %d processes" % processes)
    conf_dirs = []
    for conf in confs:
        conf_dirs.extend(glob.glob(conf))
    conf_dirs.sort()
    task_list = []
    work_path_list = []
    # fixed: defined up-front so the final successful() check cannot raise
    # NameError when no job was ever submitted
    ret = None
    for ii in conf_dirs:
        sepline(ch=ii, screen=True)
        for jj in property_list:
            if jj.get("skip", False):
                continue
            # determine the suffix: from scratch or refine
            # fixed: the old test ("'init_from_suffix' and 'output_suffix' in jj")
            # only checked 'output_suffix', because a non-empty string literal
            # is always truthy
            if 'init_from_suffix' in jj and 'output_suffix' in jj:
                suffix = jj['output_suffix']
            elif 'reproduce' in jj and jj['reproduce']:
                suffix = 'reprod'
            else:
                suffix = '00'
            property_type = jj['type']
            path_to_work = os.path.abspath(
                os.path.join(ii, property_type + '_' + suffix))
            work_path_list.append(path_to_work)
            tmp_task_list = glob.glob(
                os.path.join(path_to_work, 'task.[0-9]*[0-9]'))
            tmp_task_list.sort()
            task_list.append(tmp_task_list)
            # a property may override the global interaction settings
            inter_param_prop = inter_param
            if 'cal_setting' in jj and 'overwrite_interaction' in jj[
                    'cal_setting']:
                inter_param_prop = jj['cal_setting']['overwrite_interaction']

            # POSCAR here is useless: only the transfer-file lists are queried
            virtual_calculator = make_calculator(inter_param_prop, "POSCAR")
            forward_files = virtual_calculator.forward_files(property_type)
            forward_common_files = virtual_calculator.forward_common_files(
                property_type)
            backward_files = virtual_calculator.backward_files(property_type)

            inter_type = inter_param_prop['type']
            # vasp
            if inter_type == "vasp":
                mdata = decide_fp_machine(mdata)
            elif inter_type in lammps_task_type:
                mdata = decide_model_devi_machine(mdata)
            else:
                raise RuntimeError("unknown task %s, something wrong" % inter_type)

            work_path = path_to_work
            all_task = tmp_task_list
            run_tasks = util.collect_task(all_task, inter_type)
            if len(run_tasks) == 0:
                # fixed: was "return", which aborted every remaining
                # property/configuration and skipped pool.close()/join()
                continue
            ret = pool.apply_async(worker, (
                work_path,
                all_task,
                forward_common_files,
                forward_files,
                backward_files,
                mdata,
                inter_type,
            ))
    pool.close()
    pool.join()
    # NOTE: only the last submitted job is checked (kept from the original)
    if ret is not None and ret.successful():
        print('finished')
def run_task(json_file, machine_file):
    """Drive a full auto-test run described by *json_file* / *machine_file*.

    Loads the task and machine settings, opens SSH sessions where needed,
    optionally generates configurations, always runs the equilibrium step,
    then runs the requested property task(s) and records progress.
    """
    with open(json_file, 'r') as fp:
        jdata = json.load(fp)
    with open(machine_file, 'r') as fp:
        mdata = json.load(fp)
    record = "record.auto_test"

    # model-deviation (lammps) machine: 'ucloud' machines need no SSH session
    model_devi_mdata = decide_model_devi_machine(mdata)
    model_devi_machine = model_devi_mdata['model_devi_machine']
    if ('machine_type' in model_devi_machine) and \
       (model_devi_machine['machine_type'] == 'ucloud'):
        model_devi_ssh_sess = None
    else:
        model_devi_ssh_sess = SSHSession(model_devi_machine)

    # first-principles (vasp) machine: same 'ucloud' special case
    fp_mdata = decide_fp_machine(mdata)
    fp_machine = fp_mdata['fp_machine']
    if ('machine_type' in fp_machine) and \
       (fp_machine['machine_type'] == 'ucloud'):
        fp_ssh_sess = None
    else:
        fp_ssh_sess = SSHSession(fp_machine)

    confs = jdata['conf_dir']
    ele_list = [key for key in jdata['potcar_map'].keys()]
    key_id = jdata['key_id']
    ii = jdata['task_type']  # backend, e.g. 'vasp' or a lammps flavour
    jj = jdata['task']       # which property task to run
    task_list = [
        'equi', 'eos', 'elastic', 'vacancy', 'interstitial', 'surf', 'phonon',
        'all'
    ]
    task_type_list = ['vasp'] + lammps_task_type
    if jj not in task_list:
        raise RuntimeError("unknow task %s, something wrong" % jj)
    if ii not in task_type_list:
        raise RuntimeError("unknow task type %s, something wrong" % ii)

    # gen_configuration: only when conf_dir contains 'confs' in its path and
    # no POSCAR exists there yet
    if 'confs' in confs and (not os.path.exists(confs + '/POSCAR')):
        print('generate %s' % (ele_list))
        if len(ele_list) == 1:
            gen_confs.gen_element(ele_list[0], key_id)
        else:
            gen_confs.gen_alloy(ele_list, key_id)

    # default task: the equilibrium step always runs
    log_iter("gen_equi", ii, "equi")
    gen_equi(ii, jdata, mdata)
    log_iter("run_equi", ii, "equi")
    run_equi(ii, jdata, mdata, model_devi_ssh_sess)
    log_iter("cmpt_equi", ii, "equi")
    cmpt_equi(ii, jdata, mdata)

    if jj == "eos" or jj == "all":
        log_iter("gen_eos", ii, "eos")
        gen_eos(ii, jdata, mdata)
        log_iter("run_eos", ii, "eos")
        run_eos(ii, jdata, mdata, model_devi_ssh_sess)
        log_iter("cmpt_eos", ii, "eos")
        cmpt_eos(ii, jdata, mdata)
    if jj == "elastic" or jj == "all":
        log_iter("gen_elastic", ii, "elastic")
        gen_elastic(ii, jdata, mdata)
        log_iter("run_elastic", ii, "elastic")
        run_elastic(ii, jdata, mdata, model_devi_ssh_sess)
        log_iter("cmpt_elastic", ii, "elastic")
        cmpt_elastic(ii, jdata, mdata)
    if jj == "vacancy" or jj == "all":
        log_iter("gen_vacancy", ii, "vacancy")
        gen_vacancy(ii, jdata, mdata)
        log_iter("run_vacancy", ii, "vacancy")
        run_vacancy(ii, jdata, mdata, model_devi_ssh_sess)
        log_iter("cmpt_vacancy", ii, "vacancy")
        cmpt_vacancy(ii, jdata, mdata)
    if jj == "interstitial" or jj == "all":
        log_iter("gen_interstitial", ii, "interstitial")
        gen_interstitial(ii, jdata, mdata)
        log_iter("run_interstitial", ii, "interstitial")
        run_interstitial(ii, jdata, mdata, model_devi_ssh_sess)
        log_iter("cmpt_interstitial", ii, "interstitial")
        cmpt_interstitial(ii, jdata, mdata)
    if jj == "surf" or jj == "all":
        log_iter("gen_surf", ii, "surf")
        gen_surf(ii, jdata, mdata)
        log_iter("run_surf", ii, "surf")
        run_surf(ii, jdata, mdata, model_devi_ssh_sess)
        log_iter("cmpt_surf", ii, "surf")
        cmpt_surf(ii, jdata, mdata)
    # phonon support is currently disabled
    '''
    if jj=="phonon":
        log_iter ("gen_phonon", ii, "phonon")
        gen_phonon (ii, jdata, mdata)
        log_iter ("run_phonon", ii, "phonon")
        run_phonon (ii, jdata, mdata,model_devi_ssh_sess)
        log_iter ("cmpt_phonon", ii, "phonon")
        cmpt_phonon (ii, jdata, mdata)
    '''
    record_iter(record, confs, ii, jj)
def run_surf(task_type, jdata, mdata, ssh_sess):
    """Submit surface (05.surf) jobs through the legacy ``_run`` helper.

    Unfinished struct-* tasks are detected per backend (missing/unfinished
    OUTCAR for vasp; missing "Final energy per atoms" line in log.lammps
    for lammps) and only those are resubmitted.
    """
    static = jdata['static-opt']
    work_path = make_work_path(jdata, '05.surf', False, static, False)
    all_task = glob.glob(os.path.join(work_path, 'struct-*'))
    #vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
        vasp_exec = mdata['fp_command']
        group_size = mdata['fp_group_size']
        resources = mdata['fp_resources']
        machine = mdata['fp_machine']
        machine_type = mdata['fp_machine']['machine_type']
        command = vasp_exec
        command = cmd_append_log(command, "log")
        # keep only tasks whose OUTCAR is absent or not finished
        run_tasks_ = []
        for ii in all_task:
            fres = os.path.join(ii, 'OUTCAR')
            if os.path.isfile(fres):
                if not vasp.check_finished(fres):
                    run_tasks_.append(ii)
            else:
                run_tasks_.append(ii)
        forward_files = ['INCAR', 'POSCAR', 'POTCAR']
        backward_files = ['OUTCAR', 'OSZICAR']
        common_files = ['INCAR', 'POTCAR']
    #lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
        lmp_exec = mdata['lmp_command']
        group_size = mdata['model_devi_group_size']
        resources = mdata['model_devi_resources']
        machine = mdata['model_devi_machine']
        machine_type = mdata['model_devi_machine']['machine_type']
        command = lmp_exec + " -i lammps.in"
        command = cmd_append_log(command, "model_devi.log")
        # a task is finished once log.lammps contains the final-energy line
        # (ignoring the echo of the print command itself)
        run_tasks_ = []
        for ii in all_task:
            fres = os.path.join(ii, 'log.lammps')
            if os.path.isfile(fres):
                with open(fres, 'r') as fp:
                    lines = fp.read().split('\n')
                flag = False
                for jj in lines:
                    if ("Final energy per atoms" in jj) and (not 'print' in jj):
                        flag = True
                if not flag:
                    run_tasks_.append(ii)
            else:
                run_tasks_.append(ii)
        fp_params = jdata['lammps_params']
        model_dir = fp_params['model_dir']
        model_dir = os.path.abspath(model_dir)
        model_name = fp_params['model_name']
        if not model_name:
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(ii) for ii in models]
        else:
            models = [os.path.join(model_dir, ii) for ii in model_name]
        forward_files = ['conf.lmp', 'lammps.in'] + model_name
        backward_files = ['log.lammps', 'model_devi.log']
        common_files = ['lammps.in'] + model_name
        # multi-model deepmd runs also produce a model-deviation file
        if len(model_name) > 1 and task_type == 'deepmd':
            backward_files = backward_files + ['model_devi.out']
    else:
        raise RuntimeError("unknow task %s, something wrong" % task_type)
    run_tasks = [os.path.basename(ii) for ii in run_tasks_]
    _run(machine, machine_type, ssh_sess, resources, command, work_path,
         run_tasks, group_size, common_files, forward_files, backward_files)
def run_iter(param_file, machine_file):
    """Run the simplify iteration loop.

    init (iter 0): init_pick

    tasks (iter > 0):
        00 make_train       (same as generator)
        01 run_train        (same as generator)
        02 post_train       (same as generator)
        03 make_model_devi
        04 run_model_devi
        05 post_model_devi
        06 make_fp
        07 run_fp           (same as generator)
        08 post_fp          (same as generator)
    """
    # TODO: function of handling input json should be combined as one function
    # prefer loadfn (yaml/json via monty); fall back to plain json
    try:
        import ruamel
        from monty.serialization import loadfn, dumpfn
        warnings.simplefilter(
            'ignore', ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
        jdata = loadfn(param_file)
        mdata = loadfn(machine_file)
    except:
        with open(param_file, 'r') as fp:
            jdata = json.load(fp)
        with open(machine_file, 'r') as fp:
            mdata = json.load(fp)

    # optionally re-dump the inputs in a pretty format next to the originals
    if jdata.get('pretty_print', False):
        fparam = SHORT_CMD+'_' + \
            param_file.split('.')[0]+'.'+jdata.get('pretty_format', 'json')
        dumpfn(jdata, fparam, indent=4)
        fmachine = SHORT_CMD+'_' + \
            machine_file.split('.')[0]+'.'+jdata.get('pretty_format', 'json')
        dumpfn(mdata, fmachine, indent=4)

    # optional SMTP log handler, decoupled from the main thread via a queue
    if mdata.get('handlers', None):
        if mdata['handlers'].get('smtp', None):
            que = queue.Queue(-1)
            queue_handler = logging.handlers.QueueHandler(que)
            smtp_handler = logging.handlers.SMTPHandler(
                **mdata['handlers']['smtp'])
            listener = logging.handlers.QueueListener(que, smtp_handler)
            dlog.addHandler(queue_handler)
            listener.start()

    max_tasks = 10000  # encodes (iter, task) as iter*max_tasks + task for restart comparison
    numb_task = 9
    record = "record.dpgen"
    # resume point: last completed (iteration, task) read from the record file
    iter_rec = [0, -1]
    if os.path.isfile(record):
        with open(record) as frec:
            for line in frec:
                iter_rec = [int(x) for x in line.split()]
        dlog.info("continue from iter %03d task %02d" %
                  (iter_rec[0], iter_rec[1]))

    cont = True
    ii = -1
    while cont:
        ii += 1
        iter_name = make_iter_name(ii)
        sepline(iter_name, '=')
        for jj in range(numb_task):
            # skip everything at or before the recorded resume point
            if ii * max_tasks + jj <= iter_rec[0] * max_tasks + iter_rec[1]:
                continue
            task_name = "task %02d" % jj
            sepline("{} {}".format(iter_name, task_name), '-')
            jdata['model_devi_jobs'] = [{} for _ in range(ii + 1)]
            if ii == 0 and jj < 6:
                # iteration 0 only does the initial picking
                if jj == 0:
                    log_iter("init_pick", ii, jj)
                    init_pick(ii, jdata, mdata)
                dlog.info("first iter, skip step 1-5")
            elif jj == 0:
                log_iter("make_train", ii, jj)
                make_train(ii, jdata, mdata)
            elif jj == 1:
                log_iter("run_train", ii, jj)
                mdata = decide_train_machine(mdata)
                disp = make_dispatcher(mdata['train_machine'])
                run_train(ii, jdata, mdata)
            elif jj == 2:
                log_iter("post_train", ii, jj)
                post_train(ii, jdata, mdata)
            elif jj == 3:
                log_iter("make_model_devi", ii, jj)
                # make_model_devi returns False when there is nothing left
                # to pick, which ends the outer while loop
                cont = make_model_devi(ii, jdata, mdata)
                if not cont or ii >= jdata.get("stop_iter", ii + 1):
                    break
            elif jj == 4:
                log_iter("run_model_devi", ii, jj)
                mdata = decide_model_devi_machine(mdata)
                disp = make_dispatcher(mdata['model_devi_machine'])
                run_model_devi(ii, jdata, mdata, disp)
            elif jj == 5:
                log_iter("post_model_devi", ii, jj)
                post_model_devi(ii, jdata, mdata)
            elif jj == 6:
                log_iter("make_fp", ii, jj)
                make_fp(ii, jdata, mdata)
            elif jj == 7:
                log_iter("run_fp", ii, jj)
                if jdata.get("labeled", False):
                    dlog.info("already have labeled data, skip run_fp")
                else:
                    mdata = decide_fp_machine(mdata)
                    disp = make_dispatcher(mdata['fp_machine'])
                    run_fp(ii, jdata, mdata)
            elif jj == 8:
                log_iter("post_fp", ii, jj)
                if jdata.get("labeled", False):
                    dlog.info("already have labeled data, skip post_fp")
                else:
                    post_fp(ii, jdata)
            else:
                raise RuntimeError("unknown task %d, something wrong" % jj)
            record_iter(record, ii, jj)
def run_interstitial(task_type, jdata, mdata):
    """Dispatch interstitial (04.interstitial) jobs.

    In reproduce mode (``jdata['reprod-opt']``) each struct-* directory is
    expanded into its frame.* sub-tasks and one dispatcher is run per
    struct directory.
    """
    reprod_opt = jdata['reprod-opt']
    work_path = util.make_work_path(jdata, '04.interstitial', reprod_opt,
                                    False, False)
    all_task = glob.glob(os.path.join(work_path, 'struct-*'))
    #vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
        forward_files = ['INCAR', 'POSCAR', 'POTCAR', "KPOINTS"]
        backward_files = ['OUTCAR', task_type+'.out', 'XDATCAR', 'OSZICAR']
        common_files = ['INCAR']
        # clustered-vasp support: flag the resource and ship the wrapper
        if ('cvasp' in jdata) and (jdata['cvasp'] == True):
            mdata['fp_resources']['cvasp'] = True
            forward_files.append('cvasp.py')
    #lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
        if reprod_opt:
            # reproduce mode: submit each frame.* instead of each struct-*;
            # work_path becomes the list of struct dirs
            all_frame = []
            for ii in all_task:
                all_frame += (glob.glob(os.path.join(ii, 'frame.*')))
            work_path = all_task
            all_task = all_frame
        # all tasks are resubmitted unconditionally (the finished-check
        # below was deliberately disabled)
        run_tasks_ = []
        for ii in all_task:
            # fres = os.path.join(ii, 'log.lammps')
            # if os.path.isfile(fres) :
            #     if not lammps.check_finished(fres):
            #         run_tasks_.append(ii)
            # else :
            #     run_tasks_.append(ii)
            run_tasks_.append(ii)
        fp_params = jdata['lammps_params']
        model_dir = fp_params['model_dir']
        model_dir = os.path.abspath(model_dir)
        model_name = fp_params['model_name']
        if not model_name:
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(ii) for ii in models]
        else:
            models = [os.path.join(model_dir, ii) for ii in model_name]
        forward_files = ['conf.lmp', 'lammps.in'] + model_name
        backward_files = ['log.lammps', task_type+'.out']
        common_files = ['lammps.in'] + model_name
        # multi-model deepmd runs also produce a model-deviation file
        if len(model_name) > 1 and task_type == 'deepmd':
            backward_files = backward_files + ['model_devi.out']
    else:
        raise RuntimeError("unknow task %s, something wrong" % task_type)
    machine, resources, command, group_size = util.get_machine_info(
        mdata, task_type)
    if reprod_opt:
        # one dispatcher per struct directory; tasks are matched to their
        # parent directory by substring
        for ii in work_path:
            run_tasks = []
            for jj in run_tasks_:
                if ii in jj:
                    run_tasks.append(os.path.basename(jj))
            # NOTE(review): work_path is a *list* of struct dirs here while
            # run_jobs below is given the single dir ii — confirm
            # make_dispatcher accepts this
            disp = make_dispatcher(machine, resources, work_path, run_tasks,
                                   group_size)
            disp.run_jobs(resources,
                          command,
                          ii,
                          run_tasks,
                          group_size,
                          common_files,
                          forward_files,
                          backward_files,
                          outlog=task_type+'.out',
                          errlog=task_type+'.err')
    else:
        run_tasks = util.collect_task(all_task, task_type)
        if len(run_tasks) == 0:
            return
        else:
            # if anything is unfinished, every struct-* is (re)submitted
            run_tasks = [os.path.basename(ii) for ii in all_task]
        disp = make_dispatcher(machine, resources, work_path, run_tasks,
                               group_size)
        disp.run_jobs(resources,
                      command,
                      work_path,
                      run_tasks,
                      group_size,
                      common_files,
                      forward_files,
                      backward_files,
                      outlog=task_type+'.out',
                      errlog=task_type+'.err')
def run_interstitial(task_type, jdata, mdata, ssh_sess):
    """Submit interstitial (04.interstitial) jobs via the legacy ``_run``.

    Resolves the work path from conf_dir and kspacing, detects unfinished
    tasks per backend, and in reproduce mode runs one ``_run`` per struct-*
    directory over its frame.* sub-tasks.
    """
    conf_dir = jdata['conf_dir']
    fp_params = jdata['vasp_params']
    kspacing = fp_params['kspacing']
    reprod_opt = jdata['reprod-opt']
    conf_path = os.path.abspath(conf_dir)
    task_path = re.sub('confs', '04.interstitial', conf_path)
    if task_type == "vasp":
        work_path = os.path.join(task_path, 'vasp-k%.2f' % kspacing)
    elif task_type in lammps_task_type:
        work_path = os.path.join(task_path, task_type)
        if reprod_opt:
            work_path = os.path.join(task_path,
                                     '%s-reprod-k%.2f' % (task_type, kspacing))
    assert (os.path.isdir(work_path))
    all_task = glob.glob(os.path.join(work_path, 'struct-*'))
    #vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
        vasp_exec = mdata['fp_command']
        group_size = mdata['fp_group_size']
        resources = mdata['fp_resources']
        machine = mdata['fp_machine']
        machine_type = mdata['fp_machine']['machine_type']
        command = vasp_exec
        command = cmd_append_log(command, "log")
        # keep only tasks whose OUTCAR is absent or not finished
        run_tasks_ = []
        for ii in all_task:
            fres = os.path.join(ii, 'OUTCAR')
            if os.path.isfile(fres):
                if not vasp.check_finished(fres):
                    run_tasks_.append(ii)
            else:
                run_tasks_.append(ii)
        forward_files = ['INCAR', 'POSCAR', 'POTCAR']
        backward_files = ['OUTCAR', 'XDATCAR', 'OSZICAR']
        common_files = ['INCAR']
    #lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
        lmp_exec = mdata['lmp_command']
        group_size = mdata['model_devi_group_size']
        resources = mdata['model_devi_resources']
        machine = mdata['model_devi_machine']
        machine_type = mdata['model_devi_machine']['machine_type']
        command = lmp_exec + " -i lammps.in"
        command = cmd_append_log(command, "model_devi.log")
        if reprod_opt:
            # reproduce mode: submit each frame.* instead of each struct-*;
            # work_path becomes the list of struct dirs
            all_frame = []
            for ii in all_task:
                all_frame += (glob.glob(os.path.join(ii, 'frame.*')))
            work_path = all_task
            all_task = all_frame
        # a task is finished once log.lammps contains the final-energy line
        # (ignoring the echo of the print command itself)
        run_tasks_ = []
        for ii in all_task:
            fres = os.path.join(ii, 'log.lammps')
            if os.path.isfile(fres):
                with open(fres, 'r') as fp:
                    lines = fp.read().split('\n')
                flag = False
                for jj in lines:
                    if ("Final energy per atoms" in jj) and (not 'print' in jj):
                        flag = True
                if not flag:
                    run_tasks_.append(ii)
            else:
                run_tasks_.append(ii)
        fp_params = jdata['lammps_params']
        model_dir = fp_params['model_dir']
        model_dir = os.path.abspath(model_dir)
        model_name = fp_params['model_name']
        if not model_name:
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(ii) for ii in models]
        else:
            models = [os.path.join(model_dir, ii) for ii in model_name]
        forward_files = ['conf.lmp', 'lammps.in'] + model_name
        backward_files = ['log.lammps', 'model_devi.log']
        common_files = ['lammps.in'] + model_name
        # multi-model runs also produce a model-deviation file
        if len(model_name) > 1:
            backward_files = backward_files + ['model_devi.out']
    else:
        raise RuntimeError("unknow task %s, something wrong" % task_type)
    if reprod_opt:
        # one _run per struct directory; frames are matched to their parent
        # directory by substring
        for ii in work_path:
            run_tasks = []
            for jj in run_tasks_:
                if ii in jj:
                    run_tasks.append(os.path.basename(jj))
            _run(machine, machine_type, ssh_sess, resources, command, ii,
                 run_tasks, group_size, common_files, forward_files,
                 backward_files)
    else:
        run_tasks = [os.path.basename(ii) for ii in run_tasks_]
        _run(machine, machine_type, ssh_sess, resources, command, work_path,
             run_tasks, group_size, common_files, forward_files,
             backward_files)
def run_elastic(task_type, jdata, mdata, ssh_sess):
    """Submit elastic-deformation (02.elastic, dfm-*) jobs via ``_run``.

    Unfinished tasks are detected per backend (missing/unfinished OUTCAR
    for vasp; missing 'Final Stress' line in log.lammps for lammps) and
    only those are resubmitted.
    """
    conf_dir = jdata['conf_dir']
    fp_params = jdata['vasp_params']
    kspacing = fp_params['kspacing']
    conf_path = os.path.abspath(conf_dir)
    task_path = re.sub('confs', '02.elastic', conf_path)
    if task_type == "vasp":
        work_path = os.path.join(task_path, 'vasp-k%.2f' % kspacing)
    elif task_type in lammps_task_type:
        work_path = os.path.join(task_path, task_type)
    assert (os.path.isdir(work_path))
    print(work_path)
    all_task = glob.glob(os.path.join(work_path, "dfm-*"))
    all_task.sort()
    #vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
        vasp_exec = mdata['fp_command']
        group_size = mdata['fp_group_size']
        resources = mdata['fp_resources']
        machine = mdata['fp_machine']
        machine_type = mdata['fp_machine']['machine_type']
        command = vasp_exec
        command = cmd_append_log(command, "log")
        # keep only tasks whose OUTCAR is absent or not finished
        run_tasks_ = []
        for ii in all_task:
            fres = os.path.join(ii, 'OUTCAR')
            if os.path.isfile(fres):
                if not vasp.check_finished(fres):
                    run_tasks_.append(ii)
            else:
                run_tasks_.append(ii)
        forward_files = ['INCAR', 'POSCAR', 'POTCAR', 'KPOINTS']
        backward_files = ['OUTCAR', 'CONTCAR', 'OSZICAR']
        common_files = ['INCAR', 'POTCAR', 'KPOINTS']
    #lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
        lmp_exec = mdata['lmp_command']
        group_size = mdata['model_devi_group_size']
        resources = mdata['model_devi_resources']
        machine = mdata['model_devi_machine']
        machine_type = mdata['model_devi_machine']['machine_type']
        command = lmp_exec + " -i lammps.in"
        command = cmd_append_log(command, "model_devi.log")
        # a task is finished once log.lammps contains the final-stress line
        # (ignoring the echo of the print command itself)
        run_tasks_ = []
        for ii in all_task:
            fres = os.path.join(ii, 'log.lammps')
            if os.path.isfile(fres):
                with open(fres, 'r') as fp:
                    lines = fp.read().split('\n')
                flag = False
                for jj in lines:
                    if ('Final Stress' in jj) and (not 'print' in jj):
                        flag = True
                if not flag:
                    run_tasks_.append(ii)
            else:
                run_tasks_.append(ii)
        fp_params = jdata['lammps_params']
        model_dir = fp_params['model_dir']
        model_dir = os.path.abspath(model_dir)
        model_name = fp_params['model_name']
        if not model_name:
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(ii) for ii in models]
        else:
            models = [os.path.join(model_dir, ii) for ii in model_name]
        forward_files = ['conf.lmp', 'lammps.in', 'strain.out'] + model_name
        backward_files = ['log.lammps', 'model_devi.log']
        common_files = ['lammps.in'] + model_name
        # multi-model runs also produce a model-deviation file
        if len(model_name) > 1:
            backward_files = backward_files + ['model_devi.out']
    else:
        raise RuntimeError("unknow task %s, something wrong" % task_type)
    run_tasks = [os.path.basename(ii) for ii in run_tasks_]
    _run(machine, machine_type, ssh_sess, resources, command, work_path,
         run_tasks, group_size, common_files, forward_files, backward_files)
def run_property(confs, inter_param, property_list, mdata):
    """Submit every property calculation under each configuration directory.

    Parameters
    ----------
    confs : str
        Glob pattern matching configuration directories (mp-xxx).
    inter_param : dict
        Interaction settings; ``inter_param['type']`` selects the backend.
    property_list : list of dict
        Property descriptions (type, suffix options, ...).
    mdata : dict
        Machine settings.
    """
    # find all POSCARs and their name like mp-xxx
    conf_dirs = glob.glob(confs)
    conf_dirs.sort()
    task_list = []
    work_path_list = []
    for ii in conf_dirs:
        for jj in property_list:
            # determine the suffix: from scratch or refine
            # fixed: the old test ("'init_from_suffix' and 'output_suffix' in jj")
            # only checked 'output_suffix', because a non-empty string literal
            # is always truthy
            if 'init_from_suffix' in jj and 'output_suffix' in jj:
                suffix = jj['output_suffix']
            else:
                # fixed: was the int 0, which made the path concatenation
                # below raise TypeError; the from-scratch suffix is '00'
                suffix = '00'
            property_type = jj['type']
            path_to_work = os.path.join(ii, property_type + '_' + suffix)
            work_path_list.append(path_to_work)
            tmp_task_list = glob.glob(
                os.path.join(path_to_work, 'task.[0-9]*[0-9]'))
            tmp_task_list.sort()
            task_list.append(tmp_task_list)

    # dispatch the tasks
    forward_files, forward_common_files, backward_files = make_task_trans_files(
        inter_param)

    task_type = inter_param['type']
    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
    else:
        raise RuntimeError("unknown task %s, something wrong" % task_type)

    for ii in range(len(work_path_list)):
        work_path = work_path_list[ii]
        all_task = task_list[ii]
        run_tasks = util.collect_task(all_task, task_type)
        if len(run_tasks) == 0:
            # fixed: was "return", which silently skipped all remaining
            # work paths once one had no unfinished tasks
            continue
        # if anything is unfinished, every task is (re)submitted
        run_tasks = [os.path.basename(kk) for kk in all_task]
        machine, resources, command, group_size = util.get_machine_info(
            mdata, task_type)
        disp = make_dispatcher(machine, resources, work_path, run_tasks,
                               group_size)
        disp.run_jobs(resources,
                      command,
                      work_path,
                      run_tasks,
                      group_size,
                      forward_common_files,
                      forward_files,
                      backward_files,
                      outlog=task_type + '.out',
                      errlog=task_type + '.err')