예제 #1
0
 def test_convert_mdata(self):
     """convert_mdata with ["fp"] should flatten the machine file's fp
     section into top-level fp_* keys (command, group_size, machine, ...)."""
     # Use a context manager so the file handle is closed deterministically;
     # the original `json.load(open(machine_file))` never closed it.
     with open(machine_file) as fp:
         mdata = json.load(fp)
     mdata = convert_mdata(mdata, ["fp"])
     self.assertEqual(mdata["fp_command"], "vasp_std")
     self.assertEqual(mdata["fp_group_size"], 8)
     self.assertEqual(mdata["fp_machine"]["batch_type"], "PBS")
     self.assertEqual(mdata["fp_user_forward_files"], ["vdw_kernel.bindat"])
예제 #2
0
def run_equi(confs, inter_param, mdata):
    """Run the equilibrium (relaxation) tasks for every configuration.

    Parameters
    ----------
    confs : list of str
        Glob patterns resolving to configuration directories (mp-xxx style).
    inter_param : dict
        Interaction settings; ``inter_param['type']`` selects "vasp" or a
        LAMMPS task type.
    mdata : dict
        Machine/dispatch settings; normalized via ``convert_mdata``.

    Raises
    ------
    RuntimeError
        If ``inter_param['type']`` is neither "vasp" nor a known LAMMPS type.
    """
    # find all POSCARs and their name like mp-xxx
    # ...
    conf_dirs = []
    for conf in confs:
        conf_dirs.extend(glob.glob(conf))
    conf_dirs.sort()
    # generate a list of task names like mp-xxx/relaxation/relax_task
    # ...
    work_path_list = []
    for ii in conf_dirs:
        work_path_list.append(os.path.abspath(os.path.join(ii, 'relaxation')))
    all_task = []
    for ii in work_path_list:
        all_task.append(os.path.join(ii, 'relax_task'))

    inter_type = inter_param['type']
    # vasp
    if inter_type == "vasp":
        mdata = convert_mdata(mdata, ["fp"])
    elif inter_type in lammps_task_type:
        mdata = convert_mdata(mdata, ["model_devi"])
    else:
        raise RuntimeError("unknown task %s, something wrong" % inter_type)

    # dispatch the tasks
    # POSCAR here is useless
    virtual_calculator = make_calculator(inter_param, "POSCAR")
    forward_files = virtual_calculator.forward_files()
    forward_common_files = virtual_calculator.forward_common_files()
    backward_files = virtual_calculator.backward_files()
    #    backward_files += logs
    # ...
    run_tasks = util.collect_task(all_task, inter_type)
    if len(run_tasks) == 0:
        return
    run_tasks = [os.path.basename(ii) for ii in all_task]
    machine, resources, command, group_size = util.get_machine_info(
        mdata, inter_type)
    print('%d tasks will be submited ' % len(run_tasks))

    api_version = mdata.get('api_version', '0.9')
    # BUGFIX: the dispatch block below used to sit *after* this loop, so only
    # the last work_path was ever submitted (with a stale loop index) and the
    # dispatchers built inside the loop were discarded. Submit per work_path.
    for ii in range(len(work_path_list)):
        work_path = work_path_list[ii]
        print("%s --> Runing... " % (work_path))

        if LooseVersion(api_version) < LooseVersion('1.0'):
            # legacy dpdispatcher interface (deprecated)
            warnings.warn(
                f"the dpdispatcher will be updated to new version."
                f"And the interface may be changed. Please check the documents for more details"
            )
            disp = make_dispatcher(machine, resources, work_path,
                                   [run_tasks[ii]], group_size)
            disp.run_jobs(resources,
                          command,
                          work_path, [run_tasks[ii]],
                          group_size,
                          forward_common_files,
                          forward_files,
                          backward_files,
                          outlog='outlog',
                          errlog='errlog')
        else:
            # each work_path contains exactly one task (its own relax_task),
            # so submit only that task relative to this work_path
            submission = make_submission(
                mdata_machine=machine,
                mdata_resource=resources,
                commands=[command],
                work_path=work_path,
                run_tasks=[run_tasks[ii]],
                group_size=group_size,
                forward_common_files=forward_common_files,
                forward_files=forward_files,
                backward_files=backward_files,
                outlog='outlog',
                errlog='errlog')
            submission.run_submission()
예제 #3
0
def run_property(confs, inter_param, property_list, mdata):
    """Run every requested property calculation for every configuration,
    dispatching one worker per property via a multiprocessing pool.

    Parameters
    ----------
    confs : list of str
        Glob patterns resolving to configuration directories (mp-xxx style).
    inter_param : dict
        Default interaction settings; a property may override them through
        ``cal_setting['overwrite_interaction']``.
    property_list : list of dict
        Property descriptions ('type', optional 'skip', 'reproduce',
        suffix overrides, ...).
    mdata : dict
        Machine/dispatch settings; normalized via ``convert_mdata``.

    Raises
    ------
    RuntimeError
        On an unknown interaction type, or when any submitted job fails.
    """
    # find all POSCARs and their name like mp-xxx
    # ...
    processes = len(property_list)
    pool = Pool(processes=processes)
    print("Submit job via %d processes" % processes)
    conf_dirs = []
    for conf in confs:
        conf_dirs.extend(glob.glob(conf))
    conf_dirs.sort()
    task_list = []
    work_path_list = []
    multiple_ret = []
    for ii in conf_dirs:
        sepline(ch=ii, screen=True)
        for jj in property_list:
            # determine the suffix: from scratch or refine
            # ...
            if jj.get("skip", False):
                continue
            # BUGFIX: was `if 'init_from_suffix' and 'output_suffix' in jj:` —
            # the first operand is a truthy string literal, so the
            # init_from_suffix membership test was never actually performed.
            if 'init_from_suffix' in jj and 'output_suffix' in jj:
                suffix = jj['output_suffix']
            elif jj.get('reproduce', False):
                suffix = 'reprod'
            else:
                suffix = '00'

            property_type = jj['type']
            path_to_work = os.path.abspath(
                os.path.join(ii, property_type + '_' + suffix))

            work_path_list.append(path_to_work)
            tmp_task_list = glob.glob(
                os.path.join(path_to_work, 'task.[0-9]*[0-9]'))
            tmp_task_list.sort()
            task_list.append(tmp_task_list)

            # a property may override the global interaction settings
            inter_param_prop = inter_param
            if 'cal_setting' in jj and 'overwrite_interaction' in jj[
                    'cal_setting']:
                inter_param_prop = jj['cal_setting']['overwrite_interaction']

            # dispatch the tasks
            # POSCAR here is useless
            virtual_calculator = make_calculator(inter_param_prop, "POSCAR")
            forward_files = virtual_calculator.forward_files(property_type)
            forward_common_files = virtual_calculator.forward_common_files(
                property_type)
            backward_files = virtual_calculator.backward_files(property_type)
            #    backward_files += logs
            # ...
            inter_type = inter_param_prop['type']
            # vasp
            if inter_type == "vasp":
                mdata = convert_mdata(mdata, ["fp"])
            elif inter_type in lammps_task_type:
                mdata = convert_mdata(mdata, ["model_devi"])
            else:
                raise RuntimeError("unknown task %s, something wrong" %
                                   inter_type)

            work_path = path_to_work
            all_task = tmp_task_list
            run_tasks = util.collect_task(all_task, inter_type)
            if len(run_tasks) == 0:
                continue
            ret = pool.apply_async(worker, (
                work_path,
                all_task,
                forward_common_files,
                forward_files,
                backward_files,
                mdata,
                inter_type,
            ))
            multiple_ret.append(ret)
    pool.close()
    pool.join()
    # surface any worker failure instead of silently finishing
    for ii in range(len(multiple_ret)):
        if not multiple_ret[ii].successful():
            raise RuntimeError("Job %d is not successful!" % ii)
    print('%d jobs are finished' % len(multiple_ret))
예제 #4
0
def run_equi(confs, inter_param, mdata):
    """Run the equilibrium (relaxation) task of every configuration, one
    multiprocessing worker per configuration.

    Parameters
    ----------
    confs : list of str
        Glob patterns resolving to configuration directories (mp-xxx style).
    inter_param : dict
        Interaction settings; ``inter_param['type']`` selects "vasp" or a
        LAMMPS task type.
    mdata : dict
        Machine/dispatch settings; normalized via ``convert_mdata``.

    Raises
    ------
    RuntimeError
        On an unknown interaction type, or when any submitted task fails.
    """
    # find all POSCARs and their name like mp-xxx
    # ...
    conf_dirs = []
    for conf in confs:
        conf_dirs.extend(glob.glob(conf))
    conf_dirs.sort()

    # generate a list of task names like mp-xxx/relaxation/relax_task
    # ...
    work_path_list = []
    for ii in conf_dirs:
        work_path_list.append(os.path.abspath(os.path.join(ii, 'relaxation')))
    all_task = []
    for ii in work_path_list:
        all_task.append(os.path.join(ii, 'relax_task'))

    inter_type = inter_param['type']
    # vasp
    if inter_type == "vasp":
        mdata = convert_mdata(mdata, ["fp"])
    elif inter_type in lammps_task_type:
        mdata = convert_mdata(mdata, ["model_devi"])
    else:
        raise RuntimeError("unknown task %s, something wrong" % inter_type)

    # dispatch the tasks
    # POSCAR here is useless
    virtual_calculator = make_calculator(inter_param, "POSCAR")
    forward_files = virtual_calculator.forward_files()
    forward_common_files = virtual_calculator.forward_common_files()
    backward_files = virtual_calculator.backward_files()
    #    backward_files += logs
    # ...
    run_tasks = util.collect_task(all_task, inter_type)
    if len(run_tasks) == 0:
        return
    run_tasks = [os.path.basename(ii) for ii in all_task]
    machine, resources, command, group_size = util.get_machine_info(
        mdata, inter_type)
    print('%d tasks will be submited ' % len(run_tasks))

    # BUGFIX: create the pool only after we know there is work to do —
    # Pool(processes=0) raises ValueError on empty input, and the early
    # return above would otherwise leak live worker processes.
    processes = len(conf_dirs)
    pool = Pool(processes=processes)
    print("Submit job via %d processes" % processes)
    multiple_ret = []
    for ii in range(len(work_path_list)):
        work_path = work_path_list[ii]

        ret = pool.apply_async(worker, (
            work_path,
            run_tasks[ii],
            forward_common_files,
            forward_files,
            backward_files,
            mdata,
            inter_type,
        ))
        multiple_ret.append(ret)
    pool.close()
    pool.join()
    # surface any worker failure, pointing at the offending work path
    for ii in range(len(multiple_ret)):
        if not multiple_ret[ii].successful():
            raise RuntimeError(
                "Task %d is not successful! work_path: %s " %
                (ii, work_path_list[ii]))
    print('finished')
예제 #5
0
def gen_init_bulk(args):
    """Generate bulk initialization data, driven by the stages listed in the
    PARAM file: 1 relax, 2 perturb/scale, 3 short MD, 4 collect data.

    Parameters
    ----------
    args : argparse.Namespace
        Must provide ``PARAM`` (path to the parameter file) and ``MACHINE``
        (path to the machine file, or None for a dry local setup).

    Raises
    ------
    RuntimeError
        If an unknown stage number appears in ``jdata['stages']``.
    """
    # Prefer monty/ruamel (YAML-capable) loading; fall back to plain JSON.
    try:
        import ruamel
        from monty.serialization import loadfn, dumpfn
        warnings.simplefilter('ignore', ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
        jdata = loadfn(args.PARAM)
        if args.MACHINE is not None:
            mdata = loadfn(args.MACHINE)
    # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate
    except Exception:
        with open(args.PARAM, 'r') as fp:
            jdata = json.load(fp)
        if args.MACHINE is not None:
            with open(args.MACHINE, "r") as fp:
                mdata = json.load(fp)

    if args.MACHINE is not None:
        # Selecting a proper machine
        mdata = convert_mdata(mdata, ["fp"])
        # disp = make_dispatcher(mdata["fp_machine"])

    # Decide work path
    out_dir = out_dir_name(jdata)
    jdata['out_dir'] = out_dir
    dlog.info("# working dir %s" % out_dir)
    # Decide whether to use a given poscar
    from_poscar = jdata.get('from_poscar', False)
    # Verify md_nstep: if an md_incar file is given, its NSW wins over PARAM
    md_nstep_jdata = jdata["md_nstep"]
    try:
        md_incar = jdata['md_incar']
        if os.path.isfile(md_incar):
            standard_incar = incar_upper(Incar.from_file(md_incar))
            if "NSW" in standard_incar:
                nsw_steps = standard_incar['NSW']
                if nsw_steps != md_nstep_jdata:
                    dlog.info("WARNING: your set-up for MD steps in PARAM and md_incar are not consistent!")
                    dlog.info("MD steps in PARAM is %d"%(md_nstep_jdata))
                    dlog.info("MD steps in md_incar is %d"%(nsw_steps))
                    dlog.info("DP-GEN will use settings in md_incar!")
                    jdata['md_nstep'] = nsw_steps
    except KeyError:
        # no md_incar configured; keep the PARAM value
        pass
    ## correct element name: capitalize first letter only
    temp_elements = []
    for ele in jdata['elements']:
        temp_elements.append(ele[0].upper() + ele[1:])
    jdata['elements'] = temp_elements
    dlog.info("Elements are %s"% ' '.join(jdata['elements']))

    ## Iteration
    stage_list = [int(i) for i in jdata['stages']]
    for stage in stage_list:
        if stage == 1:
            dlog.info("Current stage is 1, relax")
            create_path(out_dir)
            shutil.copy2(args.PARAM, os.path.join(out_dir, 'param.json'))
            if from_poscar:
                make_super_cell_poscar(jdata)
            else:
                make_unit_cell(jdata)
                make_super_cell(jdata)
                place_element(jdata)
            if args.MACHINE is not None:
                make_vasp_relax(jdata, mdata)
                run_vasp_relax(jdata, mdata)
            else:
                # no machine given: only prepare inputs, do not run
                make_vasp_relax(jdata, {"fp_resources": {}})
        elif stage == 2:
            dlog.info("Current stage is 2, perturb and scale")
            make_scale(jdata)
            pert_scaled(jdata)
        elif stage == 3:
            dlog.info("Current stage is 3, run a short md")
            if args.MACHINE is not None:
                make_vasp_md(jdata, mdata)
                run_vasp_md(jdata, mdata)
            else:
                make_vasp_md(jdata, {"fp_resources": {}})
        elif stage == 4:
            dlog.info("Current stage is 4, collect data")
            coll_vasp_md(jdata)
        else:
            raise RuntimeError("unknown stage %d" % stage)