Esempio n. 1
0
def run_phonon(task_type, jdata, mdata):
    """Submit the prepared ``06.phonon`` tasks for execution.

    Parameters
    ----------
    task_type : str
        Either ``"vasp"`` or one of ``lammps_task_type``.
    jdata : dict
        Job parameters; the presence of ``'user_incar'`` selects the
        user-defined work path.
    mdata : dict
        Machine settings, refined by ``decide_fp_machine``.

    Raises
    ------
    RuntimeError
        If ``task_type`` is neither ``"vasp"`` nor a known lammps type.
    """
    user = ('user_incar' in jdata.keys())
    work_path = util.make_work_path(jdata, '06.phonon', False, False, user)

    all_task = glob.glob(os.path.join(work_path, '.'))

    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
        machine, machine_type, ssh_sess, resources, command, group_size = util.get_machine_info(
            mdata, task_type)

        run_tasks = util.collect_task(all_task, task_type)
        forward_files = ['INCAR', 'POTCAR', 'KPOINTS']
        backward_files = ['OUTCAR', 'OSZICAR', 'vasprun.xml']
        common_files = ['POSCAR']

        _run(machine, machine_type, ssh_sess, resources, command, work_path,
             run_tasks, group_size, common_files, forward_files,
             backward_files)
    # lammps: phonon runs are not implemented for lammps task types
    elif task_type in lammps_task_type:
        pass  # fix: a bare `None` expression was a no-op; `pass` states the intent
    else:
        # fix: "unknow" -> "unknown" in the error message
        raise RuntimeError("unknown task %s, something wrong" % task_type)
Esempio n. 2
0
def run_phonon(task_type, jdata, mdata):
    """Submit the prepared ``06.phonon`` tasks via the dispatcher.

    Parameters
    ----------
    task_type : str
        Either ``"vasp"`` or one of ``lammps_task_type``.
    jdata : dict
        Job parameters; ``'user_incar'`` selects the user work path and
        ``'cvasp'`` enables the cvasp.py wrapper.
    mdata : dict
        Machine settings, refined by ``decide_fp_machine``.

    Raises
    ------
    RuntimeError
        If ``task_type`` is neither ``"vasp"`` nor a known lammps type.
    """
    user = ('user_incar' in jdata.keys())
    work_path = util.make_work_path(jdata, '06.phonon', False, False, user)

    all_task = glob.glob(os.path.join(work_path, '.'))

    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
        machine, resources, command, group_size = util.get_machine_info(mdata, task_type)

        run_tasks = util.collect_task(all_task, task_type)
        # fix: 'KPOINTS' was listed twice in forward_files
        forward_files = ['INCAR', 'POTCAR', 'KPOINTS']
        backward_files = ['OUTCAR', task_type + '.out', 'OSZICAR', 'vasprun.xml']
        common_files = ['POSCAR']
        if jdata.get('cvasp', False):
            # run VASP through the cvasp.py wrapper script
            mdata['fp_resources']['cvasp'] = True
            forward_files.append('cvasp.py')

        disp = make_dispatcher(machine, resources, work_path, run_tasks, group_size)
        disp.run_jobs(resources,
                      command,
                      work_path,
                      run_tasks,
                      group_size,
                      common_files,
                      forward_files,
                      backward_files,
                      outlog=task_type + '.out',
                      errlog=task_type + '.err')
    # lammps: phonon runs are not implemented for lammps task types
    elif task_type in lammps_task_type:
        pass  # fix: a bare `None` expression was a no-op; `pass` states the intent
    else:
        raise RuntimeError("unknown task %s, something wrong" % task_type)
Esempio n. 3
0
def run_equi(task_type, jdata, mdata):
    """Submit the ``00.equi`` equilibrium tasks via the dispatcher.

    Parameters
    ----------
    task_type : str
        Either ``"vasp"`` or one of ``lammps_task_type``.
    jdata : dict
        Job parameters (``'cvasp'`` toggles the cvasp.py wrapper;
        ``'lammps_params'`` configures the lammps models).
    mdata : dict
        Machine settings.

    Raises
    ------
    RuntimeError
        If ``task_type`` is neither ``"vasp"`` nor a known lammps type.
    """
    work_path = util.make_work_path(jdata, '00.equi', False, False, False)
    all_task = glob.glob(os.path.join(work_path, '.'))

    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)

        forward_files = ['INCAR', 'POTCAR', 'KPOINTS']
        if jdata.get('cvasp', False):
            # run VASP through the cvasp.py wrapper script
            mdata['fp_resources']['cvasp'] = True
            forward_files.append('cvasp.py')
        backward_files = ['OUTCAR', task_type + '.out', 'CONTCAR', 'OSZICAR']
        common_files = ['POSCAR']

    # lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)

        forward_files = ['conf.lmp', 'lammps.in']
        backward_files = ['dump.relax', 'log.lammps', task_type + '.out']

        fp_params = jdata['lammps_params']
        model_dir = os.path.abspath(fp_params['model_dir'])
        model_name = fp_params['model_name']
        if not model_name:
            # no explicit names: pick up every *.pb model in model_dir
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(ii) for ii in models]
        else:
            models = [os.path.join(model_dir, ii) for ii in model_name]
        common_files = model_name

        # a deepmd model ensemble additionally produces a deviation file
        if len(model_name) > 1 and task_type == 'deepmd':
            backward_files = backward_files + ['model_devi.out']

    else:
        # fix: "unknow" -> "unknown" in the error message
        raise RuntimeError("unknown task %s, something wrong" % task_type)

    run_tasks = util.collect_task(all_task, task_type)
    if len(run_tasks) == 0:
        return
    machine, resources, command, group_size = util.get_machine_info(mdata, task_type)
    disp = make_dispatcher(machine, resources, work_path, run_tasks, group_size)
    disp.run_jobs(resources,
                  command,
                  work_path,
                  run_tasks,
                  group_size,
                  common_files,
                  forward_files,
                  backward_files,
                  outlog=task_type + '.out',
                  errlog=task_type + '.err')
Esempio n. 4
0
def run_surf(task_type, jdata, mdata):
    """Submit the ``05.surf`` surface-energy tasks via the dispatcher.

    Parameters
    ----------
    task_type : str
        Either ``"vasp"`` or one of ``lammps_task_type``.
    jdata : dict
        Job parameters; ``'static-opt'`` selects the static work path,
        ``'cvasp'`` toggles the cvasp.py wrapper.
    mdata : dict
        Machine settings.

    Raises
    ------
    RuntimeError
        If ``task_type`` is neither ``"vasp"`` nor a known lammps type.
    """
    static = jdata['static-opt']
    work_path = util.make_work_path(jdata, '05.surf', False, static, False)

    all_task = glob.glob(os.path.join(work_path, 'struct-*'))

    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)

        forward_files = ['INCAR', 'POSCAR', 'POTCAR', 'KPOINTS']
        backward_files = ['OUTCAR', task_type + '.out', 'OSZICAR']
        common_files = ['INCAR', 'POTCAR']
        if jdata.get('cvasp', False):
            # run VASP through the cvasp.py wrapper script
            mdata['fp_resources']['cvasp'] = True
            forward_files.append('cvasp.py')

    # lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)

        fp_params = jdata['lammps_params']
        model_dir = os.path.abspath(fp_params['model_dir'])
        model_name = fp_params['model_name']
        if not model_name:
            # no explicit names: pick up every *.pb model in model_dir
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(ii) for ii in models]
        else:
            models = [os.path.join(model_dir, ii) for ii in model_name]
        forward_files = ['conf.lmp', 'lammps.in'] + model_name
        backward_files = ['log.lammps', task_type + '.out']
        common_files = ['lammps.in'] + model_name

        # a deepmd model ensemble additionally produces a deviation file
        if len(model_name) > 1 and task_type == 'deepmd':
            backward_files = backward_files + ['model_devi.out']

    else:
        # fix: "unknow" -> "unknown" in the error message
        raise RuntimeError("unknown task %s, something wrong" % task_type)

    run_tasks = util.collect_task(all_task, task_type)
    if len(run_tasks) == 0:
        return
    # NOTE(review): run_tasks is deliberately rebuilt from *all* struct-* dirs
    # (not only the unfinished ones collected above) — confirm this is intended.
    run_tasks = [os.path.basename(ii) for ii in all_task]
    machine, resources, command, group_size = util.get_machine_info(mdata, task_type)
    disp = make_dispatcher(machine, resources, work_path, run_tasks, group_size)
    disp.run_jobs(resources,
                  command,
                  work_path,
                  run_tasks,
                  group_size,
                  common_files,
                  forward_files,
                  backward_files,
                  outlog=task_type + '.out',
                  errlog=task_type + '.err')
Esempio n. 5
0
def run_equi(confs,
             inter_param,
             mdata):
    """Dispatch one relaxation (equilibrium) task per configuration.

    Parameters
    ----------
    confs : list of str
        Glob patterns matching configuration directories (e.g. ``mp-xxx``).
    inter_param : dict
        Interaction parameters; ``'type'`` is ``"vasp"`` or a lammps type.
    mdata : dict
        Machine settings.

    Raises
    ------
    RuntimeError
        If the interaction type is not recognized.
    """
    # find all POSCARs and their name like mp-xxx
    conf_dirs = []
    for conf in confs:
        conf_dirs.extend(glob.glob(conf))
    conf_dirs.sort()
    # generate a list of task names like mp-xxx/relaxation/relax_task
    work_path_list = [os.path.abspath(os.path.join(ii, 'relaxation'))
                      for ii in conf_dirs]
    all_task = [os.path.join(ii, 'relax_task') for ii in work_path_list]

    inter_type = inter_param['type']
    # select machine settings for the interaction type
    if inter_type == "vasp":
        mdata = decide_fp_machine(mdata)
    elif inter_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
    else:
        raise RuntimeError("unknown task %s, something wrong" % inter_type)

    # dispatch the tasks
    # POSCAR here is useless
    virtual_calculator = make_calculator(inter_param, "POSCAR")
    forward_files = virtual_calculator.forward_files()
    forward_common_files = virtual_calculator.forward_common_files()
    backward_files = virtual_calculator.backward_files()

    run_tasks = util.collect_task(all_task, inter_type)
    if len(run_tasks) == 0:
        return
    run_tasks = [os.path.basename(ii) for ii in all_task]
    machine, resources, command, group_size = util.get_machine_info(mdata, inter_type)
    # fix: "submited" -> "submitted" in the user-facing message
    print('%d tasks will be submitted ' % len(run_tasks))
    for ii, work_path in enumerate(work_path_list):
        disp = make_dispatcher(machine, resources, work_path, [run_tasks[ii]], group_size)
        # fix: "Runing" -> "Running" in the user-facing message
        print("%s --> Running... " % (work_path))
        disp.run_jobs(resources,
                      command,
                      work_path,
                      [run_tasks[ii]],
                      group_size,
                      forward_common_files,
                      forward_files,
                      backward_files,
                      outlog='outlog',
                      errlog='errlog')
Esempio n. 6
0
def run_vacancy(task_type, jdata, mdata):
    """Submit the ``03.vacancy`` vacancy-formation tasks via the dispatcher.

    Parameters
    ----------
    task_type : str
        Either ``"vasp"`` or one of ``lammps_task_type``.
    jdata : dict
        Job parameters; ``'lammps_params'`` configures the lammps models.
    mdata : dict
        Machine settings.

    Raises
    ------
    RuntimeError
        If ``task_type`` is neither ``"vasp"`` nor a known lammps type.
    """
    work_path = util.make_work_path(jdata, '03.vacancy', False, False, False)
    all_task = glob.glob(os.path.join(work_path, 'struct-*'))

    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)

        forward_files = ['INCAR', 'POSCAR', 'POTCAR']
        backward_files = ['OUTCAR', 'autotest.out', 'OSZICAR']
        common_files = ['INCAR', 'POTCAR']

    # lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)

        fp_params = jdata['lammps_params']
        model_dir = os.path.abspath(fp_params['model_dir'])
        model_name = fp_params['model_name']
        if not model_name:
            # no explicit names: pick up every *.pb model in model_dir
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(ii) for ii in models]
        else:
            models = [os.path.join(model_dir, ii) for ii in model_name]
        # fix: a first `common_files = model_name` assignment was dead code,
        # immediately overwritten below; it has been removed.
        forward_files = ['conf.lmp', 'lammps.in'] + model_name
        backward_files = ['log.lammps', 'autotest.out']
        common_files = ['lammps.in'] + model_name

        # a deepmd model ensemble additionally produces a deviation file
        if len(model_name) > 1 and task_type == 'deepmd':
            backward_files = backward_files + ['model_devi.out']

    else:
        # fix: "unknow" -> "unknown" in the error message
        raise RuntimeError("unknown task %s, something wrong" % task_type)

    run_tasks = util.collect_task(all_task, task_type)
    if len(run_tasks) == 0:
        return
    # fix: in the dispatcher-based workflow util.get_machine_info returns four
    # values (cf. run_equi/run_surf); the old six-value unpack raised at runtime.
    machine, resources, command, group_size = util.get_machine_info(mdata, task_type)
    # fix: make_dispatcher takes the full argument set, as in the sibling run_* helpers
    disp = make_dispatcher(machine, resources, work_path, run_tasks, group_size)
    disp.run_jobs(resources,
                  command,
                  work_path,
                  run_tasks,
                  group_size,
                  common_files,
                  forward_files,
                  backward_files,
                  outlog='autotest.out',
                  errlog='autotest.err')
Esempio n. 7
0
def gen_init_surf(args):
    """Generate initial surface configurations for the selected stages.

    Stage 1 builds and relaxes the super cells; stage 2 scales and perturbs
    them.  When ``args.MACHINE`` is given, the relaxation is actually run on
    the selected machine.

    Parameters
    ----------
    args : argparse.Namespace
        Must carry ``PARAM`` (param file path) and ``MACHINE`` (machine file
        path or ``None``).

    Raises
    ------
    RuntimeError
        If a stage number other than 1 or 2 is requested.
    """
    try:
        import ruamel
        from monty.serialization import loadfn, dumpfn
        warnings.simplefilter('ignore',
                              ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
        jdata = loadfn(args.PARAM)
        if args.MACHINE is not None:
            mdata = loadfn(args.MACHINE)
    except Exception:
        # fix: bare `except:` narrowed; fall back to plain json when
        # ruamel/monty are unavailable or the files are not yaml
        with open(args.PARAM, 'r') as fp:
            jdata = json.load(fp)
        if args.MACHINE is not None:
            with open(args.MACHINE, "r") as fp:
                mdata = json.load(fp)

    out_dir = out_dir_name(jdata)
    jdata['out_dir'] = out_dir
    dlog.info("# working dir %s" % out_dir)

    if args.MACHINE is not None:
        # Decide a proper machine
        mdata = decide_fp_machine(mdata)
        fp_machine = mdata['fp_machine']
        fp_ssh_sess = SSHSession(fp_machine)

    stage_list = [int(i) for i in jdata['stages']]
    for stage in stage_list:
        if stage == 1:
            create_path(out_dir)
            make_super_cell_pymatgen(jdata)
            place_element(jdata)
            make_vasp_relax(jdata)
            if args.MACHINE is not None:
                run_vasp_relax(jdata, mdata, fp_ssh_sess)
        elif stage == 2:
            make_scale(jdata)
            pert_scaled(jdata)
        else:
            raise RuntimeError("unknown stage %d" % stage)
Esempio n. 8
0
def run_equi(confs, inter_param, mdata):
    """Dispatch one relaxation (equilibrium) task per configuration.

    Parameters
    ----------
    confs : str
        Glob pattern matching configuration directories (e.g. ``mp-xxx``).
    inter_param : dict
        Interaction parameters; ``'type'`` is ``"vasp"`` or a lammps type.
    mdata : dict
        Machine settings.

    Raises
    ------
    RuntimeError
        If the interaction type is not recognized.
    """
    # find all POSCARs and their name like mp-xxx
    conf_dirs = glob.glob(confs)
    conf_dirs.sort()
    # generate a list of task names like mp-xxx/relaxation
    work_path_list = [os.path.join(ii, 'relaxation') for ii in conf_dirs]
    all_task = [os.path.join(ii, '.') for ii in work_path_list]

    inter_type = inter_param['type']
    # select machine settings for the interaction type
    if inter_type == "vasp":
        mdata = decide_fp_machine(mdata)
    elif inter_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
    else:
        # fix: the error message referenced undefined `task_type`
        # (NameError); use `inter_type`, which is what was tested.
        raise RuntimeError("unknown task %s, something wrong" % inter_type)

    # dispatch the tasks
    forward_files, forward_common_files, backward_files = make_task_trans_files(
        inter_param)
    run_tasks = util.collect_task(all_task, inter_type)
    if len(run_tasks) == 0:
        return
    run_tasks = [os.path.basename(ii) for ii in all_task]
    machine, resources, command, group_size = util.get_machine_info(
        mdata, inter_type)
    for ii, work_path in enumerate(work_path_list):
        disp = make_dispatcher(machine, resources, work_path,
                               run_tasks[ii], group_size)
        disp.run_jobs(resources,
                      command,
                      work_path,
                      run_tasks[ii],
                      group_size,
                      forward_common_files,
                      forward_files,
                      backward_files,
                      outlog=inter_type + '.out',
                      errlog=inter_type + '.err')
Esempio n. 9
0
def run_phonon(task_type, jdata, mdata, ssh_sess):
    """Run the unfinished ``06.phonon`` VASP tasks under the confs tree.

    Parameters
    ----------
    task_type : str
        Either ``"vasp"`` or one of ``lammps_task_type``.
    jdata : dict
        Job parameters; needs ``'conf_dir'`` and ``'vasp_params'``.
    mdata : dict
        Machine settings.
    ssh_sess
        SSH session handed to ``_run`` for remote execution.

    Raises
    ------
    RuntimeError
        If ``task_type`` is neither ``"vasp"`` nor a known lammps type.
    """
    conf_dir = jdata['conf_dir']
    fp_params = jdata['vasp_params']
    kspacing = fp_params['kspacing']

    conf_path = os.path.abspath(conf_dir)
    task_path = re.sub('confs', '06.phonon', conf_path)
    if task_type == "vasp":
        work_path = os.path.join(task_path, 'vasp-k%.2f' % kspacing)
    elif task_type in lammps_task_type:
        work_path = os.path.join(task_path, task_type)
    else:
        # fix: an unknown task_type previously fell through and raised a
        # NameError on `work_path` below; fail fast with a clear message.
        raise RuntimeError("unknown task %s, something wrong" % task_type)
    assert (os.path.isdir(work_path))

    all_task = glob.glob(os.path.join(work_path, '.'))

    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
        vasp_exec = mdata['fp_command']
        group_size = mdata['fp_group_size']
        resources = mdata['fp_resources']
        machine = mdata['fp_machine']
        machine_type = mdata['fp_machine']['machine_type']
        command = cmd_append_log(vasp_exec, "log")

        # rerun tasks whose OUTCAR is missing or did not finish cleanly
        run_tasks_ = []
        for ii in all_task:
            fres = os.path.join(ii, 'OUTCAR')
            if os.path.isfile(fres):
                if not vasp.check_finished(fres):
                    run_tasks_.append(ii)
            else:
                run_tasks_.append(ii)

        run_tasks = [os.path.basename(ii) for ii in run_tasks_]
        forward_files = ['INCAR', 'POTCAR', 'KPOINTS']
        backward_files = ['OUTCAR', 'OSZICAR', 'vasprun.xml']
        common_files = ['POSCAR']

        _run(machine, machine_type, ssh_sess, resources, command, work_path,
             run_tasks, group_size, common_files, forward_files,
             backward_files)
    # lammps: phonon runs are not implemented for lammps task types
    elif task_type in lammps_task_type:
        pass  # fix: a bare `None` expression was a no-op; `pass` states the intent
Esempio n. 10
0
def run_equi(task_type, jdata, mdata):
    """Submit the ``00.equi`` equilibrium tasks through ``_run``.

    Parameters
    ----------
    task_type : str
        Either ``"vasp"`` or one of ``lammps_task_type``.
    jdata : dict
        Job parameters; ``'lammps_params'`` configures the lammps models.
    mdata : dict
        Machine settings.

    Raises
    ------
    RuntimeError
        If ``task_type`` is neither ``"vasp"`` nor a known lammps type.
    """
    work_path = util.make_work_path(jdata, '00.equi', False, False, False)
    all_task = glob.glob(os.path.join(work_path, '.'))

    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)

        forward_files = ['INCAR', 'POTCAR']
        backward_files = ['OUTCAR', 'CONTCAR', 'OSZICAR']
        common_files = ['POSCAR']

    # lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)

        forward_files = ['conf.lmp', 'lammps.in']
        backward_files = ['dump.relax', 'log.lammps', 'model_devi.log']

        fp_params = jdata['lammps_params']
        model_dir = os.path.abspath(fp_params['model_dir'])
        model_name = fp_params['model_name']
        if not model_name:
            # no explicit names: pick up every *.pb model in model_dir
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(ii) for ii in models]
        else:
            models = [os.path.join(model_dir, ii) for ii in model_name]
        common_files = model_name

        # a deepmd model ensemble additionally produces a deviation file
        if len(model_name) > 1 and task_type == 'deepmd':
            backward_files = backward_files + ['model_devi.out']

    else:
        # fix: "unknow" -> "unknown" in the error message
        raise RuntimeError("unknown task %s, something wrong" % task_type)

    run_tasks = util.collect_task(all_task, task_type)

    machine, machine_type, ssh_sess, resources, command, group_size = util.get_machine_info(
        mdata, task_type)

    _run(machine, machine_type, ssh_sess, resources, command, work_path,
         run_tasks, group_size, common_files, forward_files, backward_files)
Esempio n. 11
0
File: run.py Progetto: obaica/dpgen
def run_phonon(task_type, jdata, mdata, ssh_sess):
    """Run the unfinished ``06.phonon`` VASP tasks through ``_run``.

    Parameters
    ----------
    task_type : str
        Either ``"vasp"`` or one of ``lammps_task_type``.
    jdata : dict
        Job parameters; ``'user_incar'`` selects the user work path.
    mdata : dict
        Machine settings.
    ssh_sess
        SSH session handed to ``_run`` for remote execution.

    Raises
    ------
    RuntimeError
        If ``task_type`` is neither ``"vasp"`` nor a known lammps type.
    """
    user = ('user_incar' in jdata.keys())
    work_path = make_work_path(jdata, '06.phonon', False, False, user)

    all_task = glob.glob(os.path.join(work_path, '.'))

    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
        vasp_exec = mdata['fp_command']
        group_size = mdata['fp_group_size']
        resources = mdata['fp_resources']
        machine = mdata['fp_machine']
        machine_type = mdata['fp_machine']['machine_type']
        command = cmd_append_log(vasp_exec, "log")

        # rerun tasks whose OUTCAR is missing or did not finish cleanly
        run_tasks_ = []
        for ii in all_task:
            fres = os.path.join(ii, 'OUTCAR')
            if os.path.isfile(fres):
                if not vasp.check_finished(fres):
                    run_tasks_.append(ii)
            else:
                run_tasks_.append(ii)

        run_tasks = [os.path.basename(ii) for ii in run_tasks_]
        forward_files = ['INCAR', 'POTCAR', 'KPOINTS']
        backward_files = ['OUTCAR', 'OSZICAR', 'vasprun.xml']
        common_files = ['POSCAR']

        _run(machine, machine_type, ssh_sess, resources, command, work_path,
             run_tasks, group_size, common_files, forward_files,
             backward_files)
    # lammps: phonon runs are not implemented for lammps task types
    elif task_type in lammps_task_type:
        pass  # fix: a bare `None` expression was a no-op; `pass` states the intent
    else:
        # fix: "unknow" -> "unknown" in the error message
        raise RuntimeError("unknown task %s, something wrong" % task_type)
Esempio n. 12
0
def run_property(confs, inter_param, property_list, mdata):
    """Submit all property tasks (one worker process per property) and wait.

    Parameters
    ----------
    confs : list of str
        Glob patterns matching configuration directories.
    inter_param : dict
        Default interaction parameters; a property may override them via
        ``cal_setting['overwrite_interaction']``.
    property_list : list of dict
        Property descriptions; entries with ``'skip': True`` are ignored.
    mdata : dict
        Machine settings.

    Raises
    ------
    RuntimeError
        If a property's interaction type is not recognized.
    """
    processes = len(property_list)
    pool = Pool(processes=processes)
    print("Submit job via %d processes" % processes)
    conf_dirs = []
    for conf in confs:
        conf_dirs.extend(glob.glob(conf))
    conf_dirs.sort()
    task_list = []
    work_path_list = []
    # fix: `ret` could be unbound at the final check when no job was submitted
    ret = None
    for ii in conf_dirs:
        sepline(ch=ii, screen=True)
        for jj in property_list:
            # determine the suffix: from scratch or refine
            if jj.get("skip", False):
                continue
            # fix: the original test was `'init_from_suffix' and 'output_suffix' in jj`,
            # which only checked 'output_suffix' (the first operand is a truthy literal)
            if 'init_from_suffix' in jj and 'output_suffix' in jj:
                suffix = jj['output_suffix']
            elif 'reproduce' in jj and jj['reproduce']:
                suffix = 'reprod'
            else:
                suffix = '00'

            property_type = jj['type']
            path_to_work = os.path.abspath(
                os.path.join(ii, property_type + '_' + suffix))

            work_path_list.append(path_to_work)
            tmp_task_list = glob.glob(
                os.path.join(path_to_work, 'task.[0-9]*[0-9]'))
            tmp_task_list.sort()
            task_list.append(tmp_task_list)

            # a property may override the global interaction settings
            inter_param_prop = inter_param
            if 'cal_setting' in jj and 'overwrite_interaction' in jj[
                    'cal_setting']:
                inter_param_prop = jj['cal_setting']['overwrite_interaction']

            # dispatch the tasks
            # POSCAR here is useless
            virtual_calculator = make_calculator(inter_param_prop, "POSCAR")
            forward_files = virtual_calculator.forward_files(property_type)
            forward_common_files = virtual_calculator.forward_common_files(
                property_type)
            backward_files = virtual_calculator.backward_files(property_type)

            inter_type = inter_param_prop['type']
            # select machine settings for the interaction type
            if inter_type == "vasp":
                mdata = decide_fp_machine(mdata)
            elif inter_type in lammps_task_type:
                mdata = decide_model_devi_machine(mdata)
            else:
                raise RuntimeError("unknown task %s, something wrong" %
                                   inter_type)

            work_path = path_to_work
            all_task = tmp_task_list
            run_tasks = util.collect_task(all_task, inter_type)
            if len(run_tasks) == 0:
                # NOTE(review): this returns from the whole function without
                # pool.close()/join(); kept as-is to preserve behavior —
                # confirm `continue` was not the intent.
                return
            ret = pool.apply_async(worker, (
                work_path,
                all_task,
                forward_common_files,
                forward_files,
                backward_files,
                mdata,
                inter_type,
            ))
    pool.close()
    pool.join()
    if ret is not None and ret.successful():
        print('finished')
Esempio n. 13
0
def _main():
    """Entry point: parse args, validate settings, and run the init stages.

    Stages (from jdata['stages']): 1 relax, 2 perturb and scale,
    3 short MD, 4 collect data.
    """
    parser = argparse.ArgumentParser(description="gen init confs")
    parser.add_argument('PARAM',
                        type=str,
                        help="parameter file, json/yaml format")
    parser.add_argument(
        "MACHINE",
        type=str,
        help="The settings of the machine running the generator")
    args = parser.parse_args()

    try:
        import ruamel
        from monty.serialization import loadfn, dumpfn
        warnings.simplefilter('ignore',
                              ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
        jdata = loadfn(args.PARAM)
        mdata = loadfn(args.MACHINE)
    except Exception:
        # fix: bare `except:` narrowed; fall back to plain json when
        # ruamel/monty are unavailable or the files are not yaml
        with open(args.PARAM, 'r') as fp:
            jdata = json.load(fp)
        with open(args.MACHINE, "r") as fp:
            mdata = json.load(fp)
    # Selecting a proper machine
    mdata = decide_fp_machine(mdata)
    fp_machine = mdata['fp_machine']
    fp_ssh_sess = SSHSession(fp_machine)
    # Decide work path
    out_dir = out_dir_name(jdata)
    jdata['out_dir'] = out_dir
    print("# working dir %s" % out_dir)
    # Decide whether to use a given poscar
    from_poscar = jdata.get('from_poscar', False)
    # Verify md_nstep against the NSW setting in md_incar, if provided
    md_nstep_jdata = jdata["md_nstep"]
    try:
        md_incar = jdata['md_incar']
        if os.path.isfile(md_incar):
            with open(md_incar, "r") as fr:
                md_incar_lines = fr.readlines()
            nsw_flag = False
            for incar_line in md_incar_lines:
                line = incar_line.split()
                if "NSW" in line:
                    nsw_flag = True
                    nsw_steps = int(incar_line.split()[-1])
                    break
            if nsw_flag:
                if (nsw_steps != md_nstep_jdata):
                    print(
                        "WARNING: your set-up for MD steps in PARAM and md_incar are not consistent!"
                    )
                    print("MD steps in PARAM is %d" % (md_nstep_jdata))
                    # fix: the `%` operator was missing, so this line raised
                    # TypeError (silently swallowed by the except below)
                    print("MD steps in md_incar is %d" % (nsw_steps))
                    print("DP-GEN will use settings in md_incar!")
                    jdata['md_nstep'] = nsw_steps
    except Exception:
        # md_incar is optional; any problem reading it is non-fatal
        pass
    ## correct element name: capitalize the first letter only
    temp_elements = []
    for ele in jdata['elements']:
        temp_elements.append(ele[0].upper() + ele[1:])
    jdata['elements'] = temp_elements
    print("Elements are", jdata['elements'])

    ## Iteration
    stage_list = [int(i) for i in jdata['stages']]
    for stage in stage_list:
        if stage == 1:
            print("Current stage is 1, relax")
            create_path(out_dir)
            shutil.copy2(args.PARAM, os.path.join(out_dir, 'param.json'))
            if from_poscar:
                make_super_cell_poscar(jdata)
            else:
                make_unit_cell(jdata)
                make_super_cell(jdata)
                place_element(jdata)
            make_vasp_relax(jdata, mdata)
            run_vasp_relax(jdata, mdata, fp_ssh_sess)
        elif stage == 2:
            print("Current stage is 2, perturb and scale")
            make_scale(jdata)
            pert_scaled(jdata)
        elif stage == 3:
            print("Current stage is 3, run a short md")
            make_vasp_md(jdata)
            run_vasp_md(jdata, mdata, fp_ssh_sess)
        elif stage == 4:
            print("Current stage is 4, collect data")
            coll_vasp_md(jdata)
        else:
            raise RuntimeError("unknown stage %d" % stage)
Esempio n. 14
0
File: run.py Progetto: obaica/dpgen
def run_task(json_file, machine_file):
    """Drive the auto-test pipeline: generate confs, then gen/run/compute
    the requested property (or all of them).

    Parameters
    ----------
    json_file : str
        Path to the job parameter json (task, task_type, conf_dir, ...).
    machine_file : str
        Path to the machine settings json.

    Raises
    ------
    RuntimeError
        If the requested task or task type is not recognized.
    """
    with open(json_file, 'r') as fp:
        jdata = json.load(fp)
    with open(machine_file, 'r') as fp:
        mdata = json.load(fp)

    record = "record.auto_test"

    # ucloud machines are driven without an SSH session
    model_devi_mdata = decide_model_devi_machine(mdata)
    model_devi_machine = model_devi_mdata['model_devi_machine']
    if ('machine_type' in model_devi_machine) and  \
       (model_devi_machine['machine_type'] == 'ucloud'):
        model_devi_ssh_sess = None
    else:
        model_devi_ssh_sess = SSHSession(model_devi_machine)

    fp_mdata = decide_fp_machine(mdata)
    fp_machine = fp_mdata['fp_machine']
    if ('machine_type' in fp_machine) and  \
       (fp_machine['machine_type'] == 'ucloud'):
        fp_ssh_sess = None
    else:
        fp_ssh_sess = SSHSession(fp_machine)

    confs = jdata['conf_dir']
    ele_list = [key for key in jdata['potcar_map'].keys()]
    key_id = jdata['key_id']

    ii = jdata['task_type']
    jj = jdata['task']
    task_list = [
        'equi', 'eos', 'elastic', 'vacancy', 'interstitial', 'surf', 'phonon',
        'all'
    ]
    task_type_list = ['vasp'] + lammps_task_type
    if jj not in task_list:
        # fix: "unknow" -> "unknown" in the error messages
        raise RuntimeError("unknown task %s, something wrong" % jj)
    if ii not in task_type_list:
        raise RuntimeError("unknown task type %s, something wrong" % ii)

    # gen_configuration: build the alloy/element confs if they don't exist yet
    if 'confs' in confs and (not os.path.exists(confs + '/POSCAR')):
        print('generate %s' % (ele_list))
        if len(ele_list) == 1:
            gen_confs.gen_element(ele_list[0], key_id)
        else:
            gen_confs.gen_alloy(ele_list, key_id)
    # default task: equilibrium always runs first
    log_iter("gen_equi", ii, "equi")
    gen_equi(ii, jdata, mdata)
    log_iter("run_equi", ii, "equi")
    run_equi(ii, jdata, mdata, model_devi_ssh_sess)
    log_iter("cmpt_equi", ii, "equi")
    cmpt_equi(ii, jdata, mdata)
    if jj == "eos" or jj == "all":
        log_iter("gen_eos", ii, "eos")
        gen_eos(ii, jdata, mdata)
        log_iter("run_eos", ii, "eos")
        run_eos(ii, jdata, mdata, model_devi_ssh_sess)
        log_iter("cmpt_eos", ii, "eos")
        cmpt_eos(ii, jdata, mdata)
    if jj == "elastic" or jj == "all":
        log_iter("gen_elastic", ii, "elastic")
        gen_elastic(ii, jdata, mdata)
        log_iter("run_elastic", ii, "elastic")
        run_elastic(ii, jdata, mdata, model_devi_ssh_sess)
        log_iter("cmpt_elastic", ii, "elastic")
        cmpt_elastic(ii, jdata, mdata)
    if jj == "vacancy" or jj == "all":
        log_iter("gen_vacancy", ii, "vacancy")
        gen_vacancy(ii, jdata, mdata)
        log_iter("run_vacancy", ii, "vacancy")
        run_vacancy(ii, jdata, mdata, model_devi_ssh_sess)
        log_iter("cmpt_vacancy", ii, "vacancy")
        cmpt_vacancy(ii, jdata, mdata)
    if jj == "interstitial" or jj == "all":
        log_iter("gen_interstitial", ii, "interstitial")
        gen_interstitial(ii, jdata, mdata)
        log_iter("run_interstitial", ii, "interstitial")
        run_interstitial(ii, jdata, mdata, model_devi_ssh_sess)
        log_iter("cmpt_interstitial", ii, "interstitial")
        cmpt_interstitial(ii, jdata, mdata)
    if jj == "surf" or jj == "all":
        log_iter("gen_surf", ii, "surf")
        gen_surf(ii, jdata, mdata)
        log_iter("run_surf", ii, "surf")
        run_surf(ii, jdata, mdata, model_devi_ssh_sess)
        log_iter("cmpt_surf", ii, "surf")
        cmpt_surf(ii, jdata, mdata)
    # fix: the disabled phonon branch was a no-op triple-quoted string
    # expression; keep it as real comments instead:
    # if jj == "phonon":
    #     log_iter("gen_phonon", ii, "phonon")
    #     gen_phonon(ii, jdata, mdata)
    #     log_iter("run_phonon", ii, "phonon")
    #     run_phonon(ii, jdata, mdata, model_devi_ssh_sess)
    #     log_iter("cmpt_phonon", ii, "phonon")
    #     cmpt_phonon(ii, jdata, mdata)
    record_iter(record, confs, ii, jj)
Esempio n. 15
0
File: run.py Progetto: obaica/dpgen
def run_surf(task_type, jdata, mdata, ssh_sess):
    """Run the unfinished ``05.surf`` surface tasks through ``_run``.

    Parameters
    ----------
    task_type : str
        Either ``"vasp"`` or one of ``lammps_task_type``.
    jdata : dict
        Job parameters; needs ``'static-opt'`` and, for lammps,
        ``'lammps_params'``.
    mdata : dict
        Machine settings.
    ssh_sess
        SSH session handed to ``_run`` for remote execution.

    Raises
    ------
    RuntimeError
        If ``task_type`` is neither ``"vasp"`` nor a known lammps type.
    """
    static = jdata['static-opt']
    work_path = make_work_path(jdata, '05.surf', False, static, False)

    all_task = glob.glob(os.path.join(work_path, 'struct-*'))

    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
        vasp_exec = mdata['fp_command']
        group_size = mdata['fp_group_size']
        resources = mdata['fp_resources']
        machine = mdata['fp_machine']
        machine_type = mdata['fp_machine']['machine_type']
        command = cmd_append_log(vasp_exec, "log")

        # rerun tasks whose OUTCAR is missing or did not finish cleanly
        run_tasks_ = []
        for ii in all_task:
            fres = os.path.join(ii, 'OUTCAR')
            if os.path.isfile(fres):
                if not vasp.check_finished(fres):
                    run_tasks_.append(ii)
            else:
                run_tasks_.append(ii)

        forward_files = ['INCAR', 'POSCAR', 'POTCAR']
        backward_files = ['OUTCAR', 'OSZICAR']
        common_files = ['INCAR', 'POTCAR']

    # lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
        lmp_exec = mdata['lmp_command']
        group_size = mdata['model_devi_group_size']
        resources = mdata['model_devi_resources']
        machine = mdata['model_devi_machine']
        machine_type = mdata['model_devi_machine']['machine_type']
        command = cmd_append_log(lmp_exec + " -i lammps.in", "model_devi.log")

        # rerun tasks whose log.lammps lacks a computed final energy line
        run_tasks_ = []
        for ii in all_task:
            fres = os.path.join(ii, 'log.lammps')
            if os.path.isfile(fres):
                with open(fres, 'r') as fp:
                    lines = fp.read().split('\n')
                flag = False
                for jj in lines:
                    # skip the echoed `print` command itself
                    if ("Final energy per atoms"
                            in jj) and (not 'print' in jj):
                        flag = True
                if not flag:
                    run_tasks_.append(ii)
            else:
                run_tasks_.append(ii)

        fp_params = jdata['lammps_params']
        model_dir = os.path.abspath(fp_params['model_dir'])
        model_name = fp_params['model_name']
        if not model_name:
            # no explicit names: pick up every *.pb model in model_dir
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(ii) for ii in models]
        else:
            models = [os.path.join(model_dir, ii) for ii in model_name]
        forward_files = ['conf.lmp', 'lammps.in'] + model_name
        backward_files = ['log.lammps', 'model_devi.log']
        common_files = ['lammps.in'] + model_name

        # a deepmd model ensemble additionally produces a deviation file
        if len(model_name) > 1 and task_type == 'deepmd':
            backward_files = backward_files + ['model_devi.out']

    else:
        # fix: "unknow" -> "unknown" in the error message
        raise RuntimeError("unknown task %s, something wrong" % task_type)

    run_tasks = [os.path.basename(ii) for ii in run_tasks_]
    _run(machine, machine_type, ssh_sess, resources, command, work_path,
         run_tasks, group_size, common_files, forward_files, backward_files)
Esempio n. 16
0
def run_iter(param_file, machine_file):
    """ init (iter 0): init_pick

    tasks (iter > 0):
    00 make_train (same as generator)
    01 run_train (same as generator)
    02 post_train (same as generator)
    03 make_model_devi
    04 run_model_devi
    05 post_model_devi
    06 make_fp
    07 run_fp (same as generator)
    08 post_fp (same as generator)

    Parameters
    ----------
    param_file : str
        Path to the parameter file (json or yaml, loadable by monty).
    machine_file : str
        Path to the machine configuration file.
    """
    # TODO: function of handling input json should be combined as one function
    try:
        import ruamel
        from monty.serialization import loadfn, dumpfn
        warnings.simplefilter('ignore',
                              ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
        jdata = loadfn(param_file)
        mdata = loadfn(machine_file)
    except Exception:
        # Fall back to plain json when ruamel/monty are unavailable or fail
        # to parse.  (This was a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.)
        with open(param_file, 'r') as fp:
            jdata = json.load(fp)
        with open(machine_file, 'r') as fp:
            mdata = json.load(fp)

    if jdata.get('pretty_print', False):
        # NOTE(review): `dumpfn` is only bound if the monty import above
        # succeeded; if the json fallback was taken this branch raises
        # NameError -- confirm pretty_print is only used with monty installed.
        fparam = SHORT_CMD+'_' + \
            param_file.split('.')[0]+'.'+jdata.get('pretty_format', 'json')
        dumpfn(jdata, fparam, indent=4)
        fmachine = SHORT_CMD+'_' + \
            machine_file.split('.')[0]+'.'+jdata.get('pretty_format', 'json')
        dumpfn(mdata, fmachine, indent=4)

    # Optional email notification: route log records through a queue to an
    # SMTP handler so logging never blocks on the mail server.
    if mdata.get('handlers', None):
        if mdata['handlers'].get('smtp', None):
            que = queue.Queue(-1)
            queue_handler = logging.handlers.QueueHandler(que)
            smtp_handler = logging.handlers.SMTPHandler(
                **mdata['handlers']['smtp'])
            listener = logging.handlers.QueueListener(que, smtp_handler)
            dlog.addHandler(queue_handler)
            listener.start()

    # Restart bookkeeping: record.dpgen stores "iter task" pairs; the last
    # line tells us where to resume.
    max_tasks = 10000
    numb_task = 9
    record = "record.dpgen"
    iter_rec = [0, -1]
    if os.path.isfile(record):
        with open(record) as frec:
            for line in frec:
                iter_rec = [int(x) for x in line.split()]
        dlog.info("continue from iter %03d task %02d" %
                  (iter_rec[0], iter_rec[1]))

    cont = True
    ii = -1
    while cont:
        ii += 1
        iter_name = make_iter_name(ii)
        sepline(iter_name, '=')
        for jj in range(numb_task):
            # Skip everything already recorded as done.
            if ii * max_tasks + jj <= iter_rec[0] * max_tasks + iter_rec[1]:
                continue
            task_name = "task %02d" % jj
            sepline("{} {}".format(iter_name, task_name), '-')
            # One (empty) model_devi job entry per iteration so downstream
            # steps can index jdata['model_devi_jobs'][ii].
            jdata['model_devi_jobs'] = [{} for _ in range(ii + 1)]
            if ii == 0 and jj < 6:
                # First iteration bootstraps from init_pick; steps 1-5 have
                # nothing to work on yet.
                if jj == 0:
                    log_iter("init_pick", ii, jj)
                    init_pick(ii, jdata, mdata)
                dlog.info("first iter, skip step 1-5")
            elif jj == 0:
                log_iter("make_train", ii, jj)
                make_train(ii, jdata, mdata)
            elif jj == 1:
                log_iter("run_train", ii, jj)
                mdata = decide_train_machine(mdata)
                # NOTE(review): `disp` is created but never passed to
                # run_train -- confirm whether run_train builds its own
                # dispatcher internally or this is dead code.
                disp = make_dispatcher(mdata['train_machine'])
                run_train(ii, jdata, mdata)
            elif jj == 2:
                log_iter("post_train", ii, jj)
                post_train(ii, jdata, mdata)
            elif jj == 3:
                log_iter("make_model_devi", ii, jj)
                # make_model_devi returns False when there is nothing left to
                # explore; that (or reaching stop_iter) ends the outer loop.
                cont = make_model_devi(ii, jdata, mdata)
                if not cont or ii >= jdata.get("stop_iter", ii + 1):
                    break
            elif jj == 4:
                log_iter("run_model_devi", ii, jj)
                mdata = decide_model_devi_machine(mdata)
                disp = make_dispatcher(mdata['model_devi_machine'])
                run_model_devi(ii, jdata, mdata, disp)
            elif jj == 5:
                log_iter("post_model_devi", ii, jj)
                post_model_devi(ii, jdata, mdata)
            elif jj == 6:
                log_iter("make_fp", ii, jj)
                make_fp(ii, jdata, mdata)
            elif jj == 7:
                log_iter("run_fp", ii, jj)
                if jdata.get("labeled", False):
                    dlog.info("already have labeled data, skip run_fp")
                else:
                    mdata = decide_fp_machine(mdata)
                    # NOTE(review): `disp` unused here as well -- run_fp is
                    # called without it; confirm.
                    disp = make_dispatcher(mdata['fp_machine'])
                    run_fp(ii, jdata, mdata)
            elif jj == 8:
                log_iter("post_fp", ii, jj)
                if jdata.get("labeled", False):
                    dlog.info("already have labeled data, skip post_fp")
                else:
                    post_fp(ii, jdata)
            else:
                raise RuntimeError("unknown task %d, something wrong" % jj)
            # Persist progress after every completed step so a crash resumes
            # at the right place.
            record_iter(record, ii, jj)
Esempio n. 17
0
def gen_init_bulk(args):
    """Generate bulk initial training data.

    Runs the stages selected in jdata['stages']:
      1 - build the (super)cell and relax it with VASP
      2 - perturb and scale the relaxed structures
      3 - run a short VASP MD on the perturbed structures
      4 - collect the MD data

    Parameters
    ----------
    args : argparse.Namespace
        Must provide PARAM (parameter file path) and MACHINE (machine
        configuration path or None; when None only input files are
        prepared, nothing is submitted).
    """
    # TODO: duplicated input-loading logic; should share a helper with
    # run_iter
    try:
        import ruamel
        from monty.serialization import loadfn, dumpfn
        warnings.simplefilter('ignore', ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
        jdata = loadfn(args.PARAM)
        if args.MACHINE is not None:
            mdata = loadfn(args.MACHINE)
    except Exception:
        # Plain-json fallback.  (This was a bare `except:`, which also
        # swallowed KeyboardInterrupt and SystemExit.)
        with open(args.PARAM, 'r') as fp:
            jdata = json.load(fp)
        if args.MACHINE is not None:
            with open(args.MACHINE, "r") as fp:
                mdata = json.load(fp)

    if args.MACHINE is not None:
        # Selecting a proper machine
        mdata = decide_fp_machine(mdata)
        #disp = make_dispatcher(mdata["fp_machine"])

    # Decide work path
    out_dir = out_dir_name(jdata)
    jdata['out_dir'] = out_dir
    dlog.info("# working dir %s" % out_dir)
    # Decide whether to use a given poscar
    from_poscar = jdata.get('from_poscar', False)
    # Verify md_nstep: when an explicit md_incar is supplied, its NSW value
    # overrides the PARAM setting.
    md_nstep_jdata = jdata["md_nstep"]
    try:
        md_incar = jdata['md_incar']
        if os.path.isfile(md_incar):
            standard_incar = incar_upper(Incar.from_file(md_incar))
            if "NSW" in standard_incar:
                nsw_steps = standard_incar['NSW']
                if nsw_steps != md_nstep_jdata:
                    dlog.info("WARNING: your set-up for MD steps in PARAM and md_incar are not consistent!")
                    dlog.info("MD steps in PARAM is %d" % (md_nstep_jdata))
                    dlog.info("MD steps in md_incar is %d" % (nsw_steps))
                    dlog.info("DP-GEN will use settings in md_incar!")
                    jdata['md_nstep'] = nsw_steps
    except KeyError:
        pass
    # Correct element name: capitalize only the first letter (e.g. "al" ->
    # "Al", leaving the rest of the symbol untouched).
    jdata['elements'] = [ele[0].upper() + ele[1:] for ele in jdata['elements']]
    dlog.info("Elements are %s" % ' '.join(jdata['elements']))

    # Iteration over the requested stages
    stage_list = [int(i) for i in jdata['stages']]
    for stage in stage_list:
        if stage == 1:
            dlog.info("Current stage is 1, relax")
            create_path(out_dir)
            shutil.copy2(args.PARAM, os.path.join(out_dir, 'param.json'))
            if from_poscar:
                make_super_cell_poscar(jdata)
            else:
                make_unit_cell(jdata)
                make_super_cell(jdata)
                place_element(jdata)
            if args.MACHINE is not None:
                make_vasp_relax(jdata, mdata)
                run_vasp_relax(jdata, mdata)
            else:
                # No machine: only prepare the inputs.
                make_vasp_relax(jdata, {"fp_resources": {}})
        elif stage == 2:
            dlog.info("Current stage is 2, perturb and scale")
            make_scale(jdata)
            pert_scaled(jdata)
        elif stage == 3:
            dlog.info("Current stage is 3, run a short md")
            make_vasp_md(jdata)
            if args.MACHINE is not None:
                run_vasp_md(jdata, mdata)
        elif stage == 4:
            dlog.info("Current stage is 4, collect data")
            coll_vasp_md(jdata)
        else:
            raise RuntimeError("unknown stage %d" % stage)
Esempio n. 18
0
def run_interstitial(task_type, jdata, mdata):
    """Submit the 04.interstitial tasks (struct-*) with vasp or lammps.

    When jdata['reprod-opt'] is set, the lammps branch reproduces each
    VASP frame: every struct-* directory's frame.* sub-tasks are
    dispatched per structure directory.

    Parameters
    ----------
    task_type : str
        "vasp" or one of lammps_task_type.
    jdata, mdata : dict
        Parameter and machine configuration dictionaries.
    """
    reprod_opt = jdata['reprod-opt']
    work_path = util.make_work_path(jdata, '04.interstitial', reprod_opt, False, False)
    all_task = glob.glob(os.path.join(work_path, 'struct-*'))

    #vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)

        forward_files = ['INCAR', 'POSCAR', 'POTCAR', "KPOINTS"]
        backward_files = ['OUTCAR', task_type + '.out', 'XDATCAR', 'OSZICAR']
        common_files = ['INCAR']
        if ('cvasp' in jdata) and (jdata['cvasp'] == True):
            mdata['fp_resources']['cvasp'] = True
            forward_files.append('cvasp.py')

    #lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)

        if reprod_opt:
            # Re-aim the dispatch: work_path becomes the list of struct-*
            # dirs and all_task the frame.* sub-tasks inside them.
            all_frame = []
            for ii in all_task:
                all_frame += (glob.glob(os.path.join(ii, 'frame.*')))
            work_path = all_task
            all_task = all_frame

        # NOTE(review): the finished-task filter is commented out upstream,
        # so every task is (re)run -- confirm this is intentional.
        run_tasks_ = []
        for ii in all_task:
            run_tasks_.append(ii)

        fp_params = jdata['lammps_params']
        model_dir = os.path.abspath(fp_params['model_dir'])
        model_name = fp_params['model_name']
        if not model_name:
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(ii) for ii in models]
        else:
            models = [os.path.join(model_dir, ii) for ii in model_name]
        forward_files = ['conf.lmp', 'lammps.in'] + model_name
        backward_files = ['log.lammps', task_type + '.out']
        common_files = ['lammps.in'] + model_name

        # deepmd with several models also produces the model deviation file.
        if len(model_name) > 1 and task_type == 'deepmd':
            backward_files = backward_files + ['model_devi.out']

    else:
        raise RuntimeError("unknow task %s, something wrong" % task_type)

    machine, resources, command, group_size = util.get_machine_info(mdata, task_type)
    if reprod_opt:
        for ii in work_path:
            run_tasks = []
            for jj in run_tasks_:
                if ii in jj:
                    run_tasks.append(os.path.basename(jj))
            # BUGFIX: build the dispatcher for the directory actually being
            # dispatched (`ii`).  Previously the whole work_path *list* was
            # passed here, while run_jobs targeted `ii`.
            disp = make_dispatcher(machine, resources, ii, run_tasks, group_size)
            disp.run_jobs(resources,
                          command,
                          ii,
                          run_tasks,
                          group_size,
                          common_files,
                          forward_files,
                          backward_files,
                          outlog=task_type + '.out',
                          errlog=task_type + '.err')
    else:
        run_tasks = util.collect_task(all_task, task_type)
        if len(run_tasks) == 0:
            return
        else:
            # NOTE(review): collect_task's filtered result is discarded and
            # *all* tasks are resubmitted when any is unfinished -- confirm
            # this restart-everything behaviour is intended.
            run_tasks = [os.path.basename(ii) for ii in all_task]
            disp = make_dispatcher(machine, resources, work_path, run_tasks, group_size)
            disp.run_jobs(resources,
                          command,
                          work_path,
                          run_tasks,
                          group_size,
                          common_files,
                          forward_files,
                          backward_files,
                          outlog=task_type + '.out',
                          errlog=task_type + '.err')
Esempio n. 19
0
    shutil.copyfile(os.path.join(pwd,'POTCAR'),'POTCAR')
    shutil.copyfile(os.path.join(pwd,'KPOINTS'),'KPOINTS')
os.chdir(pwd)
#os._exit(0)
fp_tasks = glob(os.path.join(work_path, 'task.*'))
fp_tasks.sort()
run_tasks = [os.path.basename(ii) for ii in fp_tasks]
#----------------------------------------------------
forward_files = ['POSCAR', 'INCAR', 'POTCAR','KPOINTS']
backward_files = ['OUTCAR','vasprun.xml','CONTCAR']
forward_common_files=[]
mark_failure =False
log_file='runlog'
err_file='errlog'
mdata=loadfn('machine.json')
mdata  = decide_fp_machine(mdata)
#dumpfn(mdata,'new.json',indent=4)
fp_command = mdata['fp_command']
fp_group_size = mdata['fp_group_size']
#---------------------------------------------------


dispatcher = make_dispatcher(mdata['fp_machine'], 
                             mdata_resource=mdata['fp_resources'],
                             work_path=work_path, 
                             run_tasks=run_tasks, 
                             group_size=fp_group_size)
dispatcher.run_jobs(mdata['fp_resources'],
                        [fp_command],
                        work_path,
                        run_tasks,
Esempio n. 20
0
def run_property(confs, inter_param, property_list, mdata):
    """Run every property calculation under every matched configuration.

    Parameters
    ----------
    confs : str
        Glob pattern matching configuration directories (e.g. mp-xxx).
    inter_param : dict
        Inter-atomic interaction settings; inter_param['type'] selects
        vasp vs. lammps machinery.
    property_list : list of dict
        One dict per property; each has at least 'type' and optionally
        'init_from_suffix'/'output_suffix' for refined runs.
    mdata : dict
        Machine configuration.
    """
    # find all POSCARs and their name like mp-xxx
    # ...
    conf_dirs = glob.glob(confs)
    conf_dirs.sort()
    task_list = []
    work_path_list = []
    for ii in conf_dirs:
        for jj in property_list:
            # determine the suffix: from scratch or refine
            # BUGFIX: the original condition was
            #   `if 'init_from_suffix' and 'output_suffix' in jj:`
            # whose first operand is an always-truthy string literal, so it
            # only ever tested 'output_suffix'.
            if 'init_from_suffix' in jj and 'output_suffix' in jj:
                suffix = jj['output_suffix']
            else:
                # BUGFIX: was the int 0, which raised TypeError in the path
                # concatenation below; '00' is the default from-scratch
                # suffix.
                suffix = '00'

            property_type = jj['type']
            path_to_work = os.path.join(ii, property_type + '_' + suffix)

            work_path_list.append(path_to_work)
            tmp_task_list = glob.glob(
                os.path.join(path_to_work, 'task.[0-9]*[0-9]'))
            tmp_task_list.sort()
            task_list.append(tmp_task_list)

    # dispatch the tasks
    forward_files, forward_common_files, backward_files = make_task_trans_files(
        inter_param)
    #    backward_files += logs
    # ...
    task_type = inter_param['type']
    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
    else:
        raise RuntimeError("unknown task %s, something wrong" % task_type)

    for idx in range(len(work_path_list)):
        work_path = work_path_list[idx]
        all_task = task_list[idx]
        run_tasks = util.collect_task(all_task, task_type)
        if len(run_tasks) == 0:
            return
        else:
            # NOTE(review): collect_task's filtered list is discarded and all
            # tasks are resubmitted when any is unfinished -- confirm this
            # restart-everything behaviour is intended.
            run_tasks = [os.path.basename(t) for t in all_task]
            machine, resources, command, group_size = util.get_machine_info(
                mdata, task_type)
            disp = make_dispatcher(machine, resources, work_path, run_tasks,
                                   group_size)
            disp.run_jobs(resources,
                          command,
                          work_path,
                          run_tasks,
                          group_size,
                          forward_common_files,
                          forward_files,
                          backward_files,
                          outlog=task_type + '.out',
                          errlog=task_type + '.err')
Esempio n. 21
0
def run_elastic(task_type, jdata, mdata, ssh_sess):
    """Submit the 02.elastic deformation tasks (dfm-*) with vasp or lammps.

    Unfinished tasks (missing or incomplete output) are collected and handed
    to `_run` together with the machine settings taken from `mdata`.
    """
    fp_params = jdata['vasp_params']
    kspacing = fp_params['kspacing']
    conf_path = os.path.abspath(jdata['conf_dir'])

    task_path = re.sub('confs', '02.elastic', conf_path)
    if task_type == "vasp":
        work_path = os.path.join(task_path, 'vasp-k%.2f' % kspacing)
    elif task_type in lammps_task_type:
        work_path = os.path.join(task_path, task_type)
    assert (os.path.isdir(work_path))
    print(work_path)

    all_task = sorted(glob.glob(os.path.join(work_path, "dfm-*")))

    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
        machine = mdata['fp_machine']
        machine_type = machine['machine_type']
        resources = mdata['fp_resources']
        group_size = mdata['fp_group_size']
        command = cmd_append_log(mdata['fp_command'], "log")

        # A task is resubmitted unless its OUTCAR exists and reports a
        # finished run.
        run_tasks_ = []
        for task in all_task:
            outcar = os.path.join(task, 'OUTCAR')
            if not (os.path.isfile(outcar) and vasp.check_finished(outcar)):
                run_tasks_.append(task)

        forward_files = ['INCAR', 'POSCAR', 'POTCAR', 'KPOINTS']
        backward_files = ['OUTCAR', 'CONTCAR', 'OSZICAR']
        common_files = ['INCAR', 'POTCAR', 'KPOINTS']

    # lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
        machine = mdata['model_devi_machine']
        machine_type = machine['machine_type']
        resources = mdata['model_devi_resources']
        group_size = mdata['model_devi_group_size']
        command = cmd_append_log(mdata['lmp_command'] + " -i lammps.in",
                                 "model_devi.log")

        # A task is resubmitted unless its log.lammps contains a final-stress
        # line that is not merely the echo of a `print` command.
        run_tasks_ = []
        for task in all_task:
            logfile = os.path.join(task, 'log.lammps')
            if not os.path.isfile(logfile):
                run_tasks_.append(task)
                continue
            with open(logfile, 'r') as fp:
                finished = any(('Final Stress' in line) and ('print' not in line)
                               for line in fp.read().split('\n'))
            if not finished:
                run_tasks_.append(task)

        lmp_params = jdata['lammps_params']
        model_dir = os.path.abspath(lmp_params['model_dir'])
        model_name = lmp_params['model_name']
        if model_name:
            models = [os.path.join(model_dir, name) for name in model_name]
        else:
            # No explicit names: pick up every .pb graph in the model dir.
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(model) for model in models]
        forward_files = ['conf.lmp', 'lammps.in', 'strain.out'] + model_name
        backward_files = ['log.lammps', 'model_devi.log']
        common_files = ['lammps.in'] + model_name

        # Several models means a model-deviation file is also produced.
        if len(model_name) > 1:
            backward_files = backward_files + ['model_devi.out']

    else:
        raise RuntimeError("unknow task %s, something wrong" % task_type)

    run_tasks = [os.path.basename(task) for task in run_tasks_]
    _run(machine, machine_type, ssh_sess, resources, command, work_path,
         run_tasks, group_size, common_files, forward_files, backward_files)
Esempio n. 22
0
def run_interstitial(task_type, jdata, mdata, ssh_sess):
    """Submit the 04.interstitial tasks (struct-*) with vasp or lammps.

    When jdata['reprod-opt'] is set (lammps only), each struct-* dir's
    frame.* sub-tasks are dispatched per structure directory instead of a
    single dispatch over work_path.

    Parameters
    ----------
    task_type : str
        "vasp" or one of lammps_task_type.
    jdata, mdata : dict
        Parameter and machine configuration dictionaries.
    ssh_sess :
        SSH session handed through to `_run` for remote submission.
    """
    conf_dir = jdata['conf_dir']
    fp_params = jdata['vasp_params']
    kspacing = fp_params['kspacing']
    reprod_opt = jdata['reprod-opt']

    # The work dir mirrors the confs tree under 04.interstitial.
    conf_path = os.path.abspath(conf_dir)
    task_path = re.sub('confs', '04.interstitial', conf_path)
    if task_type == "vasp":
        work_path = os.path.join(task_path, 'vasp-k%.2f' % kspacing)
    elif task_type in lammps_task_type:
        work_path = os.path.join(task_path, task_type)
        if reprod_opt:
            work_path = os.path.join(task_path,
                                     '%s-reprod-k%.2f' % (task_type, kspacing))
    # NOTE(review): if task_type is neither branch, work_path is unbound and
    # this assert raises NameError instead of a descriptive error.
    assert (os.path.isdir(work_path))

    all_task = glob.glob(os.path.join(work_path, 'struct-*'))

    #vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
        vasp_exec = mdata['fp_command']
        group_size = mdata['fp_group_size']
        resources = mdata['fp_resources']
        machine = mdata['fp_machine']
        machine_type = mdata['fp_machine']['machine_type']
        command = vasp_exec
        command = cmd_append_log(command, "log")

        # Resubmit a task unless its OUTCAR exists and reports completion.
        run_tasks_ = []
        for ii in all_task:
            fres = os.path.join(ii, 'OUTCAR')
            if os.path.isfile(fres):
                if not vasp.check_finished(fres):
                    run_tasks_.append(ii)
            else:
                run_tasks_.append(ii)

        forward_files = ['INCAR', 'POSCAR', 'POTCAR']
        backward_files = ['OUTCAR', 'XDATCAR', 'OSZICAR']
        common_files = ['INCAR']

    #lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
        lmp_exec = mdata['lmp_command']
        group_size = mdata['model_devi_group_size']
        resources = mdata['model_devi_resources']
        machine = mdata['model_devi_machine']
        machine_type = mdata['model_devi_machine']['machine_type']
        command = lmp_exec + " -i lammps.in"
        command = cmd_append_log(command, "model_devi.log")

        if reprod_opt:
            # Re-aim the dispatch: work_path becomes the list of struct-*
            # dirs and all_task the frame.* sub-tasks inside them.
            all_frame = []
            for ii in all_task:
                all_frame += (glob.glob(os.path.join(ii, 'frame.*')))
            work_path = all_task
            all_task = all_frame

        # Resubmit a task unless log.lammps contains a final-energy line
        # that is not merely the echo of a `print` command.
        run_tasks_ = []
        for ii in all_task:
            fres = os.path.join(ii, 'log.lammps')
            if os.path.isfile(fres):
                with open(fres, 'r') as fp:
                    lines = fp.read().split('\n')
                flag = False
                for jj in lines:
                    if ("Final energy per atoms"
                            in jj) and (not 'print' in jj):
                        flag = True
                if not flag:
                    run_tasks_.append(ii)
            else:
                run_tasks_.append(ii)

        fp_params = jdata['lammps_params']
        model_dir = fp_params['model_dir']
        model_dir = os.path.abspath(model_dir)
        model_name = fp_params['model_name']
        if not model_name:
            # No explicit names: pick up every .pb graph in the model dir.
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(ii) for ii in models]
        else:
            models = [os.path.join(model_dir, ii) for ii in model_name]
        forward_files = ['conf.lmp', 'lammps.in'] + model_name
        backward_files = ['log.lammps', 'model_devi.log']
        common_files = ['lammps.in'] + model_name

        # Several models means a model-deviation file is also produced.
        if len(model_name) > 1:
            backward_files = backward_files + ['model_devi.out']

    else:
        raise RuntimeError("unknow task %s, something wrong" % task_type)

    if reprod_opt:
        # One _run per struct-* dir; run_tasks_ entries are matched to their
        # parent dir by substring containment.
        for ii in work_path:
            run_tasks = []
            for jj in run_tasks_:
                if ii in jj:
                    run_tasks.append(os.path.basename(jj))
            _run(machine, machine_type, ssh_sess, resources, command, ii,
                 run_tasks, group_size, common_files, forward_files,
                 backward_files)
    else:
        run_tasks = [os.path.basename(ii) for ii in run_tasks_]
        _run(machine, machine_type, ssh_sess, resources, command, work_path,
             run_tasks, group_size, common_files, forward_files,
             backward_files)