Example #1
def run_vasp_relax(jdata, mdata):
    fp_command = mdata['fp_command']
    fp_group_size = mdata['fp_group_size']
    fp_resources = mdata['fp_resources']
    #machine_type = mdata['fp_machine']['machine_type']
    work_dir = os.path.join(jdata['out_dir'], global_dirname_02)
    
    forward_files = ["POSCAR", "INCAR", "POTCAR"]
    backward_files = ["OUTCAR","CONTCAR"]
    forward_common_files = []
    if 'cvasp' in mdata['fp_resources']:
        if mdata['fp_resources']['cvasp']:
            forward_common_files=['cvasp.py']
    relax_tasks = glob.glob(os.path.join(work_dir, "sys-*"))
    relax_tasks.sort()
    #dlog.info("work_dir",work_dir)
    #dlog.info("relax_tasks",relax_tasks)
    if len(relax_tasks) == 0:
        return

    relax_run_tasks = relax_tasks
    #for ii in relax_tasks : 
    #    if not _vasp_check_fin(ii):
    #        relax_run_tasks.append(ii)
    run_tasks = [os.path.basename(ii) for ii in relax_run_tasks]
    dispatcher = make_dispatcher(mdata['fp_machine'], mdata['fp_resources'], work_dir, run_tasks, fp_group_size)
    #dlog.info(run_tasks)
    dispatcher.run_jobs(fp_resources,
                       [fp_command],
                       work_dir,
                       run_tasks,
                       fp_group_size,
                       forward_common_files,
                       forward_files,
                       backward_files)
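
For context, a minimal sketch of the mdata machine configuration this function reads. The keys mirror the lookups above (fp_command, fp_group_size, fp_machine, fp_resources, and the machine_type noted in the commented-out line); every value is an illustrative assumption, not a canonical dpgen config.

# Hypothetical mdata for run_vasp_relax; values are assumptions for illustration.
mdata = {
    "fp_command": "mpirun -n 32 vasp_std",    # command executed in each task dir
    "fp_group_size": 5,                       # number of tasks grouped into one job
    "fp_machine": {"machine_type": "slurm"},  # handed to make_dispatcher
    "fp_resources": {"cvasp": False},         # True would also upload cvasp.py
}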
Example #2
def run_phonon(task_type,jdata,mdata):
    user= ('user_incar' in jdata.keys())
    work_path=util.make_work_path(jdata,'06.phonon',False,False,user)

    all_task = glob.glob(os.path.join(work_path,'.'))

    #vasp
    if task_type == "vasp":
        mdata=decide_fp_machine(mdata)
        machine,resources,command,group_size=util.get_machine_info(mdata,task_type)

        run_tasks = util.collect_task(all_task,task_type)
        forward_files = ['INCAR', 'POTCAR','KPOINTS']
        backward_files = ['OUTCAR',  task_type+'.out' , 'OSZICAR','vasprun.xml']
        common_files=['POSCAR']
        if ('cvasp' in jdata) and (jdata['cvasp'] == True):
           mdata['fp_resources']['cvasp'] = True
           forward_files.append('cvasp.py')

        disp = make_dispatcher(machine, resources, work_path, run_tasks, group_size)
        disp.run_jobs(resources,
                  command,
                  work_path,
                  run_tasks,
                  group_size,
                  common_files,
                  forward_files,
                  backward_files,
                  outlog=task_type+'.out',
                  errlog=task_type+'.err')
    #lammps
    elif task_type in lammps_task_type:
        pass  # lammps phonon tasks are not handled by this function
    else:
        raise RuntimeError ("unknown task %s, something wrong" % task_type)
Example #3
def run_model_devi(iter_index, jdata, mdata):
    """submit dp test tasks"""
    iter_name = make_iter_name(iter_index)
    work_path = os.path.join(iter_name, model_devi_name)
    # generate command
    commands = []
    tasks = glob.glob(os.path.join(work_path, "task.*"))
    run_tasks = [os.path.basename(ii) for ii in tasks]
    # get models
    models = glob.glob(os.path.join(work_path, "graph*pb"))
    model_names = [os.path.basename(ii) for ii in models]
    task_model_list = []
    for ii in model_names:
        task_model_list.append(os.path.join('..', ii))
    # get max data size
    data_size = max([
        len(dpdata.System(os.path.join(task, rest_data_name),
                          fmt="deepmd/npy")) for task in tasks
    ])
    # build one dp test command per model
    detail_file_names = []
    for ii, mm in enumerate(task_model_list):
        detail_file_name = "{prefix}.{ii}".format(
            prefix=detail_file_name_prefix,
            ii=ii,
        )
        # TODO: support 0.x?
        command = "{python} -m deepmd test -m {model} -s {system} -n {numb_test} -d {detail_file}".format(
            python=mdata['python_test_path'],
            model=mm,
            system=rest_data_name,
            numb_test=data_size,
            detail_file=detail_file_name,
        )
        commands.append(command)
        detail_file_names.append(detail_file_name)
    # submit
    model_devi_group_size = mdata.get('model_devi_group_size', 1)

    forward_files = [rest_data_name]
    backward_files = sum([[pf + ".e.out", pf + ".f.out", pf + ".v.out"]
                          for pf in detail_file_names], [])

    dispatcher = make_dispatcher(mdata['model_devi_machine'],
                                 mdata['model_devi_resources'], work_path,
                                 run_tasks, model_devi_group_size)
    dispatcher.run_jobs(mdata['model_devi_resources'],
                        commands,
                        work_path,
                        run_tasks,
                        model_devi_group_size,
                        model_names,
                        forward_files,
                        backward_files,
                        outlog='model_devi.log',
                        errlog='model_devi.log')
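
To make the command construction above concrete, here is a standalone sketch of the per-model loop. The interpreter path, detail-file prefix, system name and test count stand in for mdata['python_test_path'], detail_file_name_prefix, rest_data_name and data_size, and are all assumptions.

# Standalone sketch of the dp test command loop above; literal values are assumed.
task_model_list = ["../graph.000.pb", "../graph.001.pb"]
for ii, mm in enumerate(task_model_list):
    command = "python3 -m deepmd test -m {model} -s rest_data -n {numb} -d details.{ii}".format(
        model=mm, numb=100, ii=ii)
    print(command)
# Each invocation writes details.<ii>.e.out / .f.out / .v.out, which is why
# backward_files above collects the three suffixes per detail file.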
Example #4
def gen_init_reaction(args):
    try:
        import ruamel
        from monty.serialization import loadfn, dumpfn
        warnings.simplefilter('ignore',
                              ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
        jdata = loadfn(args.PARAM)
        if args.MACHINE is not None:
            mdata = loadfn(args.MACHINE)
    except Exception:
        with open(args.PARAM, 'r') as fp:
            jdata = json.load(fp)
        if args.MACHINE is not None:
            with open(args.MACHINE, "r") as fp:
                mdata = json.load(fp)

    record = "record.reaction"
    iter_rec = -1
    numb_task = 7
    if os.path.isfile(record):
        with open(record) as frec:
            for line in frec:
                iter_rec = int(line.strip())
        dlog.info("continue from task %02d" % iter_rec)
    for ii in range(numb_task):
        sepline(str(ii), '-')
        if ii <= iter_rec:
            continue
        elif ii == 0:
            link_reaxff(jdata)
        elif ii == 1:
            dispatcher = make_dispatcher(mdata["reaxff_machine"])
            run_reaxff(jdata, mdata, dispatcher)
        elif ii == 2:
            link_trj(jdata)
        elif ii == 3:
            dispatcher = make_dispatcher(mdata["build_machine"])
            run_build_dataset(jdata, mdata, dispatcher)
        elif ii == 4:
            link_fp_input()
        elif ii == 5:
            dispatcher = make_dispatcher(mdata["fp_machine"])
            run_fp(jdata, mdata, dispatcher)
        elif ii == 6:
            convert_data(jdata)
        with open(record, "a") as frec:
            frec.write(str(ii) + '\n')
Example #5
def run_equi(task_type,jdata,mdata):
        #rmprint("This module has been run !")

    work_path=util.make_work_path(jdata,'00.equi',False,False,False)
    all_task = glob.glob(os.path.join(work_path,'.'))

    #vasp
    if task_type=="vasp":
        mdata=decide_fp_machine(mdata)

        forward_files = ['INCAR', 'POTCAR', 'KPOINTS']
        if ('cvasp' in jdata) and (jdata['cvasp'] == True):
           mdata['fp_resources']['cvasp'] = True
           forward_files.append('cvasp.py')
        backward_files = ['OUTCAR', task_type+'.out' , 'CONTCAR','OSZICAR']
        common_files=['POSCAR']

    #lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)

        forward_files = ['conf.lmp', 'lammps.in']
        backward_files = ['dump.relax','log.lammps', task_type+'.out']

        fp_params = jdata['lammps_params']
        model_dir = fp_params['model_dir']
        model_dir = os.path.abspath(model_dir)
        model_name =fp_params['model_name']
        if not model_name :
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(ii) for ii in models]
        else:
            models = [os.path.join(model_dir,ii) for ii in model_name]
        common_files = model_name

        if len(model_name)>1 and task_type == 'deepmd':
            backward_files = backward_files + ['model_devi.out']

    else:
        raise RuntimeError ("unknown task %s, something wrong" % task_type)

    run_tasks = util.collect_task(all_task,task_type)
    if len(run_tasks)==0: return
    machine,resources,command,group_size=util.get_machine_info(mdata,task_type)
    disp = make_dispatcher(machine, resources, work_path, run_tasks, group_size)
    #debug#
    #print(' '.join(common_files))
    #print(' '.join(forward_files))
    #print(' '.join(backward_files))
    disp.run_jobs(resources,
                  command,
                  work_path,
                  run_tasks,
                  group_size,
                  common_files,
                  forward_files,
                  backward_files,
                  outlog=task_type+'.out',
                  errlog=task_type+'.err')
Example #6
def run_vasp_relax(jdata, mdata):
    fp_command = mdata['fp_command']
    fp_group_size = mdata['fp_group_size']
    fp_resources = mdata['fp_resources']
    #machine_type = mdata['fp_machine']['machine_type']
    work_dir = os.path.join(jdata['out_dir'], global_dirname_02)
    
    forward_files = ["POSCAR", "INCAR", "POTCAR"]
    user_forward_files = mdata.get("fp" + "_user_forward_files", [])
    forward_files += [os.path.basename(file) for file in user_forward_files]
    backward_files = ["OUTCAR","CONTCAR"]
    backward_files += mdata.get("fp" + "_user_backward_files", [])
    forward_common_files = []
    if 'cvasp' in mdata['fp_resources']:
        if mdata['fp_resources']['cvasp']:
            forward_files +=['cvasp.py']
    relax_tasks = glob.glob(os.path.join(work_dir, "sys-*"))
    relax_tasks.sort()
    #dlog.info("work_dir",work_dir)
    #dlog.info("relax_tasks",relax_tasks)
    if len(relax_tasks) == 0:
        return

    relax_run_tasks = relax_tasks
    #for ii in relax_tasks : 
    #    if not _vasp_check_fin(ii):
    #        relax_run_tasks.append(ii)
    run_tasks = [os.path.basename(ii) for ii in relax_run_tasks]

    api_version = mdata.get('api_version', '0.9')
    if LooseVersion(api_version) < LooseVersion('1.0'):
        warnings.warn("the dpdispatcher will be updated to a new version, "
            "and the interface may change. Please check the documentation for details.")
        dispatcher = make_dispatcher(mdata['fp_machine'], mdata['fp_resources'], work_dir, run_tasks, fp_group_size)
        dispatcher.run_jobs(fp_resources,
                       [fp_command],
                       work_dir,
                       run_tasks,
                       fp_group_size,
                       forward_common_files,
                       forward_files,
                       backward_files)

    elif LooseVersion(api_version) >= LooseVersion('1.0'):
        submission = make_submission(
            mdata['fp_machine'],
            mdata['fp_resources'],
            commands=[fp_command],
            work_path=work_dir,
            run_tasks=run_tasks,
            group_size=fp_group_size,
            forward_common_files=forward_common_files,
            forward_files=forward_files,
            backward_files=backward_files,
            outlog = 'fp.log',
            errlog = 'fp.log')
        submission.run_submission()
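
The api_version gate used here (and again in Examples #14, #15 and #21) can be exercised in isolation. A minimal sketch, mirroring the '0.9' default used above:

from distutils.version import LooseVersion

def pick_backend(mdata):
    # Machine specs older than 1.0 go through make_dispatcher/run_jobs,
    # 1.0 and newer through make_submission.
    api_version = mdata.get('api_version', '0.9')
    if LooseVersion(api_version) < LooseVersion('1.0'):
        return 'dispatcher'
    return 'submission'

print(pick_backend({}))                      # 'dispatcher' (default '0.9')
print(pick_backend({'api_version': '1.0'}))  # 'submission'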
Example #7
def run_surf(task_type,jdata,mdata):
    static=jdata['static-opt']
    work_path=util.make_work_path(jdata,'05.surf',False,static,False)

    all_task = glob.glob(os.path.join(work_path,'struct-*'))

    #vasp
    if task_type == "vasp":
        mdata=decide_fp_machine(mdata)

        forward_files = ['INCAR', 'POSCAR','POTCAR','KPOINTS']
        backward_files = ['OUTCAR',  task_type+'.out' , 'OSZICAR']
        common_files=['INCAR','POTCAR']
        if ('cvasp' in jdata) and (jdata['cvasp'] == True):
           mdata['fp_resources']['cvasp'] = True
           forward_files.append('cvasp.py')

    #lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)

        fp_params = jdata['lammps_params']
        model_dir = fp_params['model_dir']
        model_dir = os.path.abspath(model_dir)
        model_name =fp_params['model_name']
        if not model_name :
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(ii) for ii in models]
        else:
            models = [os.path.join(model_dir,ii) for ii in model_name]
        forward_files = ['conf.lmp', 'lammps.in']+model_name
        backward_files = ['log.lammps',task_type+'.out']
        common_files=['lammps.in']+model_name

        if len(model_name)>1 and task_type == 'deepmd':
            backward_files = backward_files + ['model_devi.out']

    else:
        raise RuntimeError ("unknown task %s, something wrong" % task_type)

    run_tasks = util.collect_task(all_task,task_type)
    if len(run_tasks)==0: return
    else:
        run_tasks = [os.path.basename(ii) for ii in all_task]
        machine,resources,command,group_size=util.get_machine_info(mdata,task_type)
        disp = make_dispatcher(machine, resources, work_path, run_tasks, group_size)
        disp.run_jobs(resources,
                    command,
                    work_path,
                    run_tasks,
                    group_size,
                    common_files,
                    forward_files,
                    backward_files,
                    outlog=task_type+'.out',
                    errlog=task_type+'.err')
Example #8
def run_equi(confs,
             inter_param,
             mdata):
    # find all POSCARs and their name like mp-xxx
    # ...
    conf_dirs = []
    for conf in confs:
        conf_dirs.extend(glob.glob(conf))
    conf_dirs.sort()
    # generate a list of task names like mp-xxx/relaxation/relax_task
    # ...
    work_path_list = []
    for ii in conf_dirs:
        work_path_list.append(os.path.abspath(os.path.join(ii, 'relaxation')))
    all_task = []
    for ii in work_path_list:
        all_task.append(os.path.join(ii, 'relax_task'))

    inter_type = inter_param['type']
    # vasp
    if inter_type == "vasp":
        mdata = decide_fp_machine(mdata)
    elif inter_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
    else:
        raise RuntimeError("unknown task %s, something wrong" % inter_type)

    # dispatch the tasks
    # POSCAR here is useless
    virtual_calculator = make_calculator(inter_param, "POSCAR")
    forward_files = virtual_calculator.forward_files()
    forward_common_files = virtual_calculator.forward_common_files()
    backward_files = virtual_calculator.backward_files()
    #    backward_files += logs
    # ...
    run_tasks = util.collect_task(all_task, inter_type)
    if len(run_tasks) == 0:
        return
    else:
        run_tasks = [os.path.basename(ii) for ii in all_task]
        machine, resources, command, group_size = util.get_machine_info(mdata, inter_type)
        print('%d tasks will be submitted' % len(run_tasks))
        for ii in range(len(work_path_list)):
            work_path = work_path_list[ii]
            disp = make_dispatcher(machine, resources, work_path, [run_tasks[ii]], group_size)
            print("%s --> Running... " % (work_path))
            disp.run_jobs(resources,
                          command,
                          work_path,
                          [run_tasks[ii]],
                          group_size,
                          forward_common_files,
                          forward_files,
                          backward_files,
                          outlog='outlog',
                          errlog='errlog')
Example #9
def run_vacancy(task_type,jdata,mdata):

    work_path=util.make_work_path(jdata,'03.vacancy',False,False,False)
    all_task = glob.glob(os.path.join(work_path,'struct-*'))

    #vasp
    if task_type == "vasp":
        mdata=decide_fp_machine(mdata)

        forward_files = ['INCAR', 'POSCAR','POTCAR']
        backward_files = ['OUTCAR',  'autotest.out' , 'OSZICAR']
        common_files=['INCAR','POTCAR']

    #lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)

        fp_params = jdata['lammps_params']
        model_dir = fp_params['model_dir']
        model_dir = os.path.abspath(model_dir)
        model_name =fp_params['model_name']
        if not model_name :
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(ii) for ii in models]
        else:
            models = [os.path.join(model_dir,ii) for ii in model_name]
        forward_files = ['conf.lmp', 'lammps.in']+model_name
        backward_files = ['log.lammps','autotest.out']
        common_files=['lammps.in']+model_name

        if len(model_name)>1 and task_type == 'deepmd':
            backward_files = backward_files + ['model_devi.out']

    else:
        raise RuntimeError ("unknown task %s, something wrong" % task_type)

    run_tasks = util.collect_task(all_task,task_type)
    if len(run_tasks)==0: return
    machine,machine_type,ssh_sess,resources,command,group_size=util.get_machine_info(mdata,task_type)
    disp = make_dispatcher(machine)
    disp.run_jobs(resources,
                  command,
                  work_path,
                  run_tasks,
                  group_size,
                  common_files,
                  forward_files,
                  backward_files,
                  outlog='autotest.out',
                  errlog='autotest.err')
Example #10
def run_equi(confs, inter_param, mdata):
    # find all POSCARs and their name like mp-xxx
    # ...
    conf_dirs = glob.glob(confs)
    conf_dirs.sort()
    # generate a list of task names like mp-xxx/relaxation
    # ...
    work_path_list = []
    for ii in conf_dirs:
        work_path_list.append(os.path.join(ii, 'relaxation'))
    all_task = []
    for ii in work_path_list:
        all_task.append(os.path.join(ii, '.'))

    inter_type = inter_param['type']
    # vasp
    if inter_type == "vasp":
        mdata = decide_fp_machine(mdata)
    elif inter_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
    else:
        raise RuntimeError("unknown task %s, something wrong" % inter_type)

    # dispatch the tasks
    forward_files, forward_common_files, backward_files = make_task_trans_files(
        inter_param)
    #    backward_files += logs
    # ...
    run_tasks = util.collect_task(all_task, inter_type)
    if len(run_tasks) == 0:
        return
    else:
        run_tasks = [os.path.basename(ii) for ii in all_task]
        machine, resources, command, group_size = util.get_machine_info(
            mdata, inter_type)
        for ii in range(len(work_path_list)):
            work_path = work_path_list[ii]
            disp = make_dispatcher(machine, resources, work_path,
                                   [run_tasks[ii]], group_size)
            disp.run_jobs(resources,
                          command,
                          work_path,
                          [run_tasks[ii]],
                          group_size,
                          forward_common_files,
                          forward_files,
                          backward_files,
                          outlog=inter_type + '.out',
                          errlog=inter_type + '.err')
Example #11
def worker(work_path, all_task, forward_common_files, forward_files,
           backward_files, mdata, inter_type):
    run_tasks = [os.path.basename(ii) for ii in all_task]
    machine, resources, command, group_size = util.get_machine_info(
        mdata, inter_type)
    disp = make_dispatcher(machine, resources, work_path, run_tasks,
                           group_size)
    disp.run_jobs(resources,
                  command,
                  work_path,
                  run_tasks,
                  group_size,
                  forward_common_files,
                  forward_files,
                  backward_files,
                  outlog='outlog',
                  errlog='errlog')
Example #12
def run_vasp_md(jdata, mdata):
    fp_command = mdata['fp_command']
    fp_group_size = mdata['fp_group_size']
    fp_resources = mdata['fp_resources']
    #machine_type = mdata['fp_machine']['machine_type']
    work_dir = os.path.join(jdata['out_dir'], global_dirname_04)
    scale = jdata['scale']   
    pert_numb = jdata['pert_numb'] 
    md_nstep = jdata['md_nstep']

    forward_files = ["POSCAR", "INCAR", "POTCAR"]
    backward_files = ["OUTCAR"]
    forward_common_files = []
    if 'cvasp' in mdata['fp_resources']:
        if mdata['fp_resources']['cvasp']:
            forward_common_files=['cvasp.py']

    path_md = work_dir
    path_md = os.path.abspath(path_md)
    cwd = os.getcwd()
    assert os.path.isdir(path_md), "md path should exist"
    md_tasks = glob.glob(os.path.join(work_dir, 'sys-*/scale*/00*'))
    md_tasks.sort()

    if len(md_tasks) == 0:
        return

    md_run_tasks = md_tasks
    #for ii in md_tasks : 
    #    if not _vasp_check_fin(ii):
    #        md_run_tasks.append(ii)

    run_tasks = [ii.replace(work_dir+"/", "") for ii in md_run_tasks]
    #dlog.info("md_work_dir", work_dir)
    #dlog.info("run_tasks",run_tasks)
    dispatcher = make_dispatcher(mdata['fp_machine'], mdata['fp_resources'], work_dir, run_tasks, fp_group_size)
    dispatcher.run_jobs(fp_resources,
                       [fp_command],
                       work_dir,
                       run_tasks,
                       fp_group_size,
                       forward_common_files,
                       forward_files,
                       backward_files)
Example #13
def gen_init_surf(args):
    try:
        import ruamel
        from monty.serialization import loadfn, dumpfn
        warnings.simplefilter('ignore',
                              ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
        jdata = loadfn(args.PARAM)
        if args.MACHINE is not None:
            mdata = loadfn(args.MACHINE)
    except Exception:
        with open(args.PARAM, 'r') as fp:
            jdata = json.load(fp)
        if args.MACHINE is not None:
            with open(args.MACHINE, "r") as fp:
                mdata = json.load(fp)

    out_dir = out_dir_name(jdata)
    jdata['out_dir'] = out_dir
    dlog.info("# working dir %s" % out_dir)

    if args.MACHINE is not None:
        # Decide a proper machine
        mdata = decide_fp_machine(mdata)
        disp = make_dispatcher(mdata["fp_machine"])

    #stage = args.STAGE
    stage_list = [int(i) for i in jdata['stages']]
    for stage in stage_list:
        if stage == 1:
            create_path(out_dir)
            make_super_cell_pymatgen(jdata)
            place_element(jdata)
            make_vasp_relax(jdata)
            if args.MACHINE is not None:
                run_vasp_relax(jdata, mdata, disp)
        elif stage == 2:
            make_scale(jdata)
            pert_scaled(jdata)
        else:
            raise RuntimeError("unknown stage %d" % stage)
Example #14
def worker(work_path,
           all_task,
           forward_common_files,
           forward_files,
           backward_files,
           mdata,
           inter_type):
    run_tasks = [os.path.basename(ii) for ii in all_task]
    machine, resources, command, group_size = util.get_machine_info(mdata, inter_type)
    api_version = mdata.get('api_version', '0.9')
    if LooseVersion(api_version) < LooseVersion('1.0'):
        warnings.warn("the dpdispatcher will be updated to a new version, "
            "and the interface may change. Please check the documentation for details.")
        disp = make_dispatcher(machine, resources, work_path, run_tasks, group_size)
        disp.run_jobs(resources,
                  command,
                  work_path,
                  run_tasks,
                  group_size,
                  forward_common_files,
                  forward_files,
                  backward_files,
                  outlog='outlog',
                  errlog='errlog')
    elif LooseVersion(api_version) >= LooseVersion('1.0'):
        submission = make_submission(
                mdata_machine=machine,
                mdata_resource=resources,
                commands=[command],
                work_path=work_path,
                run_tasks=run_tasks,
                group_size=group_size,
                forward_common_files=forward_common_files,
                forward_files=forward_files,
                backward_files=backward_files,
                outlog = 'outlog',
                errlog = 'errlog'
            )
        submission.run_submission()
Example #15
def run_equi(confs, inter_param, mdata):
    # find all POSCARs and their name like mp-xxx
    # ...
    conf_dirs = []
    for conf in confs:
        conf_dirs.extend(glob.glob(conf))
    conf_dirs.sort()
    # generate a list of task names like mp-xxx/relaxation/relax_task
    # ...
    work_path_list = []
    for ii in conf_dirs:
        work_path_list.append(os.path.abspath(os.path.join(ii, 'relaxation')))
    all_task = []
    for ii in work_path_list:
        all_task.append(os.path.join(ii, 'relax_task'))

    inter_type = inter_param['type']
    # vasp
    if inter_type == "vasp":
        mdata = convert_mdata(mdata, ["fp"])
    elif inter_type in lammps_task_type:
        mdata = convert_mdata(mdata, ["model_devi"])
    else:
        raise RuntimeError("unknown task %s, something wrong" % inter_type)

    # dispatch the tasks
    # POSCAR here is useless
    virtual_calculator = make_calculator(inter_param, "POSCAR")
    forward_files = virtual_calculator.forward_files()
    forward_common_files = virtual_calculator.forward_common_files()
    backward_files = virtual_calculator.backward_files()
    #    backward_files += logs
    # ...
    run_tasks = util.collect_task(all_task, inter_type)
    if len(run_tasks) == 0:
        return
    else:
        # if LooseVersion()
        run_tasks = [os.path.basename(ii) for ii in all_task]
        machine, resources, command, group_size = util.get_machine_info(
            mdata, inter_type)
        print('%d tasks will be submitted' % len(run_tasks))
        api_version = mdata.get('api_version', '0.9')
        for ii in range(len(work_path_list)):
            work_path = work_path_list[ii]
            print("%s --> Running... " % (work_path))
            if LooseVersion(api_version) < LooseVersion('1.0'):
                warnings.warn(
                    "the dpdispatcher will be updated to a new version, "
                    "and the interface may change. Please check the documentation for details."
                )
                disp = make_dispatcher(machine, resources, work_path,
                                       [run_tasks[ii]], group_size)
                disp.run_jobs(resources,
                              command,
                              work_path, [run_tasks[ii]],
                              group_size,
                              forward_common_files,
                              forward_files,
                              backward_files,
                              outlog='outlog',
                              errlog='errlog')
            elif LooseVersion(api_version) >= LooseVersion('1.0'):
                submission = make_submission(
                    mdata_machine=machine,
                    mdata_resource=resources,
                    commands=[command],
                    work_path=work_path,
                    run_tasks=[run_tasks[ii]],
                    group_size=group_size,
                    forward_common_files=forward_common_files,
                    forward_files=forward_files,
                    backward_files=backward_files,
                    outlog='outlog',
                    errlog='errlog')
                submission.run_submission()
Example #16
def run_iter(param_file, machine_file):
    """ init (iter 0): init_pick

    tasks (iter > 0):
    00 make_train (same as generator)
    01 run_train (same as generator)
    02 post_train (same as generator)
    03 make_model_devi
    04 run_model_devi
    05 post_model_devi
    06 make_fp
    07 run_fp (same as generator)
    08 post_fp (same as generator)
    """
    # TODO: function of handling input json should be combined as one function
    try:
        import ruamel
        from monty.serialization import loadfn, dumpfn
        warnings.simplefilter('ignore',
                              ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
        jdata = loadfn(param_file)
        mdata = loadfn(machine_file)
    except Exception:
        with open(param_file, 'r') as fp:
            jdata = json.load(fp)
        with open(machine_file, 'r') as fp:
            mdata = json.load(fp)

    if jdata.get('pretty_print', False):
        fparam = SHORT_CMD+'_' + \
            param_file.split('.')[0]+'.'+jdata.get('pretty_format', 'json')
        dumpfn(jdata, fparam, indent=4)
        fmachine = SHORT_CMD+'_' + \
            machine_file.split('.')[0]+'.'+jdata.get('pretty_format', 'json')
        dumpfn(mdata, fmachine, indent=4)

    if mdata.get('handlers', None):
        if mdata['handlers'].get('smtp', None):
            que = queue.Queue(-1)
            queue_handler = logging.handlers.QueueHandler(que)
            smtp_handler = logging.handlers.SMTPHandler(
                **mdata['handlers']['smtp'])
            listener = logging.handlers.QueueListener(que, smtp_handler)
            dlog.addHandler(queue_handler)
            listener.start()

    max_tasks = 10000
    numb_task = 9
    record = "record.dpgen"
    iter_rec = [0, -1]
    if os.path.isfile(record):
        with open(record) as frec:
            for line in frec:
                iter_rec = [int(x) for x in line.split()]
        dlog.info("continue from iter %03d task %02d" %
                  (iter_rec[0], iter_rec[1]))

    cont = True
    ii = -1
    while cont:
        ii += 1
        iter_name = make_iter_name(ii)
        sepline(iter_name, '=')
        for jj in range(numb_task):
            if ii * max_tasks + jj <= iter_rec[0] * max_tasks + iter_rec[1]:
                continue
            task_name = "task %02d" % jj
            sepline("{} {}".format(iter_name, task_name), '-')
            jdata['model_devi_jobs'] = [{} for _ in range(ii + 1)]
            if ii == 0 and jj < 6:
                if jj == 0:
                    log_iter("init_pick", ii, jj)
                    init_pick(ii, jdata, mdata)
                dlog.info("first iter, skip step 1-5")
            elif jj == 0:
                log_iter("make_train", ii, jj)
                make_train(ii, jdata, mdata)
            elif jj == 1:
                log_iter("run_train", ii, jj)
                mdata = decide_train_machine(mdata)
                disp = make_dispatcher(mdata['train_machine'])
                run_train(ii, jdata, mdata)
            elif jj == 2:
                log_iter("post_train", ii, jj)
                post_train(ii, jdata, mdata)
            elif jj == 3:
                log_iter("make_model_devi", ii, jj)
                cont = make_model_devi(ii, jdata, mdata)
                if not cont or ii >= jdata.get("stop_iter", ii + 1):
                    break
            elif jj == 4:
                log_iter("run_model_devi", ii, jj)
                mdata = decide_model_devi_machine(mdata)
                disp = make_dispatcher(mdata['model_devi_machine'])
                run_model_devi(ii, jdata, mdata, disp)
            elif jj == 5:
                log_iter("post_model_devi", ii, jj)
                post_model_devi(ii, jdata, mdata)
            elif jj == 6:
                log_iter("make_fp", ii, jj)
                make_fp(ii, jdata, mdata)
            elif jj == 7:
                log_iter("run_fp", ii, jj)
                if jdata.get("labeled", False):
                    dlog.info("already have labeled data, skip run_fp")
                else:
                    mdata = decide_fp_machine(mdata)
                    disp = make_dispatcher(mdata['fp_machine'])
                    run_fp(ii, jdata, mdata)
            elif jj == 8:
                log_iter("post_fp", ii, jj)
                if jdata.get("labeled", False):
                    dlog.info("already have labeled data, skip post_fp")
                else:
                    post_fp(ii, jdata)
            else:
                raise RuntimeError("unknown task %d, something wrong" % jj)
            record_iter(record, ii, jj)
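
The record.dpgen checkpoint that run_iter resumes from holds one "iter task" pair per line, appended after each completed task; only the last line matters. A self-contained sketch of that bookkeeping (the file contents are an assumption matching the parsing above):

import os

# Write a hypothetical checkpoint: iter 000 reached task 08, iter 001 task 03.
with open("record.dpgen", "w") as frec:
    frec.write("0 8\n")
    frec.write("1 3\n")

iter_rec = [0, -1]
if os.path.isfile("record.dpgen"):
    with open("record.dpgen") as frec:
        for line in frec:
            iter_rec = [int(x) for x in line.split()]  # last line wins
print("continue from iter %03d task %02d" % (iter_rec[0], iter_rec[1]))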
Example #17
def run_property(confs, inter_param, property_list, mdata):
    # find all POSCARs and their name like mp-xxx
    # ...
    conf_dirs = glob.glob(confs)
    conf_dirs.sort()
    task_list = []
    work_path_list = []
    for ii in conf_dirs:
        for jj in property_list:
            # determine the suffix: from scratch or refine
            # ...
            if 'init_from_suffix' in jj and 'output_suffix' in jj:
                suffix = jj['output_suffix']
            else:
                suffix = '00'

            property_type = jj['type']
            path_to_work = os.path.join(ii, property_type + '_' + suffix)

            work_path_list.append(path_to_work)
            tmp_task_list = glob.glob(
                os.path.join(path_to_work, 'task.[0-9]*[0-9]'))
            tmp_task_list.sort()
            task_list.append(tmp_task_list)

    # dispatch the tasks
    forward_files, forward_common_files, backward_files = make_task_trans_files(
        inter_param)
    #    backward_files += logs
    # ...
    task_type = inter_param['type']
    # vasp
    if task_type == "vasp":
        mdata = decide_fp_machine(mdata)
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)
    else:
        raise RuntimeError("unknown task %s, something wrong" % task_type)

    for ii in range(len(work_path_list)):
        work_path = work_path_list[ii]
        all_task = task_list[ii]
        run_tasks = util.collect_task(all_task, task_type)
        if len(run_tasks) == 0:
            return
        else:
            run_tasks = [os.path.basename(ii) for ii in all_task]
            machine, resources, command, group_size = util.get_machine_info(
                mdata, task_type)
            disp = make_dispatcher(machine, resources, work_path, run_tasks,
                                   group_size)
            disp.run_jobs(resources,
                          command,
                          work_path,
                          run_tasks,
                          group_size,
                          forward_common_files,
                          forward_files,
                          backward_files,
                          outlog=task_type + '.out',
                          errlog=task_type + '.err')
Example #18
def run_interstitial(task_type,jdata,mdata):

    reprod_opt=jdata['reprod-opt']
    work_path=util.make_work_path(jdata,'04.interstitial',reprod_opt,False,False)
    all_task = glob.glob(os.path.join(work_path,'struct-*'))

    #vasp
    if task_type == "vasp":
        mdata=decide_fp_machine(mdata)

        forward_files = ['INCAR', 'POSCAR','POTCAR',"KPOINTS"]
        backward_files = ['OUTCAR',  task_type+'.out' , 'XDATCAR','OSZICAR']
        common_files=['INCAR']
        if ('cvasp' in jdata) and (jdata['cvasp'] == True):
           mdata['fp_resources']['cvasp'] = True
           forward_files.append('cvasp.py')

    #lammps
    elif task_type in lammps_task_type:
        mdata = decide_model_devi_machine(mdata)

        if reprod_opt:
            all_frame=[]
            for ii in all_task:
                all_frame+=(glob.glob(os.path.join(ii,'frame.*')))
            work_path = all_task
            all_task = all_frame

        run_tasks_ = []
        for ii in all_task:
            # fres = os.path.join(ii, 'log.lammps')
            # if os.path.isfile(fres) :
            #     if not lammps.check_finished(fres):
            #         run_tasks_.append(ii)
            # else :
            #     run_tasks_.append(ii)
            run_tasks_.append(ii)

        fp_params = jdata['lammps_params']
        model_dir = fp_params['model_dir']
        model_dir = os.path.abspath(model_dir)
        model_name =fp_params['model_name']
        if not model_name :
            models = glob.glob(os.path.join(model_dir, '*pb'))
            model_name = [os.path.basename(ii) for ii in models]
        else:
            models = [os.path.join(model_dir,ii) for ii in model_name]
        forward_files = ['conf.lmp', 'lammps.in']+model_name
        backward_files = ['log.lammps', task_type+'.out']
        common_files=['lammps.in']+model_name

        if len(model_name)>1 and task_type == 'deepmd':
            backward_files = backward_files + ['model_devi.out']

    else:
        raise RuntimeError ("unknown task %s, something wrong" % task_type)

    machine,resources,command,group_size=util.get_machine_info(mdata,task_type)
    if reprod_opt:
        for ii in work_path:
            run_tasks=[]
            for jj in run_tasks_:
                if ii in jj:
                    run_tasks.append(os.path.basename(jj))
            disp = make_dispatcher(machine, resources, ii, run_tasks, group_size)
            disp.run_jobs(resources,
                          command,
                          ii,
                          run_tasks,
                          group_size,
                          common_files,
                          forward_files,
                          backward_files,
                          outlog=task_type+'.out',
                          errlog=task_type+'.err')
    else:
        run_tasks = util.collect_task(all_task,task_type)
        if len(run_tasks)==0: return
        else:
            run_tasks = [os.path.basename(ii) for ii in all_task]
            disp = make_dispatcher(machine, resources, work_path, run_tasks, group_size)
            disp.run_jobs(resources,
                        command,
                        work_path,
                        run_tasks,
                        group_size,
                        common_files,
                        forward_files,
                        backward_files,
                        outlog=task_type+'.out',
                        errlog=task_type+'.err')
Example #19
backward_files = ['OUTCAR','vasprun.xml','CONTCAR']
forward_common_files=[]
mark_failure = False
log_file='runlog'
err_file='errlog'
mdata=loadfn('machine.json')
mdata  = decide_fp_machine(mdata)
#dumpfn(mdata,'new.json',indent=4)
fp_command = mdata['fp_command']
fp_group_size = mdata['fp_group_size']
#---------------------------------------------------


dispatcher = make_dispatcher(mdata['fp_machine'], 
                             mdata_resource=mdata['fp_resources'],
                             work_path=work_path, 
                             run_tasks=run_tasks, 
                             group_size=fp_group_size)
dispatcher.run_jobs(mdata['fp_resources'],
                        [fp_command],
                        work_path,
                        run_tasks,
                        fp_group_size,
                        forward_common_files,
                        forward_files,
                        backward_files,
                        mark_failure=mark_failure,
                        outlog = log_file,
                        errlog = err_file)
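
This fragment assumes work_path, run_tasks and forward_files were defined earlier in the script. A plausible reconstruction, consistent with the VASP examples above (the directory layout and file names are assumptions):

import glob
import os

# Hypothetical context for the fragment above; not part of the original script.
work_path = 'fp_task_dir'                                # root holding task.* subdirs
all_task = sorted(glob.glob(os.path.join(work_path, 'task.*')))
run_tasks = [os.path.basename(ii) for ii in all_task]    # task names relative to work_path
forward_files = ['POSCAR', 'INCAR', 'POTCAR']            # per-task VASP inputs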

Example #20
File: gen.py Project: y1xiaoc/dpgen
def gen_init_bulk(args) :
    try:
       import ruamel
       from monty.serialization import loadfn,dumpfn
       warnings.simplefilter('ignore', ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
       jdata=loadfn(args.PARAM)
       if args.MACHINE is not None:
          mdata=loadfn(args.MACHINE)
    except Exception:
       with open (args.PARAM, 'r') as fp :
           jdata = json.load (fp)
       if args.MACHINE is not None:
          with open (args.MACHINE, "r") as fp:
              mdata = json.load(fp)

    if args.MACHINE is not None:
       # Selecting a proper machine
       mdata = decide_fp_machine(mdata)
       disp = make_dispatcher(mdata["fp_machine"])

    # Decide work path
    out_dir = out_dir_name(jdata)
    jdata['out_dir'] = out_dir
    dlog.info ("# working dir %s" % out_dir)
    # Decide whether to use a given poscar
    from_poscar = False 
    if 'from_poscar' in jdata :
        from_poscar = jdata['from_poscar']
    # Verify md_nstep
    md_nstep_jdata = jdata["md_nstep"]
    try:
        md_incar = jdata['md_incar']
        if os.path.isfile(md_incar):
            with open(md_incar , "r") as fr:
                md_incar_lines = fr.readlines()
            nsw_flag = False
            for incar_line in md_incar_lines:
                line = incar_line.split()
                if "NSW" in line:
                    nsw_flag = True
                    nsw_steps = int(incar_line.split()[-1])
                    break
            #dlog.info("nsw_steps is", nsw_steps)
            #dlog.info("md_nstep_jdata is", md_nstep_jdata)
            if nsw_flag:
                if (nsw_steps != md_nstep_jdata):
                    dlog.info("WARNING: MD steps in PARAM and md_incar are not consistent!")
                    dlog.info("MD steps in PARAM is %d"%(md_nstep_jdata))
                    dlog.info("MD steps in md_incar is %d"%(nsw_steps))
                    dlog.info("DP-GEN will use settings in md_incar!")
                    jdata['md_nstep'] = nsw_steps
    except Exception:
        pass
    ## correct element name 
    temp_elements = []
    for ele in jdata['elements']:
        temp_elements.append(ele[0].upper() + ele[1:])
    jdata['elements'] = temp_elements
    dlog.info("Elements are %s"% ' '.join(jdata['elements']))

    ## Iteration 
    stage_list = [int(i) for i in jdata['stages']]
    for stage in stage_list:
        if stage == 1 :
            dlog.info("Current stage is 1, relax")
            create_path(out_dir)
            shutil.copy2(args.PARAM, os.path.join(out_dir, 'param.json'))
            if from_poscar :
                make_super_cell_poscar(jdata)
            else :
                make_unit_cell(jdata)
                make_super_cell(jdata)
                place_element(jdata)
            if args.MACHINE is not None:
               make_vasp_relax(jdata, mdata)
               run_vasp_relax(jdata, mdata, disp)
            else:
               make_vasp_relax(jdata, {"fp_resources":{}})
        elif stage == 2 :
            dlog.info("Current stage is 2, perturb and scale")
            make_scale(jdata)
            pert_scaled(jdata)
        elif stage == 3 :
            dlog.info("Current stage is 3, run a short md")
            make_vasp_md(jdata)
            if args.MACHINE is not None:
               run_vasp_md(jdata, mdata, disp)
        elif stage == 4 :
            dlog.info("Current stage is 4, collect data")
            coll_vasp_md(jdata)
        else :
            raise RuntimeError("unknown stage %d" % stage)
Example #21
def run_vasp_md(jdata, mdata):
    fp_command = mdata['fp_command']
    fp_group_size = mdata['fp_group_size']
    fp_resources = mdata['fp_resources']
    #machine_type = mdata['fp_machine']['machine_type']
    work_dir = os.path.join(jdata['out_dir'], global_dirname_04)
    scale = jdata['scale']   
    pert_numb = jdata['pert_numb'] 
    md_nstep = jdata['md_nstep']

    forward_files = ["POSCAR", "INCAR", "POTCAR"]
    user_forward_files = mdata.get("fp" + "_user_forward_files", [])
    forward_files += [os.path.basename(file) for file in user_forward_files]
    backward_files = ["OUTCAR"]
    backward_files += mdata.get("fp" + "_user_backward_files", [])
    forward_common_files = []
    if 'cvasp' in mdata['fp_resources']:
        if mdata['fp_resources']['cvasp']:
            forward_files +=['cvasp.py']

    path_md = work_dir
    path_md = os.path.abspath(path_md)
    cwd = os.getcwd()
    assert os.path.isdir(path_md), "md path should exist"
    md_tasks = glob.glob(os.path.join(work_dir, 'sys-*/scale*/00*'))
    md_tasks.sort()

    if len(md_tasks) == 0:
        return

    md_run_tasks = md_tasks
    #for ii in md_tasks : 
    #    if not _vasp_check_fin(ii):
    #        md_run_tasks.append(ii)

    run_tasks = [ii.replace(work_dir+"/", "") for ii in md_run_tasks]
    #dlog.info("md_work_dir", work_dir)
    #dlog.info("run_tasks",run_tasks)
    api_version = mdata.get('api_version', '0.9')
    if LooseVersion(api_version) < LooseVersion('1.0'):
        warnings.warn("the dpdispatcher will be updated to a new version, "
            "and the interface may change. Please check the documentation for details.")
        dispatcher = make_dispatcher(mdata['fp_machine'], mdata['fp_resources'], work_dir, run_tasks, fp_group_size)
        dispatcher.run_jobs(fp_resources,
                       [fp_command],
                       work_dir,
                       run_tasks,
                       fp_group_size,
                       forward_common_files,
                       forward_files,
                       backward_files)

    elif LooseVersion(api_version) >= LooseVersion('1.0'):
        submission = make_submission(
            mdata['fp_machine'],
            mdata['fp_resources'],
            commands=[fp_command],
            work_path=work_dir,
            run_tasks=run_tasks,
            group_size=fp_group_size,
            forward_common_files=forward_common_files,
            forward_files=forward_files,
            backward_files=backward_files,
            outlog = 'fp.log',
            errlog = 'fp.log')
        submission.run_submission()