Example #1
class JobFactory(object):
    def __init__(self, task_dict_list, submission_dict, machine_name,
                 resource_dict, **kwargs):

        task_list = [Task(**task_dict) for task_dict in task_dict_list]

        machine_dict = settings['POOL'][machine_name]
        machine = Machine.load_from_dict(machine_dict['machine'])
        resources = Resources.load_from_dict(resource_dict)

        self.submission = Submission(machine=machine,
                                     resources=resources,
                                     task_list=task_list,
                                     **submission_dict)
Example #2
def post_enhc(iter_index, json_file, machine_json, base_dir="./"):
    base_dir = os.path.abspath(base_dir) + "/"
    iter_name = make_iter_name(iter_index)
    work_path = base_dir + iter_name + "/" + enhc_name + "/"
    json_file = os.path.abspath(json_file)
    with open(json_file, 'r') as fp:
        jdata = json.load(fp)
    gmx_split = jdata["gmx_split_traj"]
    gmx_split_log = "gmx_split.log"
    gmx_split_cmd = cmd_append_log(gmx_split, gmx_split_log)

    all_task = list(
        filter(lambda x: os.path.isdir(x),
               glob.glob(work_path + "/[0-9]*[0-9]")))
    all_task.sort()

    cwd = os.getcwd()
    numb_walkers = jdata["numb_walkers"]
    for ii in range(numb_walkers):
        walker_path = work_path + make_walker_name(ii) + "/"
        os.chdir(walker_path)
        if os.path.isdir("confs"):
            shutil.rmtree("confs")
        os.makedirs("confs")
        os.chdir(cwd)

    print('rid.py:post_enhc:gmx_split_cmd', gmx_split_cmd)
    print('rid.py:post_enhc:work path', work_path)

    machine = set_machine(machine_json, target="post_enhc")
    resources = set_resource(machine_json, target="post_enhc")
    all_task_relpath = [os.path.relpath(ii, work_path) for ii in all_task]
    gmx_split_task = [
        Task(command=gmx_split_cmd,
             task_work_path=ii,
             outlog='gmx_split.log',
             errlog='gmx_split.log') for ii in all_task_relpath
    ]
    gmx_split_submission = Submission(work_base=work_path,
                                      resources=resources,
                                      machine=machine,
                                      task_list=gmx_split_task)
    gmx_split_submission.run_submission()

    for ii in range(numb_walkers):
        walker_path = work_path + make_walker_name(ii) + "/"
        angles = np.loadtxt(walker_path + enhc_out_plm)
        np.savetxt(walker_path + enhc_out_angle, angles[:, 1:], fmt="%.6f")
    print("Post process of enhanced sampling finished.")
Example #3
def run_enhc(iter_index,
             json_file,
             machine_json,
             base_dir='./'):
    json_file = os.path.abspath(json_file)
    base_dir = os.path.abspath(base_dir) + "/"
    iter_name = make_iter_name(iter_index)
    work_path = base_dir + iter_name + "/" + enhc_name + "/"

    with open(json_file, 'r') as fp:
        jdata = json.load(fp)
    gmx_prep = jdata["gmx_prep"]
    gmx_run = jdata["gmx_run"]
    enhc_thread = jdata["enhc_thread"]
    gmx_run = gmx_run + (" -nt %d" % enhc_thread)
    gmx_prep_log = "gmx_grompp.log"
    gmx_run_log = "gmx_mdrun.log"
    # assuming at least one walker
    graph_files = glob.glob(work_path + (make_walker_name(0)) + "/*.pb")
    if len(graph_files) != 0:
        gmx_run = gmx_run + " -plumed " + enhc_plm
    else:
        gmx_run = gmx_run + " -plumed " + enhc_bf_plm
    gmx_prep_cmd = cmd_append_log(gmx_prep, gmx_prep_log)
    gmx_run_cmd = cmd_append_log(gmx_run, gmx_run_log)
    numb_walkers = jdata["numb_walkers"]

    all_task = list(
        filter(lambda x: os.path.isdir(x),
               glob.glob(work_path + "/[0-9]*[0-9]")))
    all_task.sort()

    all_task_basedir = [os.path.relpath(ii, work_path) for ii in all_task]
    print('run_enhc:work_path', work_path)
    print('run_enhc:gmx_prep_cmd:', gmx_prep_cmd)
    print('run_enhc:gmx_run_cmd:', gmx_run_cmd)
    print('run_enhc:all_task:', all_task)
    print('run_enhc:all_task_basedir:', all_task_basedir)

    machine = set_machine(machine_json, target="enhcMD")
    resources = set_resource(machine_json, target="enhcMD")

    gmx_prep_task = [Task(command=gmx_prep_cmd, task_work_path=ii,
                          outlog='gmx_grompp.log', errlog='gmx_grompp.log') for ii in all_task_basedir]
    gmx_prep_submission = Submission(
        work_base=work_path, machine=machine, resources=resources, task_list=gmx_prep_task)

    gmx_prep_submission.run_submission()

    gmx_run_task = [Task(command=gmx_run_cmd, task_work_path=ii,
                         outlog='gmx_mdrun.log', errlog='gmx_mdrun.log') for ii in all_task_basedir]
    gmx_run_submission = Submission(
        work_base=work_path, machine=machine, resources=resources, task_list=gmx_run_task)
    gmx_run_submission.run_submission()
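
set_machine and set_resource are also external to these snippets. A plausible sketch, assuming machine.json holds one machine/resources pair per target name (this layout is an assumption, not confirmed by the snippets):

import json
from dpdispatcher.machine import Machine
from dpdispatcher.submission import Resources

def set_machine(machine_json, target):
    # Assumed layout: {"enhcMD": {"machine": {...}, "resources": {...}}, ...}
    with open(machine_json, 'r') as f:
        mdata = json.load(f)
    return Machine.load_from_dict(mdata[target]['machine'])

def set_resource(machine_json, target):
    with open(machine_json, 'r') as f:
        mdata = json.load(f)
    return Resources.load_from_dict(mdata[target]['resources'])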
Example #4
class JobFactory(object):
    def __init__(self, task_dict_list, submission_dict, machine_name,
                 resource_dict, **kwargs):

        task_list = [Task(**task_dict) for task_dict in task_dict_list]

        machine_dict = settings['POOL'][machine_name]
        machine = Machine.load_from_dict(machine_dict['machine'])
        resources = Resources.load_from_dict(resource_dict)

        self.submission = Submission(machine=machine,
                                     resources=resources,
                                     task_list=task_list,
                                     **submission_dict)

    def run_submission(self):
        _origin = os.getcwd()
        self.submission.run_submission()
        os.chdir(_origin)
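
A hypothetical way to drive JobFactory; the settings['POOL'] layout and every value below are assumptions for illustration only:

task_dict_list = [
    {'command': 'lmp -i input.lammps', 'task_work_path': 'bct-1/',
     'forward_files': ['conf.lmp', 'input.lammps'],
     'backward_files': ['log.lammps']},
]
factory = JobFactory(task_dict_list=task_dict_list,
                     submission_dict={'work_base': '0_md/'},
                     machine_name='my_cluster',  # assumed key in settings['POOL']
                     resource_dict={'number_node': 1, 'cpu_per_node': 4,
                                    'gpu_per_node': 0, 'queue_name': 'cpu',
                                    'group_size': 1})
factory.run_submission()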
Example #5
def get_empty_submission(job_work_dir):
    machine_file = os.path.join(job_work_dir, '../', '../', '../',
                                'machine.json')
    with open(machine_file, 'r') as f:
        mdata = json.load(f)

    machine = Machine.load_from_dict(mdata['machine'])
    resources = Resources.load_from_dict(mdata['resources'])

    submission = Submission(
        work_base=job_work_dir,
        resources=resources,
        machine=machine,
    )
    return submission
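
The submission returned above carries no tasks yet; a sketch of filling and running it, reusing register_task from Example #6 (the path and command are placeholders):

submission = get_empty_submission('path/to/job_work_dir')
submission.register_task(Task(command='python3 run.py',
                              task_work_path='./',
                              outlog='run.log',
                              errlog='run.log'))
submission.run_submission()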
Example #6
        def dpti_gdi_loop_md(task_dict):
            context = get_current_context()
            dag_run = context['params']

            submission_dict = dag_run['submission_dict']
            print('submission_dict', submission_dict)
            mdata = dag_run['mdata']
            print('mdata', mdata)
            print('debug:task_dict', task_dict)

            machine = Machine.load_from_machine_dict(mdata)
            batch = machine.batch
            submission = Submission.deserialize(
                submission_dict=submission_dict, batch=batch)
            submission.register_task(task=Task.deserialize(
                task_dict=task_dict))
            submission.run_submission()
            # md_return = prepare_return
            return True
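
A sketch of how the dicts consumed above could be produced, assuming dpdispatcher's serialize() counterparts to the deserialize() calls used here (the direct call is only illustrative; in the original this runs as an Airflow task):

task = Task(command='lmp -i input.lammps', task_work_path='bct-1/')
task_dict = task.serialize()
# submission_dict would come from a matching Submission.serialize()
dpti_gdi_loop_md(task_dict)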
Example #7
def post_res(iter_index, json_file, machine_json, cv_file, base_dir="./"):
    json_file = os.path.abspath(json_file)
    machine_json = os.path.abspath(machine_json)
    cv_file = os.path.abspath(cv_file)
    base_dir = os.path.abspath(base_dir) + "/"
    iter_name = make_iter_name(iter_index)
    res_path = base_dir + iter_name + "/" + res_name + "/"
    cwd = os.getcwd()

    with open(json_file, 'r') as fp:
        jdata = json.load(fp)

    os.chdir(res_path)
    all_task = list(
        filter(lambda x: os.path.isdir(x), glob.glob("[0-9]*[0-9]")))
    if len(all_task) == 0:
        np.savetxt(res_path + 'data.raw', [], fmt="%.6e")
        os.chdir(cwd)
        return
    all_task.sort()
    all_task_reldir = [os.path.relpath(ii, res_path) for ii in all_task]

    centers = []
    force = []
    ndim = 0

    _conf_file = os.path.abspath(all_task[0] + "/conf.gro")
    cv_dim_list = cal_cv_dim(_conf_file, cv_file)
    cv_dih_dim = cv_dim_list[0]

    cmpf_cmd = "python3 {}/cmpf.py".format(LIB_PATH)
    cmpf_cmd += " -c %d" % cv_dih_dim
    cmpf_log = "cmpf.log"

    print("rid.post_res.post_res:cmpf_cmd:", cmpf_cmd)

    cmpf_resources = set_resource(machine_json, target="cmpf")
    machine = set_machine(machine_json, target="cmpf")

    cmpf_task = [
        Task(command=cmpf_cmd,
             task_work_path="{}".format(ii),
             outlog=cmpf_log,
             errlog=cmpf_log) for ii in all_task_reldir
    ]
    cmpf_submission = Submission(work_base=res_path,
                                 machine=machine,
                                 resources=cmpf_resources,
                                 task_list=cmpf_task)
    cmpf_submission.run_submission()
    print('cmpf done')

    abs_res_path = os.getcwd()
    for work_path in all_task:
        os.chdir(work_path)
        this_centers = np.loadtxt('centers.out')
        centers = np.append(centers, this_centers)
        this_force = np.loadtxt('force.out')
        force = np.append(force, this_force)
        ndim = this_force.size
        assert (ndim == this_centers.size
                ), "center size differs from force size in " + work_path
        os.chdir(abs_res_path)

    os.chdir(cwd)
    centers = np.reshape(centers, [-1, ndim])
    force = np.reshape(force, [-1, ndim])
    data = np.concatenate((centers, force), axis=1)
    np.savetxt(res_path + 'data.raw', data, fmt="%.6e")

    norm_force = np.linalg.norm(force, axis=1)
    log_task("min|f| = %e  max|f| = %e  avg|f| = %e" %
             (np.min(norm_force), np.max(norm_force), np.average(norm_force)))
    print("min|f| = %e  max|f| = %e  avg|f| = %e" %
          (np.min(norm_force), np.max(norm_force), np.average(norm_force)))
    print('Saving cmpf finished!')
    print("Post process of restrained MD finished.")
    print(os.getcwd())
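
Each row of data.raw thus holds the restraint centers in the first ndim columns and the mean forces in the last ndim columns; a short sketch of reading it back:

import numpy as np

data = np.loadtxt('data.raw')
ndim = data.shape[1] // 2
centers, force = data[:, :ndim], data[:, ndim:]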
Example #8
def run_res(iter_index, json_file, machine_json, base_dir="./"):
    json_file = os.path.abspath(json_file)
    with open(json_file, 'r') as fp:
        jdata = json.load(fp)
    gmx_prep = jdata["gmx_prep"]
    gmx_run = jdata["gmx_run"]
    res_thread = jdata["res_thread"]
    gmx_run = gmx_run + (" -nt %d" % res_thread)
    gmx_run = gmx_run + " -plumed " + res_plm
    # gmx_cont_run = gmx_run + " -cpi state.cpt"
    gmx_prep_log = "gmx_grompp.log"
    gmx_run_log = "gmx_mdrun.log"
    gmx_prep_cmd = cmd_append_log(gmx_prep, gmx_prep_log)
    gmx_run_cmd = cmd_append_log(gmx_run, gmx_run_log)
    # gmx_cont_run_cmd = cmd_append_log (gmx_cont_run, gmx_run_log)

    base_dir = os.path.abspath(base_dir) + "/"
    iter_name = make_iter_name(iter_index)
    res_path = base_dir + iter_name + "/" + res_name + "/"

    if not os.path.isdir(res_path):
        raise RuntimeError("cannot find any restrained simulation (%s)." %
                           res_path)

    all_task = list(
        filter(lambda x: os.path.isdir(x),
               glob.glob(res_path + "/[0-9]*[0-9]")))
    print('run_res:all_task_propose:', all_task)
    print('run_res:gmx_prep_cmd:', gmx_prep_cmd)
    print('run_res:gmx_run_cmd:', gmx_run_cmd)
    # print('run_res:gmx_cont_run_cmd:', gmx_cont_run_cmd)

    if len(all_task) == 0:
        return None
    all_task.sort()
    all_task_basedir = [os.path.relpath(ii, res_path) for ii in all_task]

    res_resources = set_resource(machine_json, target="resMD")
    machine = set_machine(machine_json, target="resMD")

    gmx_prep_task = [
        Task(command=gmx_prep_cmd,
             task_work_path=ii,
             outlog='gmx_grompp.log',
             errlog='gmx_grompp.log') for ii in all_task_basedir
    ]
    gmx_prep_submission = Submission(work_base=res_path,
                                     machine=machine,
                                     resources=res_resources,
                                     task_list=gmx_prep_task)
    gmx_prep_submission.run_submission()

    gmx_run_task = [
        Task(command=gmx_run_cmd,
             task_work_path=ii,
             outlog='gmx_mdrun.log',
             errlog='gmx_mdrun.log') for ii in all_task_basedir
    ]
    gmx_run_submission = Submission(work_base=res_path,
                                    machine=machine,
                                    resources=res_resources,
                                    task_list=gmx_run_task)
    gmx_run_submission.run_submission()
Example #9
task1 = Task(command='lmp -i input.lammps',
             task_work_path='bct-1/',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'])
task2 = Task(command='lmp -i input.lammps',
             task_work_path='bct-2/',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'])
task3 = Task(command='lmp -i input.lammps',
             task_work_path='bct-3/',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'])
task4 = Task(command='lmp -i input.lammps',
             task_work_path='bct-4/',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'])
task_list = [
    task1,
    task2,
    task3,
    task4,
]

submission = Submission(work_base='0_md/',
                        machine=machine,
                        resources=resources,
                        forward_common_files=['graph.pb'],
                        backward_common_files=[],
                        task_list=task_list)

submission.run_submission()
Example #10
from dpdispatcher.ssh_context import SSHSession, SSHContext

from dpdispatcher.submission import Submission, Job, Task, Resources
from dpdispatcher.batch import Batch
from dpdispatcher.pbs import PBS

# local_session = LocalSession({'work_path':'temp2'})
# local_context = LocalContext(local_root='temp1/', work_profile=local_session)
# lazy_local_context = LazyLocalContext(local_root='/home/fengbo/10_dpdispatcher/dpdispatcher/tests/temp3/0_md', work_profile=None)
# pbs = PBS(context=lazy_local_context)
ssh_session = SSHSession(hostname='39.106.84.25', remote_root='/home/fengbo/dp_remote', username='******')
ssh_context = SSHContext(local_root='test_slurm_dir', ssh_session=ssh_session)
pbs = PBS(context=ssh_context)

resources = Resources(number_node=1,
                      cpu_per_node=4,
                      gpu_per_node=1,
                      queue_name="V100_8_32",
                      group_size=4,
                      if_cuda_multi_devices=True)
submission = Submission(work_base='0_md',
                        resources=resources,
                        forward_common_files=['graph.pb'],
                        backward_common_files=['submission.json'])
task1 = Task(command='lmp_serial -i input.lammps',
             task_work_path='bct-1',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'],
             task_need_resources=1)
task2 = Task(command='lmp_serial -i input.lammps',
             task_work_path='bct-2',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'],
             task_need_resources=0.25)
task3 = Task(command='lmp_serial -i input.lammps',
             task_work_path='bct-3',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'],
             task_need_resources=0.25)
task4 = Task(command='lmp_serial -i input.lammps',
             task_work_path='bct-4',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'],
             task_need_resources=0.5)
submission.register_task_list([task1, task2, task3, task4])
submission.generate_jobs()
submission.bind_batch(batch=pbs)
# for job in submission.belonging_jobs:
#     job.job_to_json()
# print('111', submission)
# submission2 = Submission.recover_jobs_from_json('./jr.json')
# print('222', submission2)
# print(submission==submission2)
submission.run_submission()
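
The commented-out lines above point at dpdispatcher's checkpointing; a sketch of recovering the same submission from the JSON record this run writes (assuming ./jr.json as in the comments):

submission2 = Submission.recover_jobs_from_json('./jr.json')
print(submission == submission2)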
Example #11
import os
import sys
import json

sys.path.insert(0,
                os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from dpdispatcher.submission import Submission, Job, Task, Resources
from dpdispatcher.machine import Machine

# task_need_resources has no effect
with open("jsons/machine_lsf.json", 'r') as f:
    mdata = json.load(f)

machine = Machine.load_from_dict(mdata['machine'])
resources = Resources.load_from_dict(mdata['resources'])

submission = Submission(work_base='0_md/',
                        machine=machine,
                        resources=resources,
                        forward_common_files=['graph.pb'],
                        backward_common_files=[])

task1 = Task(command='lmp -i input.lammps',
             task_work_path='bct-1/',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'])
task2 = Task(command='lmp -i input.lammps',
             task_work_path='bct-2/',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'])
task3 = Task(command='lmp -i input.lammps',
             task_work_path='bct-3/',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'])
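
jsons/machine_lsf.json is not included with this snippet; a minimal sketch of its expected shape, with placeholder values (the field names follow the machine/resources dicts used across these examples, but check the dpdispatcher docs for your batch system):

{
    "machine": {
        "batch_type": "LSF",
        "context_type": "SSHContext",
        "local_root": "./",
        "remote_root": "/remote/work/dir",
        "remote_profile": {
            "hostname": "cluster.example.com",
            "username": "user"
        }
    },
    "resources": {
        "number_node": 1,
        "cpu_per_node": 4,
        "gpu_per_node": 0,
        "queue_name": "normal",
        "group_size": 1
    }
}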
Example #12
resources = Resources(number_node=1,
                      cpu_per_node=4,
                      gpu_per_node=0,
                      queue_name="gpu",
                      walltime="24:00:00",
                      prepend_text=prepend_text,
                      append_text="",
                      gpu_usage=False,
                      gpu_new_syntax=False,
                      extra_specification=lsf_bsub_dict,
                      group_size=1)

# task_need_resources has no effect
submission = Submission(
    work_base='0_md',  # the dir containing all of task_work_path
    resources=resources,  # resources above
    forward_common_files=['graph.pb'],  # file to be upload
    backward_common_files=['*.json']  # file to be downloaded
)
task1 = Task(command='lmp_mpi_20201029 -i input.lammps',
             task_work_path='bct-1',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'],
             task_need_resources=1)
task2 = Task(command='lmp_mpi_20201029 -i input.lammps',
             task_work_path='bct-2',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'],
             task_need_resources=0.25)
task3 = Task(command='lmp_mpi_20201029 -i input.lammps',
             task_work_path='bct-3',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'])
Example #13
def run_enhc(iter_index, json_file):
    iter_name = make_iter_name(iter_index)
    work_path = iter_name + "/" + enhc_name + "/"

    with open(json_file, 'r') as fp:
        jdata = json.load(fp)
    gmx_prep = jdata["gmx_prep"] + ' -f grompp_restraint.mdp -r conf_init.gro'
    gmx_run = jdata["gmx_run"]
    enhc_thread = jdata["bias_thread"]
    gmx_run = gmx_run + (" -nt %d " % enhc_thread)
    gmx_prep_log = "gmx_grompp.log"
    gmx_run_log = "gmx_mdrun.log"
    # assuming at least one walker
    graph_files = glob.glob(work_path + (make_walker_name(0)) + "/*.pb")
    if len(graph_files) != 0:
        gmx_run = gmx_run + " -plumed " + enhc_plm
    else:
        gmx_run = gmx_run + " -plumed " + enhc_bf_plm
    gmx_prep_cmd = cmd_append_log(gmx_prep, gmx_prep_log)
    gmx_run_cmd = cmd_append_log(gmx_run, gmx_run_log)
    numb_walkers = jdata["numb_walkers"]
    batch_jobs = jdata['batch_jobs']
    batch_time_limit = jdata['batch_time_limit']
    batch_modules = jdata['batch_modules']
    batch_sources = jdata['batch_sources']

    print('debug', glob.glob(work_path + "/[0-9]*[0-9]"))
    # all_task = glob.glob(work_path + "/[0-9]*[0-9]")
    all_task = list(
        filter(lambda x: os.path.isdir(x),
               glob.glob(work_path + "/[0-9]*[0-9]")))
    all_task.sort()

    all_task_basedir = [os.path.relpath(ii, work_path) for ii in all_task]
    print('run_enhc:work_path', work_path)
    print('run_enhc:gmx_prep_cmd:', gmx_prep_cmd)
    print('run_enhc:gmx_run_cmd:', gmx_run_cmd)
    print('run_enhc:all_task:', all_task)
    print('run_enhc:all_task_basedir:', all_task_basedir)
    print('run_enhc:batch_jobs:', batch_jobs)

    lazy_local_context = LazyLocalContext(local_root='./', work_profile=None)
    # pbs = PBS(context=lazy_local_context)
    slurm = Slurm(context=lazy_local_context)
    gmx_prep_task = [
        Task(command=gmx_prep_cmd,
             task_work_path=ii,
             outlog='gmx_grompp.log',
             errlog='gmx_grompp.err') for ii in all_task_basedir
    ]
    gmx_prep_submission = Submission(work_base=work_path,
                                     resources=resources,
                                     batch=slurm,
                                     task_list=gmx_prep_task)

    gmx_prep_submission.run_submission()

    gmx_run_task = [
        Task(command=gmx_run_cmd,
             task_work_path=ii,
             outlog='gmx_mdrun.log',
             errlog='gmx_mdrun.log') for ii in all_task_basedir
    ]
    gmx_run_submission = Submission(work_base=work_path,
                                    resources=resources,
                                    batch=slurm,
                                    task_list=gmx_run_task)
    gmx_run_submission.run_submission()
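
Note that resources is referenced above but never defined in this snippet; a minimal stand-in, mirroring the Resources arguments used in the other examples (all values are placeholders):

resources = Resources(number_node=1,
                      cpu_per_node=8,
                      gpu_per_node=1,
                      queue_name="gpu",
                      group_size=1)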
Example #14
def run_train(iter_index, json_file, machine_json, cv_file, base_dir="./"):
    json_file = os.path.abspath(json_file)
    cv_file = os.path.abspath(cv_file)
    with open(json_file, 'r') as fp:
        jdata = json.load(fp)
    numb_model = jdata["numb_model"]
    train_thread = jdata["train_thread"]
    res_iter = jdata["res_iter"]
    base_dir = os.path.abspath(base_dir) + "/"
    iter_name = make_iter_name(iter_index)
    train_path = base_dir + iter_name + "/" + train_name + "/"
    if check_new_data(iter_index, train_path, base_dir):
        return

    enhc_path = base_dir + iter_name + "/" + enhc_name + "/"
    _conf_file = enhc_path + "000/conf.gro"
    cv_dim_list = cal_cv_dim(_conf_file, cv_file)

    cwd = os.getcwd()
    neurons = jdata["neurons"]
    batch_size = jdata["batch_size"]
    if iter_index < res_iter:
        numb_epoches = jdata["numb_epoches"]
        starter_lr = jdata["starter_lr"]
        decay_steps = jdata["decay_steps"]
        decay_rate = jdata["decay_rate"]
        cmdl_args = ""
    else:
        numb_epoches = jdata["res_numb_epoches"]
        starter_lr = jdata["res_starter_lr"]
        decay_steps = jdata["res_decay_steps"]
        decay_rate = jdata["res_decay_rate"]
        old_ratio = jdata["res_olddata_ratio"]
        cmdl_args = " --restart --use-mix --old-ratio %f " % old_ratio

    if jdata["resnet"]:
        cmdl_args += " --resnet "
    cmdl_args += " -n "
    for nn in neurons:
        cmdl_args += "%d " % nn
    cmdl_args += " -c "
    for cv_dim in cv_dim_list:
        cmdl_args += "%d " % cv_dim
    cmdl_args += " -b " + str(batch_size)
    cmdl_args += " -e " + str(numb_epoches)
    cmdl_args += " -l " + str(starter_lr)
    cmdl_args += " --decay-steps " + str(decay_steps)
    cmdl_args += " --decay-rate " + str(decay_rate)

    train_cmd = "python3 {}/train.py -t {:d}".format(NN_PATH, train_thread)
    train_cmd += cmdl_args
    train_cmd = cmd_append_log(train_cmd, "train.log")
    freez_cmd = "python3 {}/freeze.py -o graph.pb".format(NN_PATH)
    freez_cmd = cmd_append_log(freez_cmd, "freeze.log")
    task_dirs = [("%03d" % ii) for ii in range(numb_model)]

    print('lib.modeling.run_train:train_cmd:', train_cmd)
    print('lib.modeling.run_train:freez_cmd:', freez_cmd)
    print('lib.modeling.run_train:train_path:', train_path)
    print('lib.modeling.run_train:task_dirs:', task_dirs)

    resources = set_resource(machine_json, target="train")
    machine = set_machine(machine_json, target="train")

    train_task = [
        Task(command=train_cmd,
             task_work_path=ii,
             outlog='train.log',
             errlog='train.log') for ii in task_dirs
    ]
    train_submission = Submission(work_base=train_path,
                                  machine=machine,
                                  resources=resources,
                                  task_list=train_task)
    train_submission.run_submission()

    freez_task = [
        Task(command=freez_cmd,
             task_work_path=ii,
             outlog='freeze.log',
             errlog='freeze.log') for ii in task_dirs
    ]
    freez_submission = Submission(work_base=train_path,
                                  machine=machine,
                                  resources=resources,
                                  task_list=freez_task)
    freez_submission.run_submission()

    os.chdir(train_path)
    for ii in range(numb_model):
        os.symlink("%03d/graph.pb" % ii, "graph.%03d.pb" % ii)
    os.chdir(cwd)

    print("Training finished!")
Example #15
# local_session = LocalSession({'work_path':'temp2'})
# local_context = LocalContext(local_root='temp1/0_md', work_profile=local_session)
lazy_local_context = LazyLocalContext(
    local_root='/home/fengbo/10_dpdispatcher/dpdispatcher/tests/temp3',
    work_profile=None)

# pbs = PBS(context=local_context)
pbs = PBS(context=lazy_local_context)

resources = Resources(number_node=1,
                      cpu_per_node=4,
                      gpu_per_node=1,
                      queue_name="V100_8_32",
                      group_size=4,
                      if_cuda_multi_devices=True)
submission = Submission(work_base='0_md', resources=resources)
task1 = Task(command='lmp_serial -i input.lammps', task_work_path='bct-1')
task2 = Task(command='lmp_serial -i input.lammps', task_work_path='bct-2')
task3 = Task(command='lmp_serial -i input.lammps', task_work_path='bct-3')
task4 = Task(command='lmp_serial -i input.lammps', task_work_path='bct-4')
submission.register_task_list([
    task1,
    task2,
    task3,
    task4,
])
submission.generate_jobs()
submission.bind_batch(batch=pbs)
# for job in submission.belonging_jobs:
#     job.job_to_json()
# print('111', submission)