Example #1
0
def set_resource(json_file, target='enhcMD'):
    """Load a ``Resources`` object from the *target* section of a JSON file.

    Parameters
    ----------
    json_file : str
        Path to a JSON file containing ``{target: {"resources": {...}}}``.
    target : str, optional
        Top-level key to read the resources from (default ``'enhcMD'``).

    Returns
    -------
    Resources
        Resources built from ``json_file[target]["resources"]``.
    """
    # `with` guarantees the file is closed even if json.load raises,
    # unlike the previous manual open()/close() pair.
    with open(json_file, 'r') as fp:
        jdata = json.load(fp)
    resources_dict = jdata[target]["resources"]
    return Resources.load_from_dict(resources_dict)
Example #2
0
    def __init__(self, task_dict_list, submission_dict, machine_name,
                 resource_dict, **kwargs):
        """Assemble a Submission from plain-dict task/machine/resource specs.

        ``machine_name`` selects an entry from ``settings['POOL']``; each
        element of ``task_dict_list`` is expanded into a ``Task``.
        """
        # One Task per dict in task_dict_list.
        tasks = [Task(**one_task) for one_task in task_dict_list]

        # Resolve the machine configuration from the global POOL settings.
        pool_entry = settings['POOL'][machine_name]
        machine = Machine.load_from_dict(pool_entry['machine'])
        resources = Resources.load_from_dict(resource_dict)

        self.submission = Submission(machine=machine,
                                     resources=resources,
                                     task_list=tasks,
                                     **submission_dict)
Example #3
0
def get_empty_submission(job_work_dir):
    """Build a Submission with no tasks for *job_work_dir*.

    The machine/resources configuration is read from a ``machine.json``
    located three directory levels above *job_work_dir*.
    """
    machine_file = os.path.join(job_work_dir, '../', '../', '../',
                                'machine.json')
    with open(machine_file, 'r') as fin:
        mdata = json.load(fin)

    # Machine first, then resources — same order as the config sections.
    machine = Machine.load_from_dict(mdata['machine'])
    resources = Resources.load_from_dict(mdata['resources'])

    empty_submission = Submission(
        work_base=job_work_dir,
        resources=resources,
        machine=machine,
    )
    return empty_submission
Example #4
0
# resources = Resources(number_node=1, cpu_per_node=4, gpu_per_node=0, queue_name="1 * NVIDIA P100", group_size=4)
# slurm_sbatch_dict={'mem': '10G', 'cpus_per_task':1, 'time': "120:0:0"}
# slurm_resources = SlurmResources(resources=resources, slurm_sbatch_dict=slurm_sbatch_dict)

# dp_cloud_server_context = DpCloudServerContext(
#     local_root='test_context_dir/',
#     username='******',
#     password='******')
# dp_cloud_server = DpCloudServer(context=dp_cloud_server_context)
# with open('test_dp_cloud_server.json', 'r') as f:
#     jdata = json.load(f)
# Load the machine/resources configuration for the DP Cloud Server backend.
with open('jsons/machine_dp_cloud_server.json', 'r') as f:
    compute_dict = json.load(f)

machine = Machine.load_from_dict(compute_dict['machine'])
resources = Resources.load_from_dict(compute_dict['resources'])

# One LAMMPS run per bct-* work directory; each task uploads its structure
# and input file and fetches the log afterwards.  The extra spaces inside
# some commands are preserved verbatim (harmless to the shell).
task1 = Task(command='lmp    -i input.lammps',
             task_work_path='bct-1/',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'])
task2 = Task(command='lmp -i input.lammps',
             task_work_path='bct-2/',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'])
task3 = Task(command='lmp   -i input.lammps',
             task_work_path='bct-3/',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'])
task4 = Task(command='lmp -i input.lammps',
             task_work_path='bct-4/',
from dpdispatcher.lazy_local_context import LazyLocalContext
from dpdispatcher.ssh_context import SSHSession, SSHContext

from dpdispatcher.submission import Submission, Job, Task, Resources
from dpdispatcher.batch import Batch
from dpdispatcher.pbs import PBS

# local_session = LocalSession({'work_path':'temp2'})
# local_context = LocalContext(local_root='temp1/', work_profile=local_session)
# lazy_local_context = LazyLocalContext(local_root='/home/fengbo/10_dpdispatcher/dpdispatcher/tests/temp3/0_md', work_profile=None)
# pbs = PBS(context=lazy_local_context)
# Drive a remote PBS scheduler over SSH.
ssh_session = SSHSession(hostname='39.106.84.25', remote_root='/home/fengbo/dp_remote', username='******')
ssh_context = SSHContext(local_root='test_slurm_dir', ssh_session=ssh_session)
pbs = PBS(context=ssh_context)

# One V100 node, four CPU cores, CUDA multi-device scheduling enabled.
resources = Resources(number_node=1, cpu_per_node=4, gpu_per_node=1, queue_name="V100_8_32", group_size=4, if_cuda_multi_devices=True)
submission = Submission(work_base='0_md', resources=resources,  forward_common_files=['graph.pb'], backward_common_files=['submission.json'])

# Each bct-* directory is an independent LAMMPS run; task_need_resources is
# the fraction of the node's resources the task claims.
task_specs = [
    ('bct-1', 1),
    ('bct-2', 0.25),
    ('bct-3', 0.25),
    ('bct-4', 0.5),
]
task_list = [
    Task(command='lmp_serial -i input.lammps',
         task_work_path=work_path,
         forward_files=['conf.lmp', 'input.lammps'],
         backward_files=['log.lammps'],
         task_need_resources=need)
    for work_path, need in task_specs
]
submission.register_task_list(task_list)
submission.generate_jobs()
submission.bind_batch(batch=pbs)
submission.run_submission()
Example #6
0
#%%
# import sys, os
# sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..' )))
# import dpdispatcher
from dpdispatcher.submission import Resources, Task
from dpdispatcher.machine import Machine


# %%
# Generate reStructuredText argument documentation for each dargs-enabled
# class and write it into the Sphinx doc tree, one file per class.
for doc_cls, rst_path in [
    (Resources, '../doc/resources-auto.rst'),
    (Machine, '../doc/machine-auto.rst'),
    (Task, '../doc/task-auto.rst'),
]:
    with open(rst_path, 'w') as fout:
        fout.write(doc_cls.arginfo().gen_doc())


# %%
Example #7
0
import os
import sys
import json
from dpdispatcher.submission import Submission, Job, Task, Resources
from dpdispatcher.machine import Machine

# NOTE(review): this insert happens after the dpdispatcher imports above,
# so it cannot have influenced how those modules were resolved.
sys.path.insert(0,
                os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

# task_need_resources has no effect
with open("jsons/machine_lsf.json", 'r') as f:
    mdata = json.load(f)

# Build the LSF machine and resource descriptions from the JSON config.
machine = Machine.load_from_dict(mdata['machine'])
resources = Resources.load_from_dict(mdata['resources'])

# All task work dirs live under 0_md/; graph.pb is shared by every task.
submission = Submission(work_base='0_md/',
                        machine=machine,
                        resources=resources,
                        forward_common_files=['graph.pb'],
                        backward_common_files=[])

# One LAMMPS run per bct-* directory.
task1 = Task(command='lmp -i input.lammps',
             task_work_path='bct-1/',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'])
task2 = Task(command='lmp -i input.lammps',
             task_work_path='bct-2/',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'])
task3 = Task(command='lmp -i input.lammps',
Example #8
0
# Bind an LSF batch system to the (externally constructed) SSH context.
lsf = LSF(context=ssh_context)

# Shell snippet prepended to every job script: load the toolchain and
# select an available GPU before the task command runs.
prepend_text = '''
module load cuda/9.2
module load gcc/4.9.4
module load deepmd/1.0
source /home/dp/scripts/avail_gpu.sh
'''

# Extra raw bsub options; -R 'select[hname != g005]' keeps jobs off g005.
lsf_bsub_dict = {'R': "'select[hname != g005]'"}
resources = Resources(number_node=1,
                      cpu_per_node=4,
                      gpu_per_node=0,
                      queue_name="gpu",
                      walltime="24:00:00",
                      prepend_text=prepend_text,
                      append_text="",
                      gpu_usage=False,
                      gpu_new_syntax=False,
                      extra_specification=lsf_bsub_dict,
                      group_size=1)

# task_need_resources has no effect
submission = Submission(
    work_base='0_md',  # the dir containing all of task_work_path
    resources=resources,  # resources above
    forward_common_files=['graph.pb'],  # file to be upload
    backward_common_files=['*.json']  # file to be downloaded
)
task1 = Task(command='lmp_mpi_20201029 -i input.lammps',
             task_work_path='bct-1',
Example #9
0
from dpdispatcher.submission import Submission, Job, Task, Resources
from dpdispatcher.batch import Batch
# from dpdispatcher.pbs import PBS
from dpdispatcher.shell import Shell

# Run tasks locally through a plain Shell batch backend.
local_session = LocalSession({'work_path': 'temp2'})
local_context = LocalContext(local_root='test_shell_dir/',
                             work_profile=local_session)
# lazy_local_context = LazyLocalContext(local_root='/home/fengbo/10_dpdispatcher/dpdispatcher/tests/temp3/0_md', work_profile=None)
shell = Shell(context=local_context)
# pbs = PBS(context=lazy_local_context)

# NOTE(review): gpu_per_node=1 with a queue named "V100_8_32" even though
# the Shell backend runs locally — presumably copied from a cluster example.
resources = Resources(number_node=1,
                      cpu_per_node=4,
                      gpu_per_node=1,
                      queue_name="V100_8_32",
                      group_size=4)
submission = Submission(work_base='0_md',
                        resources=resources,
                        forward_common_files=['graph.pb'],
                        backward_common_files=['submission.json'
                                               ])  #,  batch=PBS)
# First LAMMPS task claims the whole node (task_need_resources=1).
task1 = Task(command='lmp_serial -i input.lammps',
             task_work_path='bct-1',
             forward_files=['conf.lmp', 'input.lammps'],
             backward_files=['log.lammps'],
             task_need_resources=1)
task2 = Task(command='lmp_serial -i input.lammps',
             task_work_path='bct-2',
             forward_files=['conf.lmp', 'input.lammps'],