def jobfunc(pmemd, datadir, path, gpuid):
    """Run an AMBER pmemd simulation in *path* and collect its output.

    Rewrites the ``MD.sh`` launcher script in *path*, substituting the
    ``ENGINE`` placeholder with the *pmemd* binary name, then executes it
    and hands the results to ``_executeMDcommand``.

    Parameters
    ----------
    pmemd : str
        Name/path of the pmemd executable to substitute into ``MD.sh``.
    datadir : str
        Directory where completed trajectory data is gathered.
    path : str
        Working directory of the job. Provided automatically by the
        localqueue class as one of the last arguments.
    gpuid : int
        Provided automatically by the localqueue class. Intentionally
        unused: AMBER itself picks the GPU with the most free memory
        (even if that GPU is already in use, see
        http://ambermd.org/gpus/#Running). We assume the machine is set
        up in compute-exclusive mode, so if other GPUs are busy when the
        adaptive runs launch, jobs only land on free GPUs. Set
        Persistence and Compute Exclusive Modes as root with:
            $ nvidia-smi -pm 1
            $ nvidia-smi -c 3
    """
    # Local import for consistency with the sibling acemd jobfunc: these
    # job functions are dispatched by the local queue and may execute in a
    # context where the module's globals (and imports) are unavailable.
    import os

    script = os.path.join(path, 'MD.sh')

    # Tell the shell script which engine binary we are using by replacing
    # the ENGINE placeholder in place.
    with open(script, 'r') as bash:
        bash_file = bash.read()
    bash_file = bash_file.replace('ENGINE', pmemd)
    with open(script, 'w') as equil:
        equil.write(bash_file)

    cmd = """cd {} && bash {} > log.txt 2>&1""".format(os.path.normpath(path), 'MD.sh')
    _executeMDcommand(cmd, path, datadir, 'pmemd.cuda', '*.nc')
def jobfunc(acemd, datadir, inputfile, timeout, path, gpuid):
    """Run an ACEMD simulation in *path* on the given GPU.

    Builds the shell command (optionally wrapped in ``timeout``), logs it,
    and delegates execution plus result collection to ``_executeMDcommand``.

    Parameters
    ----------
    acemd : str
        Name/path of the acemd executable.
    datadir : str
        Directory where completed trajectory data is gathered.
    inputfile : str
        ACEMD input file passed on the command line.
    timeout : int or None
        If truthy, the run is wrapped in the ``timeout`` shell utility
        with this limit.
    path : str
        Working directory of the job. Provided automatically by the
        localqueue class as one of the last arguments.
    gpuid : int
        GPU device index, also provided automatically by the localqueue
        class; forwarded to acemd via ``--device``.
    """
    # Local import: this function may run in a context without the
    # module's globals.
    import os

    prefix = 'timeout {}'.format(timeout) if timeout else ''
    cmd = 'cd {}; {} {} --device {} {} > log.txt 2>&1'.format(
        os.path.normpath(path), prefix, acemd, gpuid, inputfile
    )
    logger.debug(cmd)
    _executeMDcommand(cmd, path, datadir, 'ACEMD', '*.xtc')