def compute(self):
    """Copy a directory between the client and a remote machine.

    Reads the 'machine', 'local_directory' and 'remote_directory' ports
    (all mandatory) and transfers in the direction selected by the
    optional 'to_local' port.  Sets the 'machine' output port.
    """
    # The transfer touches external state, so VisTrails must not cache it.
    self.is_cacheable = lambda *args, **kwargs: False

    # Validate and read the mandatory ports, in the original order.
    if not self.hasInputFromPort('machine'):
        raise ModuleError(self, "No machine specified")
    machine = self.getInputFromPort('machine').machine
    if not self.hasInputFromPort('local_directory'):
        raise ModuleError(self, "No local directory specified")
    local_directory = self.getInputFromPort('local_directory').strip()
    if not self.hasInputFromPort('remote_directory'):
        raise ModuleError(self, "No remote directory specified")
    remote_directory = self.getInputFromPort('remote_directory').strip()

    # Direction defaults to pushing toward the remote host.
    whereto = 'remote'
    if self.hasInputFromPort('to_local') and self.getInputFromPort('to_local'):
        whereto = 'local'

    monitor = JobMonitor.getInstance()
    cache = monitor.getCache(self.signature)
    if not cache:
        ## This indicates that the coming commands submitted on the machine
        # trick to select machine without initializing every time
        use_machine(machine)
        destination = local_directory if whereto == 'local' else remote_directory
        make_dir = CreateDirectory(whereto, destination)
        transfer = TransferFiles(whereto, local_directory, remote_directory,
                                 dependencies=[make_dir])
        transfer.run()
        end_machine()
        cache = monitor.setCache(self.signature, {'result': ''})

    self.setResult("machine", machine)
def compute(self):
    """Transfer files between the local and a remote directory.

    Requires 'machine', 'local_directory' and 'remote_directory' inputs;
    the boolean 'to_local' port flips the transfer direction.  The used
    machine is re-emitted on the 'machine' output port.
    """
    # Never serve this module from the VisTrails cache.
    self.is_cacheable = lambda *args, **kwargs: False

    if not self.hasInputFromPort('machine'):
        raise ModuleError(self, "No machine specified")
    machine = self.getInputFromPort('machine').machine
    if not self.hasInputFromPort('local_directory'):
        raise ModuleError(self, "No local directory specified")
    local_directory = self.getInputFromPort('local_directory').strip()
    if not self.hasInputFromPort('remote_directory'):
        raise ModuleError(self, "No remote directory specified")
    remote_directory = self.getInputFromPort('remote_directory').strip()

    # 'local' only when the optional to_local port is present and truthy.
    whereto = ('local'
               if self.hasInputFromPort('to_local')
               and self.getInputFromPort('to_local')
               else 'remote')

    jm = JobMonitor.getInstance()
    cache = jm.getCache(self.signature)
    if not cache:
        ## This indicates that the coming commands submitted on the machine
        # trick to select machine without initializing every time
        use_machine(machine)
        if whereto == 'local':
            to_dir = local_directory
        else:
            to_dir = remote_directory
        cdir = CreateDirectory(whereto, to_dir)
        job = TransferFiles(whereto, local_directory, remote_directory,
                            dependencies=[cdir])
        job.run()
        end_machine()
        cache = jm.setCache(self.signature, {'result': ''})

    self.setResult("machine", machine)
def finishJob(self, params):
    """Collect results of the finished remote job.

    Annotates the module with the scheduler's job info, copies the
    working directory back to the client, pops the machine stack and
    returns the job's captured stdout/stderr.
    """
    info = self.job.get_job_info()
    if info:
        self.annotate({'job_info': info})
    # copies the created files to the client
    result_transfer = TransferFiles("local",
                                    params['input_directory'],
                                    params['working_directory'],
                                    dependencies=[self.cdir])
    result_transfer.run()
    end_machine()
    out = self.job.standard_output()
    err = self.job.standard_error()
    return {'stdout': out, 'stderr': err}
def compute(self):
    """Build a BQMachine handle from the connection ports.

    Every port is optional: server defaults to 'localhost', port to 22,
    username to the current user and password to the empty string.  The
    module itself is emitted on the 'value' output port.
    """
    if self.hasInputFromPort('server'):
        server = self.getInputFromPort('server')
    else:
        server = 'localhost'
    if self.hasInputFromPort('port'):
        port = self.getInputFromPort('port')
    else:
        port = 22
    if self.hasInputFromPort('username'):
        username = self.getInputFromPort('username')
    else:
        username = current_user()
    if self.hasInputFromPort('password'):
        password = self.getInputFromPort('password')
    else:
        password = ''

    self.machine = BQMachine(server, username, password, port)
    # force creation of server-side help files
    select_machine(self.machine)
    end_machine()
    self.setResult("value", self)
def finishJob(self, params):
    """Finalize a completed job: annotate, fetch files, report output.

    Returns a dict with the job's 'stdout' and 'stderr' text.
    """
    job_info = self.job.get_job_info()
    if job_info:
        self.annotate({'job_info': job_info})
    # copies the created files to the client
    get_result = TransferFiles(
        "local",
        params['input_directory'],
        params['working_directory'],
        dependencies=[self.cdir],
    )
    get_result.run()
    end_machine()
    return {
        'stdout': self.job.standard_output(),
        'stderr': self.job.standard_error(),
    }
def startJob(self, params):
    """Submit a PBS script to the remote machine.

    Creates the remote working directory, ships the input directory
    there, submits the script and validates that the scheduler returned
    a numeric job id.  Raises ModuleError (after popping the machine
    stack) when the returned id is not an integer.
    """
    work_dir = params['working_directory']
    use_machine(self.machine)
    self.cdir = CreateDirectory("remote", work_dir)
    transfer = TransferFiles("remote", params['input_directory'], work_dir,
                             dependencies=[self.cdir])
    self.job = PBSScript("remote", params['command'], work_dir,
                         dependencies=[transfer],
                         **params['additional_arguments'])
    self.job.run()
    try:
        ret = self.job._ret
        if ret:
            # Only used as a sanity check: a non-numeric reply means the
            # submission failed.
            job_id = int(ret)
    except ValueError:
        end_machine()
        raise ModuleError(self, "Error submitting job: %s" % ret)
    return params
def compute(self):
    """Run a shell command on the remote machine, with result caching.

    Requires 'machine' and 'command' inputs.  A previously cached result
    for the same signature is reused; otherwise the command is executed
    remotely and the output cached.  Emits 'output' and 'machine'.
    """
    if not self.hasInputFromPort('machine'):
        raise ModuleError(self, "No machine specified")
    if not self.hasInputFromPort('command'):
        raise ModuleError(self, "No command specified")
    command = self.getInputFromPort('command').strip()
    machine = self.getInputFromPort('machine').machine

    monitor = JobMonitor.getInstance()
    cached = monitor.getCache(self.signature)
    if cached:
        result = cached['result']
    else:
        ## This indicates that the coming commands submitted on the machine
        # trick to select machine without initializing every time
        use_machine(machine)
        result = current_machine().remote.send_command(command)
        end_machine()
        cached = monitor.setCache(self.signature, {'result': result})

    self.setResult("output", result)
    self.setResult("machine", self.getInputFromPort('machine'))
def compute(self):
    """Execute a command remotely, reusing a cached result when present.

    Raises ModuleError when 'machine' or 'command' is missing; otherwise
    sets the 'output' port to the command output and re-emits 'machine'.
    """
    for port, message in (('machine', "No machine specified"),
                          ('command', "No command specified")):
        if not self.hasInputFromPort(port):
            raise ModuleError(self, message)
    command = self.getInputFromPort('command').strip()
    machine = self.getInputFromPort('machine').machine

    jm = JobMonitor.getInstance()
    cache = jm.getCache(self.signature)
    if cache:
        result = cache['result']
    else:
        ## This indicates that the coming commands submitted on the machine
        # trick to select machine without initializing every time
        use_machine(machine)
        m = current_machine()
        result = m.remote.send_command(command)
        end_machine()
        cache = jm.setCache(self.signature, {'result': result})

    self.setResult("output", result)
    self.setResult("machine", self.getInputFromPort('machine'))
def startJob(self, params):
    """Stage inputs and submit the PBS script for this job.

    Stores the created directory and job handles on self so finishJob
    can use them.  A non-integer scheduler reply aborts the submission
    with ModuleError after releasing the machine.
    """
    work_dir = params['working_directory']
    use_machine(self.machine)
    self.cdir = CreateDirectory("remote", work_dir)
    trans = TransferFiles(
        "remote",
        params['input_directory'],
        work_dir,
        dependencies=[self.cdir],
    )
    self.job = PBSScript(
        "remote",
        params['command'],
        work_dir,
        dependencies=[trans],
        **params['additional_arguments']
    )
    self.job.run()
    try:
        ret = self.job._ret
        if ret:
            job_id = int(ret)  # sanity check: must parse as an integer id
    except ValueError:
        end_machine()
        raise ModuleError(self, "Error submitting job: %s" % ret)
    return params
def newfnc(*args, **kwargs):
    """Call the wrapped ``fnc`` with ``machine`` selected for its duration.

    Fix: the original skipped ``end_machine()`` whenever ``fnc`` raised,
    leaking the machine selection on the stack; the try/finally guarantees
    the selection is popped on every exit path.
    """
    select_machine(machine)
    try:
        return fnc(*args, **kwargs)
    finally:
        end_machine()
def compute(self):
    """Submit a PBS job, suspend while it runs, and collect its results.

    Reads 'machine', 'command' and 'input_directory' (mandatory) plus an
    optional 'working_directory' and scheduler tuning ports.  Raises
    ModuleSuspended while the job is queued/running; on completion sets
    'stdout', 'stderr' and 'file_list'.

    Fixes vs. original:
    - ``status += ': ' + comment[10:]`` sliced the *list* of matching
      lines and then crashed with ``TypeError`` on str+list; now takes
      the first matching line and strips the ``'comment = '`` prefix.
    - the job-info annotation reuses the already-fetched ``job_info``
      instead of issuing a second remote ``get_job_info()`` call.
    """
    # Remote submission must never be served from the VisTrails cache
    # until the job has actually finished.
    self.is_cacheable = lambda *args, **kwargs: False

    if not self.hasInputFromPort('machine'):
        raise ModuleError(self, "No machine specified")
    machine = self.getInputFromPort('machine').machine
    if not self.hasInputFromPort('command'):
        raise ModuleError(self, "No command specified")
    command = self.getInputFromPort('command').strip()
    working_directory = self.getInputFromPort('working_directory') \
        if self.hasInputFromPort('working_directory') else '.'
    if not self.hasInputFromPort('input_directory'):
        raise ModuleError(self, "No input directory specified")
    input_directory = self.getInputFromPort('input_directory').strip()

    # Scheduler resource defaults; each may be overridden by a port.
    additional_arguments = {'processes': 1, 'time': -1, 'mpi': False,
                            'threads': 1, 'memory': -1, 'diskspace': -1}
    for k in additional_arguments:
        if self.hasInputFromPort(k):
            additional_arguments[k] = self.getInputFromPort(k)

    ## This indicates that the coming commands submitted on the machine
    # trick to select machine without initializing every time
    use_machine(machine)
    cdir = CreateDirectory("remote", working_directory)
    trans = TransferFiles("remote", input_directory, working_directory,
                          dependencies=[cdir])
    job = PBS("remote", command, working_directory,
              dependencies=[trans], **additional_arguments)
    job.run()
    try:
        ret = job._ret
        if ret:
            # Sanity check: the scheduler reply must be a numeric job id.
            job_id = int(ret)
    except ValueError:
        end_machine()
        raise ModuleError(self, "Error submitting job: %s" % ret)

    finished = job.finished()
    job_info = job.get_job_info()
    if job_info:
        self.annotate({'job_info': job_info})
    if not finished:
        status = job.status()
        # try to get more detailed information about the job
        # this only seems to work on some versions of torque
        if job_info:
            comment = [line for line in job_info.split('\n')
                       if line.startswith('comment =')]
            if comment:
                # Strip the leading 'comment = ' (10 chars) from the
                # first matching line.
                status += ': ' + comment[0][10:]
        end_machine()
        raise ModuleSuspended(self, '%s' % status, queue=job)

    self.is_cacheable = lambda *args, **kwargs: True
    # copies the created files to the client
    get_result = TransferFiles("local", input_directory, working_directory,
                               dependencies=[cdir])
    get_result.run()
    ## Popping from the machine stack
    end_machine()
    self.setResult("stdout", job.standard_output())
    self.setResult("stderr", job.standard_error())
    files = machine.local.send_command("ls -l %s" % input_directory)
    self.setResult("file_list",
                   [f.split(' ')[-1] for f in files.split('\n')[1:]])
def compute(self):
    """Run a command through PBS on a remote machine and gather output.

    Mandatory ports: 'machine', 'command', 'input_directory'; optional:
    'working_directory' and the resource ports (processes, time, mpi,
    threads, memory, diskspace).  While the job is pending the module
    raises ModuleSuspended; once finished it emits 'stdout', 'stderr'
    and 'file_list'.

    Fixes vs. original:
    - ``comment[10:]`` sliced the list of 'comment =' lines instead of
      the line text, making ``': ' + comment[10:]`` raise TypeError;
      corrected to ``comment[0][10:]``.
    - annotation reuses the cached ``job_info`` rather than calling
      ``job.get_job_info()`` a second time.
    """
    # Disable caching until the remote job has completed.
    self.is_cacheable = lambda *args, **kwargs: False

    if not self.hasInputFromPort('machine'):
        raise ModuleError(self, "No machine specified")
    machine = self.getInputFromPort('machine').machine
    if not self.hasInputFromPort('command'):
        raise ModuleError(self, "No command specified")
    command = self.getInputFromPort('command').strip()
    working_directory = self.getInputFromPort('working_directory') \
        if self.hasInputFromPort('working_directory') else '.'
    if not self.hasInputFromPort('input_directory'):
        raise ModuleError(self, "No input directory specified")
    input_directory = self.getInputFromPort('input_directory').strip()

    # Default scheduler arguments, each overridable by its input port.
    additional_arguments = {
        'processes': 1,
        'time': -1,
        'mpi': False,
        'threads': 1,
        'memory': -1,
        'diskspace': -1,
    }
    for k in additional_arguments:
        if self.hasInputFromPort(k):
            additional_arguments[k] = self.getInputFromPort(k)

    ## This indicates that the coming commands submitted on the machine
    # trick to select machine without initializing every time
    use_machine(machine)
    cdir = CreateDirectory("remote", working_directory)
    trans = TransferFiles("remote", input_directory, working_directory,
                          dependencies=[cdir])
    job = PBS("remote", command, working_directory,
              dependencies=[trans], **additional_arguments)
    job.run()
    try:
        ret = job._ret
        if ret:
            # A non-integer reply means the submission failed.
            job_id = int(ret)
    except ValueError:
        end_machine()
        raise ModuleError(self, "Error submitting job: %s" % ret)

    finished = job.finished()
    job_info = job.get_job_info()
    if job_info:
        self.annotate({'job_info': job_info})
    if not finished:
        status = job.status()
        # try to get more detailed information about the job
        # this only seems to work on some versions of torque
        if job_info:
            comment = [
                line for line in job_info.split('\n')
                if line.startswith('comment =')
            ]
            if comment:
                # First matching line, minus the 'comment = ' prefix.
                status += ': ' + comment[0][10:]
        end_machine()
        raise ModuleSuspended(self, '%s' % status, queue=job)

    self.is_cacheable = lambda *args, **kwargs: True
    # copies the created files to the client
    get_result = TransferFiles("local", input_directory, working_directory,
                               dependencies=[cdir])
    get_result.run()
    ## Popping from the machine stack
    end_machine()
    self.setResult("stdout", job.standard_output())
    self.setResult("stderr", job.standard_error())
    files = machine.local.send_command("ls -l %s" % input_directory)
    self.setResult("file_list",
                   [f.split(' ')[-1] for f in files.split('\n')[1:]])