def process(self, item):
    """Poststage the output files (stdout, stderr, output sandbox) of a job.

    Copies the job's stdout/stderr and, when an output sandbox was
    requested, the packed ``_output_sandbox.tgz`` archive from the remote
    SAGA filesystem into the job's output workspace, then unpacks and
    deletes the archive.

    Always returns True so the queue item is consumed; failures are
    reported by flipping the job status to 'failed' instead.
    """
    job = item.jobObj

    ## It is very likely that the job's downloading task has been created
    ## and assigned in a previous monitoring loop; ignore such cases.
    if job.status in ['completing', 'completed', 'failed']:
        return True

    job.updateStatus('completing')

    try:
        logger.info("poststaging output sandbox files")

        # stdout and stderr are copied straight into the output workspace.
        logger.info(" * %s", saga.url(job.backend.saga_job_out).url)
        stdout = saga.filesystem.file(saga.url(job.backend.saga_job_out))
        stdout.copy("file://localhost/" + job.getOutputWorkspace().getPath() + "stdout")

        logger.info(" * %s", saga.url(job.backend.saga_job_err).url)
        stderr = saga.filesystem.file(saga.url(job.backend.saga_job_err))
        stderr.copy("file://localhost/" + job.getOutputWorkspace().getPath() + "stderr")

        if len(job.outputsandbox) > 0:
            output_sandbox = saga.url(job.backend.filesystem_url + "/"
                                      + job.backend.workdir_uuid
                                      + "/_output_sandbox.tgz")
            logger.info(" * %s", output_sandbox.url)
            osb = saga.filesystem.file(output_sandbox)
            osb.copy("file://localhost/" + job.getOutputWorkspace().getPath())

            ## Unpack the output sandbox and delete the archive.
            osbpath = job.getOutputWorkspace().getPath() + "_output_sandbox.tgz"
            if os.path.exists(osbpath):
                # FIX: extract the exact archive path we just checked for
                # existence; the original rebuilt a slightly different
                # path ("<dir>/_output_sandbox.tgz") for the tar command.
                if os.system("tar -C %s -xzf %s"
                             % (job.getOutputWorkspace().getPath(), osbpath)) != 0:
                    job.updateStatus('failed')
                    # FIX: corrected typo 'upack' -> 'unpack'
                    raise Exception('cannot unpack output sandbox')
                os.remove(osbpath)

        job.updateStatus('completed')

    except saga.exception as e:
        logger.error('exception caught while poststaging: %s',
                     e.get_full_message())
        job.updateStatus('failed')

    return True
def process(self, item):
    """Download a finished job's stdout, stderr and output sandbox.

    The remote files are copied into the job's output workspace; the
    sandbox archive is unpacked and removed afterwards.  Returns True in
    every case so the work item is always consumed -- errors surface only
    through the job status.
    """
    job = item.jobObj

    # A downloading task for this job may already have been created and
    # assigned in a previous monitoring loop -- skip those cases.
    if job.status in ['completing', 'completed', 'failed']:
        return True

    job.updateStatus('completing')

    try:
        logger.info("poststaging output sandbox files")

        # Fetch stdout and stderr with identical handling.
        for remote_url, local_name in ((job.backend.saga_job_out, "stdout"),
                                       (job.backend.saga_job_err, "stderr")):
            logger.info(" * %s", saga.url(remote_url).url)
            remote_file = saga.filesystem.file(saga.url(remote_url))
            remote_file.copy("file://localhost/%s%s"
                             % (job.getOutputWorkspace().getPath(), local_name))

        if len(job.outputsandbox) > 0:
            sandbox_url = saga.url("%s/%s/_output_sandbox.tgz"
                                   % (job.backend.filesystem_url,
                                      job.backend.workdir_uuid))
            logger.info(" * %s", sandbox_url.url)
            saga.filesystem.file(sandbox_url).copy(
                "file://localhost/" + job.getOutputWorkspace().getPath())

            # Unpack the output sandbox archive, then delete it.
            archive = job.getOutputWorkspace().getPath() + "_output_sandbox.tgz"
            if os.path.exists(archive):
                untar = "tar -C %s -xzf %s" % (
                    job.getOutputWorkspace().getPath(),
                    job.getOutputWorkspace().getPath() + "/_output_sandbox.tgz")
                if os.system(untar) != 0:
                    job.updateStatus('failed')
                    raise Exception('cannot upack output sandbox')
                os.remove(archive)

        job.updateStatus('completed')

    except saga.exception as e:
        logger.error('exception caught while poststaging: %s',
                     e.get_full_message())
        job.updateStatus('failed')

    return True
def prestagesandbox(self, jobconfig):
    """Transfer the input sandbox and the job script to the remote workdir.

    Two staging strategies, selected by ``self.enable_compression``:
    packed (one tarball) or file-by-file.  Finally the ``__jobscript__``
    from the input workspace is copied, if present.

    Returns True on success; on any saga.exception the job is flagged
    'failed' and False is returned.
    """
    job = self.getJobObject()
    mon = job.getMonitoringService()
    inw = job.getInputWorkspace()
    sandbox_files = jobconfig.getSandboxFiles()

    logger.info("pre-staging files for saga job with id: %s", job.id)

    # Destination directory on the remote SAGA filesystem.
    remote_dir = self.filesystem_url + "/" + self.workdir_uuid + "/"

    if self.enable_compression:
        ## Compression is ENABLED. All input sandbox files are archived
        ## into an uncompressed tarball which is then transfered to the
        ## execution host -- much faster than many small transfers.
        logger.info(" * adding %s user defined files to input sandbox",
                    len(sandbox_files))
        import Ganga.Core.Sandbox as Sandbox
        packed_sandbox = job.createPackedInputSandbox(
            jobconfig.getSandboxFiles()
            + Sandbox.getGangaModulesAsSandboxFiles(Sandbox.getDefaultModules())
            + Sandbox.getGangaModulesAsSandboxFiles(mon.getSandboxModules()))

        try:
            for archive in packed_sandbox:
                archive_url = saga.url("file://localhost/" + archive)
                logger.info(" * copying %s -> %s ", archive_url.url, remote_dir)
                saga.filesystem.file(archive_url.url).copy(
                    remote_dir, saga.filesystem.Overwrite)
        except saga.exception as e:
            logger.error("exception caught while transfering file: %s",
                         e.get_all_exceptions())
            job.updateStatus("failed")
            return False
    else:
        ## Compression is DISABLED. Each sandbox file is transfered
        ## individually -- can be very slow, e.g. with the Globus
        ## GridFTP adaptor.
        logger.info("prestaging %s input sandbox files", len(sandbox_files))
        for sandbox_file in sandbox_files:
            try:
                source = saga.url(sandbox_file.name)
                if len(source.scheme) == 0:
                    source.scheme = "file"
                if len(source.host) == 0:
                    source.host = "localhost"

                if sandbox_file.subdir != ".":
                    # Create the remote subdirectory, then copy into it.
                    target = saga.url(remote_dir + sandbox_file.subdir + "/")
                    sd = saga.filesystem.directory(target,
                                                   saga.filesystem.Create)
                    logger.info(" * copying %s -> %s ", source, target)
                    saga.filesystem.file(source).copy(
                        target, saga.filesystem.Overwrite)
                else:
                    # Copy straight into the remote work directory.
                    logger.info(" * copying %s -> %s ", source, remote_dir)
                    saga.filesystem.file(source).copy(
                        remote_dir, saga.filesystem.Overwrite)
            except saga.exception as e:
                logger.error("exception caught while transfering file: %s",
                             e.get_all_exceptions())
                job.updateStatus("failed")
                return False

    ##
    ## copy the job script
    ##
    try:
        jobscript_path = saga.url("file://localhost/" + inw.getPath()
                                  + "/__jobscript__")
        if os.path.exists(jobscript_path.path):
            logger.info(" * copying %s -> %s ", jobscript_path.url, remote_dir)
            saga.filesystem.file(jobscript_path.url).copy(
                remote_dir, saga.filesystem.Overwrite)
    except saga.exception as e:
        logger.error("exception caught while transfering file: %s",
                     e.get_all_exceptions())
        job.updateStatus("failed")
        return False

    return True
def prestagesandbox(self, jobconfig):
    """Copy the input sandbox files and the job script to the remote workdir.

    When ``self.enable_compression`` is set, the user sandbox files plus
    the Ganga file utilities are packed into one tarball and transfered
    as a single file; otherwise every sandbox file is copied one by one.
    Finally the ``__jobscript__`` from the input workspace is staged, if
    it exists.

    Returns True on success.  On a saga.exception the job status is set
    to 'failed' and False is returned.
    """
    job = self.getJobObject()
    # NOTE(review): mon is not used in this variant; kept because
    # getMonitoringService() may have side effects -- confirm before removing.
    mon = job.getMonitoringService()
    inw = job.getInputWorkspace()
    sandbox_files = jobconfig.getSandboxFiles()

    logger.info("pre-staging files for saga job with id: %s", job.id)

    ## Compression is ENABLED. All input sandbox files are archived
    ## into an uncompressed tarball which is then transfered to the
    ## execution host. This speeds up things a lot in many scenarios.
    if self.enable_compression:
        logger.info(" * adding %s user defined files to input sandbox",
                    len(sandbox_files))
        import Ganga.Core.Sandbox as Sandbox
        # FIX: import the submodule explicitly. 'import Ganga.Core.Sandbox
        # as Sandbox' binds only the name 'Sandbox', so the attribute
        # chain Ganga.Utility.files below is not guaranteed to resolve
        # unless the submodule has been imported somewhere else.
        import Ganga.Utility.files
        from Ganga.GPIDev.Lib.File import File
        from Ganga.Core.Sandbox.WNSandbox import PYTHON_DIR
        import inspect

        # Ship Ganga's file utilities alongside the user files so the
        # worker node can unpack/use the sandbox.
        fileutils = File(inspect.getsourcefile(Ganga.Utility.files),
                         subdir=PYTHON_DIR)
        compressed_input_sandbox = job.createPackedInputSandbox(
            jobconfig.getSandboxFiles() + [fileutils])

        try:
            for f in compressed_input_sandbox:
                archive_url = saga.url("file://localhost/" + f)
                logger.info(" * copying %s -> %s ", archive_url.url,
                            self.filesystem_url + "/" + self.workdir_uuid + "/")
                sf = saga.filesystem.file(archive_url.url)
                sf.copy(self.filesystem_url + "/" + self.workdir_uuid + "/",
                        saga.filesystem.Overwrite)
        except saga.exception as e:
            logger.error('exception caught while transfering file: %s',
                         e.get_all_exceptions())
            job.updateStatus("failed")
            return False

    ## Compression is DISABLED. All input sandbox files are transfered
    ## one-by-one to the execution host. This can be very slow, especially
    ## if SAGA is using the Globus GridFTP adaptor.
    else:
        logger.info("prestaging %s input sandbox files", len(sandbox_files))
        for f in sandbox_files:
            try:
                source = saga.url(f.name)
                if len(source.scheme) == 0:
                    source.scheme = "file"
                if len(source.host) == 0:
                    source.host = "localhost"

                if f.subdir != '.':
                    # Create the remote subdirectory (side effect of
                    # opening it with the Create flag), then copy into it.
                    target = saga.url(self.filesystem_url + "/"
                                      + self.workdir_uuid + "/"
                                      + f.subdir + "/")
                    sd = saga.filesystem.directory(target,
                                                   saga.filesystem.Create)
                    sf = saga.filesystem.file(source)
                    logger.info(" * copying %s -> %s ", source, target)
                    sf.copy(target, saga.filesystem.Overwrite)
                else:
                    # Copy straight into the remote work directory.
                    logger.info(" * copying %s -> %s ", source,
                                self.filesystem_url + "/" + self.workdir_uuid + "/")
                    sf = saga.filesystem.file(source)
                    sf.copy(self.filesystem_url + "/" + self.workdir_uuid + "/",
                            saga.filesystem.Overwrite)
            except saga.exception as e:
                logger.error('exception caught while transfering file: %s',
                             e.get_all_exceptions())
                job.updateStatus("failed")
                return False

    ##
    ## copy the job script
    ##
    try:
        jobscript_path = saga.url("file://localhost/" + inw.getPath()
                                  + "/__jobscript__")
        if os.path.exists(jobscript_path.path):
            logger.info(" * copying %s -> %s ", jobscript_path.url,
                        self.filesystem_url + "/" + self.workdir_uuid + "/")
            sf = saga.filesystem.file(jobscript_path.url)
            sf.copy(self.filesystem_url + "/" + self.workdir_uuid + "/",
                    saga.filesystem.Overwrite)
    except saga.exception as e:
        logger.error('exception caught while transfering file: %s',
                     e.get_all_exceptions())
        job.updateStatus("failed")
        return False

    return True