def get_graph_image(graph_dot):
    """Render a DOT-format graph description to PNG using the 'twopi' tool.

    Returns the rendered image data, or an error message string when the
    renderer times out or produces no output.
    """
    renderer = LocalProcess('twopi', '-Tpng')
    renderer.stdin.write(graph_dot)
    renderer.stdin.close()
    # status() returns None when the process has not finished within the timeout
    if renderer.status(timeout=20) is None:
        return 'Unable to render graph!'
    return renderer.stdout.read_log() or 'Empty render result!'
def get_graph_image(graph_dot):
    """Convert a DOT graph source into a PNG image via 'twopi'."""
    twopi_proc = LocalProcess('twopi', '-Tpng')
    # Feed the DOT source on stdin and signal end-of-input
    twopi_proc.stdin.write(graph_dot)
    twopi_proc.stdin.close()
    finished = twopi_proc.status(timeout = 20) is not None
    if not finished:
        return 'Unable to render graph!'
    rendered = twopi_proc.stdout.read_log()
    return rendered or 'Empty render result!'
def _submitJob(self, jobNum, module):
    # Submit a single job: write its JDL to a temp file, run the submit
    # executable and return (jobNum, gcID-or-None, {'jdl': jdl text}).
    fd, jdl = tempfile.mkstemp('.jdl')
    try:
        jdlData = self.makeJDL(jobNum, module)
        utils.safeWrite(os.fdopen(fd, 'w'), jdlData)
    except Exception:
        # JDL could not be written - remove the temp file and abort
        utils.removeFiles([jdl])
        raise BackendError('Could not write jdl data to %s.' % jdl)
    try:
        # Build the submit command line from the configured parameters
        # (only parameters with a truthy value are passed on)
        submitArgs = []
        for key_value in utils.filterDict(self._submitParams, vF = lambda v: v).items():
            submitArgs.extend(key_value)
        submitArgs.append(jdl)
        activity = Activity('submitting job %d' % jobNum)
        proc = LocalProcess(self._submitExec, '--nomsg', '--noint', '--logfile', '/dev/stderr', *submitArgs)
        # The WMS id is the last line starting with 'http' in the tool output
        gcID = None
        for line in ifilter(lambda x: x.startswith('http'), imap(str.strip, proc.stdout.iter(timeout = 60))):
            gcID = line
        retCode = proc.status(timeout = 0, terminate = True)
        activity.finish()
        if (retCode != 0) or (gcID is None):
            # Log the failed submission unless explainError already handled it
            if self.explainError(proc, retCode):
                pass
            else:
                self._log.log_process(proc, files = {'jdl': SafeFile(jdl).read()})
    finally:
        # Always clean up the temporary JDL file
        utils.removeFiles([jdl])
    return (jobNum, utils.QM(gcID, self._createId(gcID), None), {'jdl': str.join('', jdlData)})
def cancelJobs(self, allIds):
    """Cancel jobs in groups of 5 and yield (jobNum, deletedWMSId) pairs.

    Fix: a bare ``raise StopIteration`` inside a generator is turned into a
    ``RuntimeError`` by PEP 479 (Python 3.7+); use ``return`` instead.
    """
    if len(allIds) == 0:
        return  # was: raise StopIteration (PEP 479)
    waitFlag = False
    for ids in imap(lambda x: allIds[x:x+5], irange(0, len(allIds), 5)):
        # Delete jobs in groups of 5 - with 5 seconds between groups
        if waitFlag and not utils.wait(5):
            break
        waitFlag = True
        jobNumMap = dict(ids)
        jobs = self.writeWMSIds(ids)
        activity = utils.ActivityLog('cancelling jobs')
        proc = LocalProcess(self._cancelExec, '--noint', '--logfile', '/dev/stderr', '-i', jobs)
        retCode = proc.status(timeout = 60, terminate = True)
        del activity
        # select cancelled jobs (tool marks them with a leading '- ')
        for deletedWMSId in ifilter(lambda x: x.startswith('- '), proc.stdout.iter()):
            deletedWMSId = self._createId(deletedWMSId.strip('- \n'))
            yield (jobNumMap.get(deletedWMSId), deletedWMSId)
        if retCode != 0:
            if self.explainError(proc, retCode):
                pass
            else:
                self._log.log_process(proc, files = {'jobs': utils.safeRead(jobs)})
        utils.removeFiles([jobs])
def _get_jobs_output(self, gc_id_jobnum_list):
    """Get output of jobs and yield (jobnum, output_dn) pairs.

    Yields (None, dn) for recovered directories after an error and
    (jobnum, None) for jobs whose output could not be retrieved.

    Fixes: ``dict.values()`` has no ``remove`` on Python 3 - materialize a
    list; ``raise StopIteration`` inside a generator is a RuntimeError under
    PEP 479 (Python 3.7+) - use ``return``.
    """
    if len(gc_id_jobnum_list) == 0:
        return  # was: raise StopIteration (PEP 479)
    root_dn = os.path.join(self._path_output, 'tmp')
    try:
        if len(gc_id_jobnum_list) == 1:
            # For single jobs create single subdir
            tmp_dn = os.path.join(root_dn, md5_hex(gc_id_jobnum_list[0][0]))
        else:
            tmp_dn = root_dn
        ensure_dir_exists(tmp_dn)
    except Exception:
        raise BackendError('Temporary path "%s" could not be created.' % tmp_dn, BackendError)
    map_gc_id2jobnum = dict(gc_id_jobnum_list)
    jobs = self._write_wms_id_list(gc_id_jobnum_list)
    activity = Activity('retrieving %d job outputs' % len(gc_id_jobnum_list))
    proc = LocalProcess(self._output_exec, '--noint', '--logfile', '/dev/stderr',
        '-i', jobs, '--dir', tmp_dn)
    # yield output dirs; 'todo' tracks jobs without retrieved output
    todo = list(map_gc_id2jobnum.values())  # fix: list() needed for .remove() on py3
    current_jobnum = None
    for line in imap(str.strip, proc.stdout.iter(timeout=60)):
        if line.startswith(tmp_dn):
            todo.remove(current_jobnum)
            output_dn = line.strip()
            unpack_wildcard_tar(self._log, output_dn)
            yield (current_jobnum, output_dn)
            current_jobnum = None
        else:
            # Non-path lines may carry the WMS id of the job being processed
            current_jobnum = map_gc_id2jobnum.get(self._create_gc_id(line), current_jobnum)
    exit_code = proc.status(timeout=0, terminate=True)
    activity.finish()
    if exit_code != 0:
        if 'Keyboard interrupt raised by user' in proc.stderr.read(timeout=0):
            remove_files([jobs, root_dn])
            return  # was: raise StopIteration (PEP 479)
        else:
            self._log.log_process(proc, files={'jobs': SafeFile(jobs).read()})
        self._log.error('Trying to recover from error ...')
        for dn in os.listdir(root_dn):
            yield (None, os.path.join(root_dn, dn))
    # return unretrievable jobs
    for jobnum in todo:
        yield (jobnum, None)
    remove_files([jobs, tmp_dn])
def cancelJobs(self, allIds):
    """Cancel the given jobs in batches of 5, yielding (jobNum, wmsId).

    Fix: ``raise StopIteration`` in a generator is converted to
    ``RuntimeError`` by PEP 479 (Python 3.7+); ``return`` is the correct,
    backward-compatible way to end the generator.
    """
    if len(allIds) == 0:
        return  # was: raise StopIteration (PEP 479)
    waitFlag = False
    for ids in imap(lambda x: allIds[x:x + 5], irange(0, len(allIds), 5)):
        # Delete jobs in groups of 5 - with 5 seconds between groups
        if waitFlag and not utils.wait(5):
            break
        waitFlag = True
        jobNumMap = dict(ids)
        jobs = self.writeWMSIds(ids)
        activity = utils.ActivityLog('cancelling jobs')
        proc = LocalProcess(self._cancelExec, '--noint', '--logfile', '/dev/stderr', '-i', jobs)
        retCode = proc.status(timeout=60, terminate=True)
        del activity
        # select cancelled jobs - the tool prefixes them with '- '
        for deletedWMSId in ifilter(lambda x: x.startswith('- '), proc.stdout.iter()):
            deletedWMSId = self._createId(deletedWMSId.strip('- \n'))
            yield (jobNumMap.get(deletedWMSId), deletedWMSId)
        if retCode != 0:
            if self.explainError(proc, retCode):
                pass
            else:
                self._log.log_process(proc, files={'jobs': utils.safeRead(jobs)})
        utils.removeFiles([jobs])
def get_jobs_output_chunk(self, tmp_dn, gc_id_jobnum_list, wms_id_list_done):
    """Retrieve one chunk of job outputs, yielding (jobnum, output_dn).

    Successfully retrieved raw WMS ids are appended to *wms_id_list_done*.
    Fix: ``raise StopIteration`` inside a generator raises RuntimeError
    under PEP 479 (Python 3.7+); replaced with ``return``.
    """
    map_gc_id2jobnum = dict(gc_id_jobnum_list)
    jobs = list(self._iter_wms_ids(gc_id_jobnum_list))
    # NOTE(review): tempfile.mktemp is race-prone; kept because the log file
    # name is only handed to the external tool - confirm before changing.
    log = tempfile.mktemp('.log')
    proc = LocalProcess(self._output_exec, '--noint', '--logfile', log,
        '--dir', tmp_dn, *jobs)
    exit_code = proc.status(timeout=20 * len(jobs), terminate=True)
    # yield output dirs parsed from the tool output
    current_jobnum = None
    for line in imap(str.strip, proc.stdout.iter(timeout=20)):
        match = re.match(self._output_regex, line)
        if match:
            wms_id = match.groupdict()['rawId']
            current_jobnum = map_gc_id2jobnum.get(self._create_gc_id(wms_id))
            wms_id_list_done.append(wms_id)
            yield (current_jobnum, match.groupdict()['output_dn'])
            current_jobnum = None
    if exit_code != 0:
        if 'Keyboard interrupt raised by user' in proc.stdout.read_log():
            remove_files([log, tmp_dn])
            return  # was: raise StopIteration (PEP 479)
        else:
            self._log.log_process(proc)
        self._log.error('Trying to recover from error ...')
        for dn in os.listdir(tmp_dn):
            yield (None, os.path.join(tmp_dn, dn))
    remove_files([log])
def _getJobsOutput(self, ids):
    """Retrieve job outputs, yielding (jobNum, outputDir) pairs.

    Yields (None, dir) for recovered dirs after an error and (jobNum, None)
    for unretrievable jobs.

    Fixes: ``dict.values()`` has no ``remove`` on Python 3 - wrap in
    ``list``; ``raise StopIteration`` inside a generator is a RuntimeError
    under PEP 479 (Python 3.7+) - use ``return``.
    """
    if len(ids) == 0:
        return  # was: raise StopIteration (PEP 479)
    basePath = os.path.join(self._outputPath, 'tmp')
    try:
        if len(ids) == 1:
            # For single jobs create single subdir
            tmpPath = os.path.join(basePath, md5(ids[0][0]).hexdigest())
        else:
            tmpPath = basePath
        utils.ensureDirExists(tmpPath)
    except Exception:
        raise BackendError('Temporary path "%s" could not be created.' % tmpPath, BackendError)
    jobNumMap = dict(ids)
    jobs = self.writeWMSIds(ids)
    activity = Activity('retrieving %d job outputs' % len(ids))
    proc = LocalProcess(self._outputExec, '--noint', '--logfile', '/dev/stderr',
        '-i', jobs, '--dir', tmpPath)
    # yield output dirs; 'todo' tracks jobs without retrieved output
    todo = list(jobNumMap.values())  # fix: list() needed for .remove() on py3
    currentJobNum = None
    for line in imap(str.strip, proc.stdout.iter(timeout = 60)):
        if line.startswith(tmpPath):
            todo.remove(currentJobNum)
            outputDir = line.strip()
            if os.path.exists(outputDir):
                if 'GC_WC.tar.gz' in os.listdir(outputDir):
                    # Unpack the wildcard output tarball in place
                    wildcardTar = os.path.join(outputDir, 'GC_WC.tar.gz')
                    try:
                        tarfile.TarFile.open(wildcardTar, 'r:gz').extractall(outputDir)
                        os.unlink(wildcardTar)
                    except Exception:
                        self._log.error('Can\'t unpack output files contained in %s', wildcardTar)
            yield (currentJobNum, line.strip())
            currentJobNum = None
        else:
            currentJobNum = jobNumMap.get(self._createId(line), currentJobNum)
    retCode = proc.status(timeout = 0, terminate = True)
    activity.finish()
    if retCode != 0:
        if 'Keyboard interrupt raised by user' in proc.stderr.read(timeout = 0):
            utils.removeFiles([jobs, basePath])
            return  # was: raise StopIteration (PEP 479)
        else:
            self._log.log_process(proc, files = {'jobs': SafeFile(jobs).read()})
        self._log.error('Trying to recover from error ...')
        for dirName in os.listdir(basePath):
            yield (None, os.path.join(basePath, dirName))
    # return unretrievable jobs
    for jobNum in todo:
        yield (jobNum, None)
    utils.removeFiles([jobs, basePath])
def _get_jobs_output(self, gc_id_jobnum_list):
    """Get output of jobs and yield (jobnum, output_dn) pairs.

    Fixes applied: ``list()`` around ``dict.values()`` (views have no
    ``remove`` on Python 3) and ``return`` instead of ``raise
    StopIteration`` inside the generator (PEP 479, Python 3.7+).
    """
    if len(gc_id_jobnum_list) == 0:
        return  # was: raise StopIteration (PEP 479)
    root_dn = os.path.join(self._path_output, 'tmp')
    try:
        if len(gc_id_jobnum_list) == 1:
            # For single jobs create single subdir
            tmp_dn = os.path.join(root_dn, md5_hex(gc_id_jobnum_list[0][0]))
        else:
            tmp_dn = root_dn
        ensure_dir_exists(tmp_dn)
    except Exception:
        raise BackendError('Temporary path "%s" could not be created.' % tmp_dn, BackendError)
    map_gc_id2jobnum = dict(gc_id_jobnum_list)
    jobs = self._write_wms_id_list(gc_id_jobnum_list)
    activity = Activity('retrieving %d job outputs' % len(gc_id_jobnum_list))
    proc = LocalProcess(self._output_exec, '--noint', '--logfile', '/dev/stderr',
        '-i', jobs, '--dir', tmp_dn)
    # yield output dirs; 'todo' tracks jobs without retrieved output
    todo = list(map_gc_id2jobnum.values())  # fix: list() needed for .remove() on py3
    current_jobnum = None
    for line in imap(str.strip, proc.stdout.iter(timeout=60)):
        if line.startswith(tmp_dn):
            todo.remove(current_jobnum)
            output_dn = line.strip()
            unpack_wildcard_tar(self._log, output_dn)
            yield (current_jobnum, output_dn)
            current_jobnum = None
        else:
            current_jobnum = map_gc_id2jobnum.get(self._create_gc_id(line), current_jobnum)
    exit_code = proc.status(timeout=0, terminate=True)
    activity.finish()
    if exit_code != 0:
        if 'Keyboard interrupt raised by user' in proc.stderr.read(timeout=0):
            remove_files([jobs, root_dn])
            return  # was: raise StopIteration (PEP 479)
        else:
            self._log.log_process(proc, files={'jobs': SafeFile(jobs).read()})
        self._log.error('Trying to recover from error ...')
        for dn in os.listdir(root_dn):
            yield (None, os.path.join(root_dn, dn))
    # return unretrievable jobs
    for jobnum in todo:
        yield (jobnum, None)
    remove_files([jobs, tmp_dn])
def _purge_done_jobs(self, wms_id_list_done):
    # Purge finished jobs from the CREAM CE via 'glite-ce-job-purge'.
    # NOTE(review): tempfile.mktemp only reserves a name (race-prone);
    # acceptable here since only the external tool writes the log - confirm.
    purge_log_fn = tempfile.mktemp('.log')
    purge_proc = LocalProcess(resolve_install_path('glite-ce-job-purge'),
        '--noint', '--logfile', purge_log_fn, str.join(' ', wms_id_list_done))
    exit_code = purge_proc.status(timeout=60)
    if exit_code != 0:
        # Log the failure unless _explain_error already handled/explained it
        if self._explain_error(purge_proc, exit_code):
            pass
        else:
            self._log.log_process(purge_proc)
    remove_files([purge_log_fn])
def _recover_jobs(self):
    # Attempt to repair a corrupted zip-based job database:
    # 1) run 'zip -FF' to rebuild the archive into <db>.tmp
    proc = LocalProcess('zip', '-FF', self._db_fn, '--out', '%s.tmp' % self._db_fn)
    proc.stdin.write('y\n')  # auto-confirm the interactive 'zip -FF' prompt
    proc.status(timeout=None)  # wait indefinitely for the repair to finish
    # 2) swap the repaired archive into place, keeping the broken original briefly
    os.rename(self._db_fn, self._db_fn + '.broken')
    os.rename(self._db_fn + '.tmp', self._db_fn)
    tar = zipfile.ZipFile(self._db_fn, 'r', zipfile.ZIP_DEFLATED)
    remove_files([self._db_fn + '.broken'])
    # 3) scan all members; collect those with bad names or unreadable data
    broken_fn_list = []
    for tar_info_fn in tar.namelist():
        try:
            tuple(imap(lambda s: int(s[1:]), tar_info_fn.split('_', 1)))  # check name
            fp = tar.open(tar_info_fn)
            try:
                fp.read()
            finally:
                fp.close()
        except Exception:
            clear_current_exception()
            broken_fn_list.append(tar_info_fn)
    # 4) drop broken members from the archive
    # NOTE(review): os.system with %-interpolated paths is shell-injection
    # prone if _db_fn can contain spaces/metacharacters - confirm inputs.
    for broken in broken_fn_list:
        os.system('zip %s -d %s' % (self._db_fn, broken))
def matchSites(self, endpoint):
    """List sites matching a test JDL on the given WMS endpoint.

    Returns an empty list (and books a timeout strike against the
    endpoint) when the list-match tool does not finish in time.
    """
    activity = Activity('Discovering available WMS services - testing %s' % endpoint)
    check_args = ['-a']
    if endpoint:
        check_args += ['-e', endpoint]
    check_args.append(utils.pathShare('null.jdl'))
    proc = LocalProcess(self._exeGliteWMSJobListMatch, *check_args)
    # Matching sites are listed with a leading ' - ' marker
    matched = [line[3:].strip() for line in proc.stdout.iter(timeout = 3)
        if line.startswith(' - ')]
    activity.finish()
    if proc.status(timeout = 0) is not None:
        return matched
    # Timeout: count the failure; drop endpoints after 10 failures
    self.wms_timeout[endpoint] = self.wms_timeout.get(endpoint, 0) + 1
    if self.wms_timeout.get(endpoint, 0) > 10:
        self.wms_all.remove(endpoint)
    return []
def _match_sites(self, endpoint):
    """Probe a WMS endpoint with a null JDL and return matching sites.

    A timeout counts as a failure; endpoints failing more than 10 times
    are removed from the known-endpoint list.
    """
    activity = Activity('Discovering available WMS services - testing %s' % endpoint)
    probe_args = ['-a']
    if endpoint:
        probe_args.extend(['-e', endpoint])
    probe_args.append(get_path_share('null.jdl'))
    proc = LocalProcess(self._job_list_match_exec, *probe_args)
    site_list = []
    for raw_line in proc.stdout.iter(timeout=3):
        # matching sites are prefixed with ' - '
        if raw_line.startswith(' - '):
            site_list.append(raw_line[3:].strip())
    activity.finish()
    if proc.status(timeout=0) is not None:
        return site_list
    # Process timed out - record the failure and retire flaky endpoints
    self._wms_timeout_dict[endpoint] = self._wms_timeout_dict.get(endpoint, 0) + 1
    if self._wms_timeout_dict.get(endpoint, 0) > 10:  # remove endpoints after 10 failures
        self._wms_list_all.remove(endpoint)
    return []
def _submit_job(self, jobnum, task):
    # Submit job and yield (jobnum, WMS ID, other data)
    jdl_fd, jdl_fn = tempfile.mkstemp('.jdl')
    try:
        jdl_line_list = self._make_jdl(jobnum, task)
        safe_write(os.fdopen(jdl_fd, 'w'), jdl_line_list)
    except Exception:
        # JDL could not be written - clean up the temp file and abort
        remove_files([jdl_fn])
        raise BackendError('Could not write jdl data to %s.' % jdl_fn)
    try:
        # Build the submit command line; only args with truthy values are kept
        submit_arg_list = []
        for key_value in filter_dict(self._submit_args_dict,
                value_filter=identity).items():
            submit_arg_list.extend(key_value)
        submit_arg_list.append(jdl_fn)
        activity = Activity('submitting job %d' % jobnum)
        proc = LocalProcess(self._submit_exec, '--nomsg', '--noint', '--logfile',
            '/dev/stderr', *submit_arg_list)
        # The WMS id is the last stdout line that starts with 'http'
        wms_id = None
        stripped_stdout_iter = imap(str.strip, proc.stdout.iter(timeout=60))
        for line in ifilter(lambda x: x.startswith('http'), stripped_stdout_iter):
            wms_id = line
        exit_code = proc.status(timeout=0, terminate=True)
        activity.finish()
        if (exit_code != 0) or (wms_id is None):
            # Log the failed submission unless _explain_error handled it
            if self._explain_error(proc, exit_code):
                pass
            else:
                self._log.log_process(
                    proc, files={'jdl': SafeFile(jdl_fn).read()})
    finally:
        # Always remove the temporary JDL file
        remove_files([jdl_fn])
    job_data = {'jdl': str.join('', jdl_line_list)}
    # NOTE(review): wms_id may still be None here - presumably
    # _create_gc_id tolerates None; confirm against its implementation.
    return (jobnum, self._create_gc_id(wms_id), job_data)
def bulkSubmissionBegin(self):
    """Prepare bulk submission by creating a delegated proxy.

    Returns True when submission can proceed (either automatic delegation
    is selected or the delegate proxy was created successfully).
    """
    self._submitParams.update({'-d': None})
    if self._discovery_module:
        self._submitParams.update({'-e': self._discovery_module.getWMS()})
    if self._useDelegate is False:
        # Delegation disabled - fall back to automatic delegation
        self._submitParams.update({'-a': ' '})
        return True
    delegate_id = 'GCD' + md5_hex(str(time.time()))[:10]
    activity = utils.ActivityLog('creating delegate proxy for job submission')
    delegate_args = []
    if self._configVO:
        delegate_args.extend(['--config', self._configVO])
    proc = LocalProcess(self._delegateExec, '-d', delegate_id, '--noint',
        '--logfile', '/dev/stderr', *delegate_args)
    output = proc.get_output(timeout = 10, raise_errors = False)
    # Only accept the delegation when the tool confirms it for our id
    if ('glite-wms-job-delegate-proxy Success' in output) and (delegate_id in output):
        self._submitParams.update({'-d': delegate_id})
    del activity
    if proc.status(timeout = 0, terminate = True) != 0:
        self._log.log_process(proc)
    return self._submitParams.get('-d', None) is not None
def checkJobs(self, ids):
    """Check job status, yielding (jobNum, gcID, status, data) tuples.

    Fix: ``raise StopIteration`` inside a generator raises RuntimeError
    under PEP 479 (Python 3.7+); replaced with ``return``.
    """
    if len(ids) == 0:
        return  # was: raise StopIteration (PEP 479)
    jobNumMap = dict(ids)
    jobs = self.writeWMSIds(ids)
    activity = utils.ActivityLog('checking job status')
    proc = LocalProcess(self._statusExec, '--verbosity', 1, '--noint',
        '--logfile', '/dev/stderr', '-i', jobs)
    for data in self._parseStatus(proc.stdout.iter(timeout = 60)):
        data['id'] = self._createId(data['id'])
        yield (jobNumMap.get(data['id']), data['id'], self._statusMap[data['status']], data)
    retCode = proc.status(timeout = 0, terminate = True)
    del activity
    if retCode != 0:
        if self.explainError(proc, retCode):
            pass
        else:
            self._log.log_process(proc, files = {'jobs': utils.safeRead(jobs)})
    utils.removeFiles([jobs])
def _begin_bulk_submission(self):
    """Delegate a proxy to the CE so bulk submission can reuse it.

    Returns True when a delegation id is available (or automatic
    delegation is used because delegation is disabled).
    """
    self._submit_args_dict.update({'-D': None})
    if self._use_delegate is False:
        # Delegation disabled - request automatic delegation instead
        self._submit_args_dict.update({'-a': ' '})
        return True
    delegate_id = 'GCD' + md5_hex(str(time.time()))[:10]
    activity = Activity('creating delegate proxy for job submission')
    # The delegation endpoint is the CE string without its trailing queue part
    endpoint = self._ce[:self._ce.rfind("/")]
    delegate_arg_list = ['-e', endpoint]
    if self._config_fn:
        delegate_arg_list.extend(['--config', self._config_fn])
    proc = LocalProcess(self._delegate_exec, '-d', delegate_id,
        '--logfile', '/dev/stderr', *delegate_arg_list)
    output = proc.get_output(timeout=10, raise_errors=False)
    # accept only when the tool confirms delegation of our id
    delegated = ('succesfully delegated to endpoint' in output) and (delegate_id in output)
    if delegated:
        self._submit_args_dict.update({'-D': delegate_id})
    activity.finish()
    if proc.status(timeout=0, terminate=True) != 0:
        self._log.log_process(proc)
    return self._submit_args_dict.get('-D') is not None
def _begin_bulk_submission(self):
    """Create a delegate proxy for bulk submission to the WMS.

    Returns True when a delegation id was obtained (or automatic
    delegation is selected because delegation is disabled).
    """
    self._submit_args_dict.update({'-d': None})
    if self._discovery_plugin:
        self._submit_args_dict.update({'-e': self._discovery_plugin.get_endpoint()})
    if self._use_delegate is False:
        # Delegation disabled - use automatic delegation
        self._submit_args_dict.update({'-a': ' '})
        return True
    delegate_id = 'GCD' + md5_hex(str(time.time()))[:10]
    activity = Activity('creating delegate proxy for job submission')
    extra_args = []
    if self._config_fn:
        extra_args.extend(['--config', self._config_fn])
    proc = LocalProcess(self._delegate_exec, '-d', delegate_id, '--noint',
        '--logfile', '/dev/stderr', *extra_args)
    output = proc.get_output(timeout=10, raise_errors=False)
    # accept only when the tool reports success for our delegation id
    if ('glite-wms-job-delegate-proxy Success' in output) and (delegate_id in output):
        self._submit_args_dict.update({'-d': delegate_id})
    activity.finish()
    if proc.status(timeout=0, terminate=True) != 0:
        self._log.log_process(proc)
    return self._submit_args_dict.get('-d') is not None
def _submitJob(self, jobNum, module):
    # Submit one job: write its JDL to a temporary file, invoke the submit
    # tool and return (jobNum, gcID-or-None, {'jdl': full JDL text}).
    fd, jdl = tempfile.mkstemp('.jdl')
    try:
        jdlData = self.makeJDL(jobNum, module)
        utils.safeWrite(os.fdopen(fd, 'w'), jdlData)
    except Exception:
        # Could not write the JDL - drop the temp file and abort
        utils.removeFiles([jdl])
        raise BackendError('Could not write jdl data to %s.' % jdl)
    try:
        # Assemble command line arguments from truthy submit parameters
        submitArgs = []
        for key_value in utils.filterDict(self._submitParams, vF=lambda v: v).items():
            submitArgs.extend(key_value)
        submitArgs.append(jdl)
        activity = Activity('submitting job %d' % jobNum)
        proc = LocalProcess(self._submitExec, '--nomsg', '--noint', '--logfile',
            '/dev/stderr', *submitArgs)
        # The job's WMS id is the last 'http...' line printed by the tool
        gcID = None
        for line in ifilter(lambda x: x.startswith('http'),
                imap(str.strip, proc.stdout.iter(timeout=60))):
            gcID = line
        retCode = proc.status(timeout=0, terminate=True)
        activity.finish()
        if (retCode != 0) or (gcID is None):
            # Log failed submission unless explainError already handled it
            if self.explainError(proc, retCode):
                pass
            else:
                self._log.log_process(proc, files={'jdl': SafeFile(jdl).read()})
    finally:
        # Always clean up the temporary JDL file
        utils.removeFiles([jdl])
    return (jobNum, utils.QM(gcID, self._createId(gcID), None), {
        'jdl': str.join('', jdlData)
    })
def _match_sites(self, endpoint):
    """Test one WMS endpoint with a null JDL; return the matched sites.

    Endpoints that time out repeatedly (more than 10 times) are removed
    from the endpoint list.
    """
    activity = Activity('Discovering available WMS services - testing %s' % endpoint)
    arg_list = ['-a']
    if endpoint:
        arg_list += ['-e', endpoint]
    arg_list.append(get_path_share('null.jdl'))
    proc = LocalProcess(self._job_list_match_exec, *arg_list)
    # Matching sites are printed with a leading ' - ' marker
    result = [entry[3:].strip() for entry in proc.stdout.iter(timeout=3)
        if entry.startswith(' - ')]
    activity.finish()
    timed_out = proc.status(timeout=0) is None
    if not timed_out:
        return result
    # Book a timeout strike; remove endpoints after 10 failures
    strikes = self._wms_timeout_dict.get(endpoint, 0) + 1
    self._wms_timeout_dict[endpoint] = strikes
    if self._wms_timeout_dict.get(endpoint, 0) > 10:
        self._wms_list_all.remove(endpoint)
    return []
def _submit_job(self, jobnum, task):
    # Submit job and yield (jobnum, WMS ID, other data)
    jdl_fd, jdl_fn = tempfile.mkstemp('.jdl')
    try:
        jdl_line_list = self._make_jdl(jobnum, task)
        safe_write(os.fdopen(jdl_fd, 'w'), jdl_line_list)
    except Exception:
        # Writing the JDL failed - remove the temp file and abort
        remove_files([jdl_fn])
        raise BackendError('Could not write jdl data to %s.' % jdl_fn)
    try:
        # Keep only submit args with truthy values, then append the JDL path
        submit_arg_list = []
        for key_value in filter_dict(self._submit_args_dict, value_filter=identity).items():
            submit_arg_list.extend(key_value)
        submit_arg_list.append(jdl_fn)
        activity = Activity('submitting job %d' % jobnum)
        proc = LocalProcess(self._submit_exec, '--nomsg', '--noint', '--logfile',
            '/dev/stderr', *submit_arg_list)
        # Take the last stdout line starting with 'http' as the WMS id
        wms_id = None
        stripped_stdout_iter = imap(str.strip, proc.stdout.iter(timeout=60))
        for line in ifilter(lambda x: x.startswith('http'), stripped_stdout_iter):
            wms_id = line
        exit_code = proc.status(timeout=0, terminate=True)
        activity.finish()
        if (exit_code != 0) or (wms_id is None):
            # Log the failure unless _explain_error already covered it
            if self._explain_error(proc, exit_code):
                pass
            else:
                self._log.log_process(proc, files={'jdl': SafeFile(jdl_fn).read()})
    finally:
        # Always remove the temporary JDL file
        remove_files([jdl_fn])
    job_data = {'jdl': str.join('', jdl_line_list)}
    # NOTE(review): wms_id may be None on failed submission - presumably
    # _create_gc_id handles None; confirm against its implementation.
    return (jobnum, self._create_gc_id(wms_id), job_data)
def checkJobs(self, ids):
    """Query status of the given jobs; yield (jobNum, gcID, status, data).

    Fix: a generator must not ``raise StopIteration`` (PEP 479 turns it
    into RuntimeError on Python 3.7+); use ``return`` to finish early.
    """
    if len(ids) == 0:
        return  # was: raise StopIteration (PEP 479)
    jobNumMap = dict(ids)
    jobs = self.writeWMSIds(ids)
    activity = utils.ActivityLog('checking job status')
    proc = LocalProcess(self._statusExec, '--verbosity', 1, '--noint',
        '--logfile', '/dev/stderr', '-i', jobs)
    for data in self._parseStatus(proc.stdout.iter(timeout=60)):
        data['id'] = self._createId(data['id'])
        yield (jobNumMap.get(data['id']), data['id'], self._statusMap[data['status']], data)
    retCode = proc.status(timeout=0, terminate=True)
    del activity
    if retCode != 0:
        if self.explainError(proc, retCode):
            pass
        else:
            self._log.log_process(proc, files={'jobs': utils.safeRead(jobs)})
    utils.removeFiles([jobs])
def _getJobsOutput(self, ids):
    """Retrieve job outputs; yield (jobNum, outputDir) pairs.

    Fixes applied: ``list()`` around ``dict.values()`` (views lack
    ``remove`` on Python 3) and ``return`` instead of ``raise
    StopIteration`` inside the generator (PEP 479, Python 3.7+).
    """
    if len(ids) == 0:
        return  # was: raise StopIteration (PEP 479)
    basePath = os.path.join(self._outputPath, 'tmp')
    try:
        if len(ids) == 1:
            # For single jobs create single subdir
            tmpPath = os.path.join(basePath, md5(ids[0][0]).hexdigest())
        else:
            tmpPath = basePath
        utils.ensureDirExists(tmpPath)
    except Exception:
        raise BackendError(
            'Temporary path "%s" could not be created.' % tmpPath, BackendError)
    jobNumMap = dict(ids)
    jobs = self.writeWMSIds(ids)
    activity = Activity('retrieving %d job outputs' % len(ids))
    proc = LocalProcess(self._outputExec, '--noint', '--logfile', '/dev/stderr',
        '-i', jobs, '--dir', tmpPath)
    # yield output dirs; 'todo' tracks jobs without retrieved output
    todo = list(jobNumMap.values())  # fix: list() needed for .remove() on py3
    currentJobNum = None
    for line in imap(str.strip, proc.stdout.iter(timeout=60)):
        if line.startswith(tmpPath):
            todo.remove(currentJobNum)
            outputDir = line.strip()
            if os.path.exists(outputDir):
                if 'GC_WC.tar.gz' in os.listdir(outputDir):
                    # Unpack the wildcard output tarball in place
                    wildcardTar = os.path.join(outputDir, 'GC_WC.tar.gz')
                    try:
                        tarfile.TarFile.open(wildcardTar, 'r:gz').extractall(outputDir)
                        os.unlink(wildcardTar)
                    except Exception:
                        self._log.error(
                            'Can\'t unpack output files contained in %s', wildcardTar)
            yield (currentJobNum, line.strip())
            currentJobNum = None
        else:
            currentJobNum = jobNumMap.get(self._createId(line), currentJobNum)
    retCode = proc.status(timeout=0, terminate=True)
    activity.finish()
    if retCode != 0:
        if 'Keyboard interrupt raised by user' in proc.stderr.read(timeout=0):
            utils.removeFiles([jobs, basePath])
            return  # was: raise StopIteration (PEP 479)
        else:
            self._log.log_process(proc, files={'jobs': SafeFile(jobs).read()})
        self._log.error('Trying to recover from error ...')
        for dirName in os.listdir(basePath):
            yield (None, os.path.join(basePath, dirName))
    # return unretrievable jobs
    for jobNum in todo:
        yield (jobNum, None)
    utils.removeFiles([jobs, basePath])
def _begin_bulk_submission(self):
    """Ensure a valid delegated proxy exists for bulk submission.

    The delegation id is persisted in a file shared between processes and
    guarded by a lock file. Returns True when a delegation id ('-D') is set.

    Fixes: bare ``except:`` narrowed to ``except Exception:`` (a bare
    except also swallows KeyboardInterrupt/SystemExit); local name ``file``
    no longer shadows the builtin.
    """
    self._set_proxy_lifetime()
    if self._end_of_proxy_lifetime is None:
        raise Exception("_end_of_proxy_lifetime is not set")
    if self._delegated_proxy_filename is None:
        raise Exception("_delegated_proxy_filename is not set")
    if self._end_of_proxy_lifetime <= time.time():
        # Proxy expired: asynchronously drop lock + delegation files and abort
        self._log.info("renew proxy is necessary: %s <= %s" %
            (str(self._end_of_proxy_lifetime), str(time.time())))
        x = threading.Thread(target=CreamWMS.delfile,
            args=(self._lock_filename, 0, self._log))
        x.start()
        y = threading.Thread(target=CreamWMS.delfile,
            args=(self._delegated_proxy_filename, 0, self._log))
        y.start()
        raise Exception("renew proxy is necessary")
    elif '-D' in self._submit_args_dict.keys() and self._submit_args_dict['-D'] is not None:
        # A delegation id is already set and the proxy is still valid
        try:
            left_time_str = timedelta(seconds=self._end_of_proxy_lifetime - time.time())
        except Exception:  # fix: was a bare except
            left_time_str = str(self._end_of_proxy_lifetime - time.time()) + ' sec.'
        self._log.info(
            "Proxy delegation IS NOT ISSUED since expected to be OK. left: %s " % left_time_str)
    else:
        if os.path.isfile(self._delegated_proxy_filename):
            # Reuse the delegation id persisted by another process
            proxy_file = open(self._delegated_proxy_filename, "r")
            delegate_id = proxy_file.read()
            proxy_file.close()
            # file is empty -> another process editing it?
            # if delegate_id is None or delegate_id == '': return False
            if delegate_id is not None and delegate_id != "":
                self._submit_args_dict.update({'-D': delegate_id})
                self._log.info('Proxy delegation read from a file: %s ' % (delegate_id))
        elif not os.path.isfile(self._delegated_proxy_lock):
            # No delegation file and no lock: create a fresh delegation
            # NOTE(review): open(..., "w+") + separate lock file is racy
            # between check and create - confirm single-writer assumption.
            file_lock = open(self._delegated_proxy_lock, "w+")
            proxy_file = open(self._delegated_proxy_filename, "w+")
            activity = Activity('Delegating proxy for job submission')
            self._submit_args_dict.update({'-D': None})
            t = time.time()
            thehex = md5_hex(str(t))
            self._log.info('Proxy delegation full hex: %s at time %s' % (thehex, str(t)))
            delegate_id = 'GCD' + thehex[:15]
            # Delegation endpoint is the CE string without its queue suffix
            delegate_arg_list = ['-e', self._ce[:self._ce.rfind("/")]]
            if self._config_fn:
                delegate_arg_list.extend(['--config', self._config_fn])
            proc = LocalProcess(self._delegate_exec, '-d', delegate_id,
                '--logfile', '/dev/stderr', *delegate_arg_list)
            output = proc.get_output(timeout=10, raise_errors=False)
            if ('succesfully delegated to endpoint' in output) and (delegate_id in output):
                self._submit_args_dict.update({'-D': delegate_id})
            activity.finish()
            if proc.status(timeout=0, terminate=True) != 0:
                self._log.log_process(proc)
            # Persist the delegation id, then release the lock asynchronously
            proxy_file.write(delegate_id)
            proxy_file.close()
            file_lock.close()
            y = threading.Thread(target=CreamWMS.delfile,
                args=(self._delegated_proxy_lock, 0, self._log))
            y.start()
    return self._submit_args_dict.get('-D') is not None