Example #1
	def _get_sandbox_file_list(self, task, sm_list):
		# Prepare all input files
		dep_list = set(ichain(imap(lambda x: x.get_dependency_list(), [task] + sm_list)))
		dep_fn_list = lmap(lambda dep: resolve_path('env.%s.sh' % dep,
			lmap(lambda pkg: get_path_share('', pkg=pkg), os.listdir(get_path_pkg()))), dep_list)
		task_config_dict = dict_union(self._remote_event_handler.get_mon_env_dict(),
			*imap(lambda x: x.get_task_dict(), [task] + sm_list))
		task_config_dict.update({'GC_DEPFILES': str.join(' ', dep_list),
			'GC_USERNAME': self._token.get_user_name(), 'GC_WMS_NAME': self._name})
		task_config_str_list = DictFormat(escape_strings=True).format(
			task_config_dict, format='export %s%s%s\n')
		vn_alias_dict = dict(izip(self._remote_event_handler.get_mon_env_dict().keys(),
			self._remote_event_handler.get_mon_env_dict().keys()))
		vn_alias_dict.update(task.get_var_alias_map())
		vn_alias_str_list = DictFormat(delimeter=' ').format(vn_alias_dict, format='%s%s%s\n')

		# Resolve wildcards in task input files
		def _get_task_fn_list():
			for fpi in task.get_sb_in_fpi_list():
				matched = glob.glob(fpi.path_abs)
				if matched != []:
					for match in matched:
						yield match
				else:
					yield fpi.path_abs
		return lchain([self._remote_event_handler.get_file_list(), dep_fn_list, _get_task_fn_list(), [
			VirtualFile('_config.sh', sorted(task_config_str_list)),
			VirtualFile('_varmap.dat', sorted(vn_alias_str_list))]])
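
The export-style formatting used for _config.sh above can be tried in isolation. A minimal sketch follows, assuming that DictFormat.format() fills the '%s%s%s' placeholders with key, delimiter and value for every entry and returns one line per entry (as the sorted() and writelines() calls in these examples suggest); the import path and the dictionary contents are made up.

from grid_control.utils import DictFormat  # assumed import path

task_config_dict = {'GC_USERNAME': 'someuser', 'GC_WMS_NAME': 'Host'}  # hypothetical values
fmt = DictFormat(escape_strings=True)
for line in fmt.format(task_config_dict, format='export %s%s%s\n'):
	print(line.rstrip())  # presumably renders as e.g.: export GC_USERNAME="someuser"
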
Example #2
	def _iter_datasource_items(self, item, metadata_dict, entries, location_list, obj_dict):
		job_info_path = os.path.join(item, 'job.info')
		try:
			job_info_dict = DictFormat('=').parse(open(job_info_path))
			if job_info_dict.get('exitcode') == 0:
				obj_dict['JOBINFO'] = job_info_dict
				yield (item, metadata_dict, entries, location_list, obj_dict)
		except Exception:
			self._log.log(logging.INFO2, 'Unable to parse job info file %r', job_info_path)
			clear_current_exception()
Example #3
 def __init__(self, config, job_limit=-1, job_selector=None):
     JobDB.__init__(self, config, job_limit, job_selector)
     self._path_db = config.get_work_path('jobs')
     self._fmt = DictFormat(escape_strings=True)
     try:
         self._job_map = self._read_jobs(self._job_limit)
     except Exception:
         raise JobError('Unable to read stored job information!')
     if self._job_limit < 0 and len(self._job_map) > 0:
         self._job_limit = max(self._job_map) + 1
Example #4
 def _iter_datasource_items(self, item, metadata_dict, entries,
                            location_list, obj_dict):
     job_info_path = os.path.join(item, 'job.info')
     try:
         job_info_dict = DictFormat('=').parse(open(job_info_path))
         if job_info_dict.get('exitcode') == 0:
             obj_dict['JOBINFO'] = job_info_dict
             yield (item, metadata_dict, entries, location_list, obj_dict)
     except Exception:
         self._log.log(logging.INFO2, 'Unable to parse job info file %r',
                       job_info_path)
         clear_current_exception()
Example #5
	def _parse(self, proc):
		for section in accumulate(proc.stdout.iter(self._timeout), '', lambda x, buf: x == '\n'):
			try:
				lines = section.replace('\n\t', '').split('\n')
				job_info = DictFormat(' = ').parse(lines[1:])
				job_info[CheckInfo.WMSID] = lines[0].split(':')[1].split('.')[0].strip()
				job_info[CheckInfo.RAW_STATUS] = job_info.pop('job_state')
				job_info[CheckInfo.QUEUE] = job_info.pop('queue', None)
				if 'exec_host' in job_info:
					exec_host = job_info.pop('exec_host').split('/')[0]
					job_info[CheckInfo.WN] = exec_host + '.' + job_info.get('server', '')
			except Exception:
				raise BackendError('Error reading job info:\n%s' % section)
			yield job_info
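
A self-contained sketch of one loop iteration above, using an invented qstat-style section; it assumes DictFormat(' = ').parse() accepts a list of 'key = value' lines, as the call in this example implies.

from grid_control.utils import DictFormat  # assumed import path

section = ('Job Id: 1234.server.example\n'  # fabricated section; real output has folded lines
	'job_state = R\n'
	'queue = short\n'
	'exec_host = node01/3\n'
	'server = server.example')
lines = section.replace('\n\t', '').split('\n')  # join continuation lines, as in _parse() above
job_info = DictFormat(' = ').parse(lines[1:])
wms_id = lines[0].split(':')[1].split('.')[0].strip()   # -> '1234'
worker_node = job_info.pop('exec_host').split('/')[0]   # -> 'node01'
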
Example #6
class JobInfoProcessor(OutputProcessor):
	def __init__(self):
		OutputProcessor.__init__(self)
		self._df = DictFormat()

	def process(self, dn):
		fn = os.path.join(dn, 'job.info')
		try:
			if not os.path.exists(fn):
				raise JobResultError('Job result file %r does not exist' % fn)
			try:
				info_content = open(fn, 'r').read()
			except Exception:
				raise JobResultError('Unable to read job result file %r' % fn)
			if not info_content:
				raise JobResultError('Job result file %r is empty' % fn)
			data = self._df.parse(info_content, keyParser = {None: str}) # impossible to fail
			try:
				jobNum = data.pop('JOBID')
				exitCode = data.pop('EXITCODE')
				return {JobResult.JOBNUM: jobNum, JobResult.EXITCODE: exitCode, JobResult.RAW: data}
			except Exception:
				raise JobResultError('Job result file %r is incomplete' % fn)
		except Exception:
			raise JobResultError('Unable to process output directory %r' % dn)
Example #7
class JobInfoProcessor(OutputProcessor):
	alias_list = ['jobinfo']

	def __init__(self):
		OutputProcessor.__init__(self)
		self._df = DictFormat()

	def process(self, dn):
		fn = os.path.join(dn, 'job.info')
		try:
			if not os.path.exists(fn):
				raise JobResultError('Job result file %r does not exist' % fn)
			try:
				info_content = SafeFile(fn).read_close()
			except Exception:
				raise JobResultError('Unable to read job result file %r' % fn)
			if not info_content:
				raise JobResultError('Job result file %r is empty' % fn)
			data = self._df.parse(info_content, key_parser={None: str})  # impossible to fail
			try:
				jobnum = data.pop('JOBID')
				exit_code = data.pop('EXITCODE')
				message = data.pop('MESSAGE', None)
				return {JobResult.JOBNUM: jobnum, JobResult.EXITCODE: exit_code,
					JobResult.MESSAGE: message, JobResult.RAW: data}
			except Exception:
				raise JobResultError('Job result file %r is incomplete' % fn)
		except Exception:
			raise JobResultError('Unable to process output directory %r' % dn)
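
Examples 2, 6 and 7 all read the same job.info file; the sketch below isolates that parse step with invented file content. Comparing with Example #2, which looks up the lower-case 'exitcode' after a plain parse(), suggests that key_parser={None: str} is what keeps the upper-case keys intact here.

from grid_control.utils import DictFormat  # assumed import path

info_content = 'JOBID=42\nEXITCODE=0\nMESSAGE="all done"\n'  # invented job.info content
data = DictFormat().parse(info_content, key_parser={None: str})
jobnum = data.pop('JOBID')         # values appear to be auto-parsed; cf. 'exitcode' == 0 in Example #2
exit_code = data.pop('EXITCODE')
message = data.pop('MESSAGE', None)
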
Example #8
	def logError(self, target, brief=False, **kwargs): # Can also log content of additional files via kwargs
		now = time.time()
		entry = '%s.%s' % (time.strftime('%Y-%m-%d_%H:%M:%S', time.localtime(now)), ('%.5f' % (now - int(now)))[2:])
		self._logger.log_time(logging.WARNING, '%s failed with code %d', self.niceCmd, self.wait())
		if not brief:
			self._logger.log_time(logging.WARNING, '\n%s', self.getError())

		try:
			tar = tarfile.TarFile.open(target, 'a')
			data = {'retCode': self.wait(), 'exec': self.cmd, 'args': self.args}
			files = [VirtualFile(os.path.join(entry, 'info'), DictFormat().format(data))]
			kwargs.update({'stdout': self.getOutput(), 'stderr': self.getError()})
			for key, value in kwargs.items():
				try:
					content = open(value, 'r').readlines()
				except Exception:
					content = [value]
				files.append(VirtualFile(os.path.join(entry, key), content))
			for fileObj in files:
				info, handle = fileObj.getTarInfo()
				tar.addfile(info, handle)
				handle.close()
			tar.close()
		except Exception:
			raise GCError('Unable to log errors of external process "%s" to "%s"' % (self.niceCmd, target))
		self._logger.info('All logfiles were moved to %s', target)
Example #9
class JobInfoProcessor(OutputProcessor):
    def __init__(self):
        OutputProcessor.__init__(self)
        self._df = DictFormat()

    def process(self, dn):
        fn = os.path.join(dn, 'job.info')
        if not os.path.exists(fn):
            raise JobResultError('Job result file %r does not exist' % fn)
        try:
            info_content = open(fn, 'r').read()
        except Exception:
            raise JobResultError('Unable to read job result file %r' % fn)
        if not info_content:
            raise JobResultError('Job result file %r is empty' % fn)
        try:
            data = self._df.parse(info_content, keyParser={None: str})
        except Exception:
            raise JobResultError('Unable to parse job result file %r' % fn)
        try:
            jobNum = data.pop('JOBID')
            exitCode = data.pop('EXITCODE')
            return {
                JobResult.JOBNUM: jobNum,
                JobResult.EXITCODE: exitCode,
                JobResult.RAW: data
            }
        except Exception:
            raise JobResultError('Job result file %r is incomplete' % fn)
Example #10
class JobInfoProcessor(OutputProcessor):
    alias_list = ['jobinfo']

    def __init__(self):
        OutputProcessor.__init__(self)
        self._df = DictFormat()

    def process(self, dn):
        fn = os.path.join(dn, 'job.info')
        try:
            if not os.path.exists(fn):
                raise JobResultError('Job result file %r does not exist' % fn)
            try:
                info_content = SafeFile(fn).read_close()
            except Exception:
                raise JobResultError('Unable to read job result file %r' % fn)
            if not info_content:
                raise JobResultError('Job result file %r is empty' % fn)
            data = self._df.parse(info_content,
                                  key_parser={None: str})  # impossible to fail
            try:
                jobnum = data.pop('JOBID')
                exit_code = data.pop('EXITCODE')
                message = data.pop('MESSAGE', None)
                return {
                    JobResult.JOBNUM: jobnum,
                    JobResult.EXITCODE: exit_code,
                    JobResult.MESSAGE: message,
                    JobResult.RAW: data
                }
            except Exception:
                raise JobResultError('Job result file %r is incomplete' % fn)
        except Exception:
            raise JobResultError('Unable to process output directory %r' % dn)
Example #11
	def _write_job_config(self, job_config_fn, jobnum, task, extras):
		try:
			job_env_dict = dict_union(task.get_job_dict(jobnum), extras)
			job_env_dict['GC_ARGS'] = task.get_job_arguments(jobnum).strip()
			content = DictFormat(escape_strings=True).format(job_env_dict, format='export %s%s%s\n')
			safe_write(open(job_config_fn, 'w'), content)
		except Exception:
			raise BackendError('Could not write job config data to %s.' % job_config_fn)
Example #12
	def __init__(self, config, job_limit=-1, job_selector=None):
		JobDB.__init__(self, config, job_limit, job_selector)
		self._path_db = config.get_work_path('jobs')
		self._fmt = DictFormat(escape_strings=True)
		try:
			self._job_map = self._read_jobs(self._job_limit)
		except Exception:
			raise JobError('Unable to read stored job information!')
		if self._job_limit < 0 and len(self._job_map) > 0:
			self._job_limit = max(self._job_map) + 1
Example #13
	def __init__(self, path):
		activity = Activity('Reading dataset partition file')
		self._fmt = DictFormat()
		try:
			self._tar = tarfile.open(path, 'r:')

			metadata = self._fmt.parse(self._tar.extractfile('Metadata').readlines(), key_parser={None: str})
			FilePartitionReader.__init__(self, path, metadata.pop('MaxJobs'))
			self._metadata = metadata
			activity.finish()
		except Exception:
			raise PartitionReaderError('No valid dataset splitting found in %s' % path)

		self._map_enum2parser = {
			None: str,
			DataSplitter.NEntries: int, DataSplitter.Skipped: int,
			DataSplitter.Invalid: parse_bool,
			DataSplitter.Locations: lambda x: parse_list(x, ','),
			DataSplitter.MetadataHeader: parse_json,
			DataSplitter.Metadata: lambda x: parse_json(x.strip("'"))
		}
		(self._cache_nested_fn, self._cache_nested_tar) = (None, None)
Example #14
 def _parseProxy(self, cached=True):
     # Return cached results if requested
     if cached and self._cache:
         return self._cache
     # Call voms-proxy-info and parse results
     proc = LocalProcess(self._infoExec, *self._getProxyArgs())
     (retCode, stdout, stderr) = proc.finish(timeout=10)
     if (retCode != 0) and not self._ignoreWarning:
         msg = ('%s output:\n%s\n%s\n' %
                (self._infoExec, stdout, stderr)).replace('\n\n', '\n')
         msg += 'If job submission is still possible, you can set [access] ignore warnings = True\n'
         raise AccessTokenError(msg + '%s failed with return code %d' %
                                (self._infoExec, retCode))
     self._cache = DictFormat(':').parse(stdout)
     return self._cache
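
The colon-delimited voms-proxy-info output maps directly onto DictFormat(':'). A minimal sketch with fabricated proxy output follows; the values deliberately avoid embedded colons, since these examples do not show how parse() treats repeated delimiters.

from grid_control.utils import DictFormat  # assumed import path

stdout = ('subject  : /DC=org/DC=example/CN=Some User/CN=proxy\n'  # fabricated voms-proxy-info output
	'identity : /DC=org/DC=example/CN=Some User\n'
	'type     : RFC compliant proxy\n')
proxy_info = DictFormat(':').parse(stdout)
print(proxy_info.get('identity'))  # keys presumably arrive lower-cased by the default key parser
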
Example #15
class TarPartitionReader(FilePartitionReader):
	def __init__(self, path):
		activity = Activity('Reading dataset partition file')
		self._fmt = DictFormat()
		try:
			self._tar = tarfile.open(path, 'r:')

			metadata = self._fmt.parse(self._tar.extractfile('Metadata').readlines(), key_parser={None: str})
			FilePartitionReader.__init__(self, path, metadata.pop('MaxJobs'))
			self._metadata = metadata
			activity.finish()
		except Exception:
			raise PartitionReaderError('No valid dataset splitting found in %s' % path)

		self._map_enum2parser = {
			None: str,
			DataSplitter.NEntries: int, DataSplitter.Skipped: int,
			DataSplitter.Invalid: parse_bool,
			DataSplitter.Locations: lambda x: parse_list(x, ','),
			DataSplitter.MetadataHeader: parse_json,
			DataSplitter.Metadata: lambda x: parse_json(x.strip("'"))
		}
		(self._cache_nested_fn, self._cache_nested_tar) = (None, None)

	def _combine_partition_parts(self, partition, url_list):
		if DataSplitter.CommonPrefix in partition:
			common_prefix = partition.pop(DataSplitter.CommonPrefix)
			url_list = imap(lambda x: '%s/%s' % (common_prefix, x), url_list)
		partition[DataSplitter.FileList] = lmap(str.strip, url_list)
		return partition

	def _get_nested_tar(self, nested_fn):
		if self._cache_nested_fn != nested_fn:  # caching gives 3-4x speedup for sequential access
			self._cache_nested_tar = self._open_nested_tar(nested_fn)
			self._cache_nested_fn = nested_fn
		return self._cache_nested_tar

	def _open_nested_tar(self, nested_fn):
		nested_tar_fp = self._tar.extractfile(nested_fn)
		nested_tar_fp = BytesBuffer(gzip.GzipFile(fileobj=nested_tar_fp).read())
		return tarfile.open(mode='r', fileobj=nested_tar_fp)
Example #16
 def _parse_proxy(self, cached=True):
     # Return cached results if requested
     if cached and self._cache:
         return self._cache
     # Call voms-proxy-info and parse results
     proc = LocalProcess(self._proxy_info_exec,
                         *self._get_proxy_info_arguments())
     (exit_code, stdout, stderr) = proc.finish(timeout=10)
     if (exit_code != 0) and not self._ignore_warning:
         msg = ('%s output:\n%s\n%s\n' %
                (self._proxy_info_exec, stdout, stderr)).replace(
                    '\n\n', '\n')
         msg += 'If job submission is still possible, you can set [access] ignore warnings = True\n'
         msg += '%s failed with return code %d' % (self._proxy_info_exec,
                                                   exit_code)
         raise AccessTokenError(msg)
     self._cache = DictFormat(':').parse(stdout)
     if not self._cache:
         msg = 'Unable to parse access token information:\n\t%s\n\t%s\n'
         raise AccessTokenError(msg % (stdout.strip(), stderr.strip()))
     return self._cache
Example #17
 def _parse(self, proc):
     for section in accumulate(proc.stdout.iter(self._timeout), '',
                               lambda x, buf: x == '\n'):
         try:
             lines = section.replace('\n\t', '').split('\n')
             job_info = DictFormat(' = ').parse(lines[1:])
             job_info[CheckInfo.WMSID] = lines[0].split(':')[1].split(
                 '.')[0].strip()
             job_info[CheckInfo.RAW_STATUS] = job_info.pop('job_state')
             job_info[CheckInfo.QUEUE] = job_info.pop('queue', None)
             if 'exec_host' in job_info:
                 exec_host = job_info.pop('exec_host').split('/')[0]
                 job_info[CheckInfo.WN] = exec_host + '.' + job_info.get(
                     'server', '')
         except Exception:
             raise BackendError('Error reading job info:\n%s' % section)
         yield job_info
Example #18
class TextFileJobDB(JobDB):
	alias_list = ['textdb']

	def __init__(self, config, job_limit=-1, job_selector=None):
		JobDB.__init__(self, config, job_limit, job_selector)
		self._path_db = config.get_work_path('jobs')
		self._fmt = DictFormat(escape_strings=True)
		try:
			self._job_map = self._read_jobs(self._job_limit)
		except Exception:
			raise JobError('Unable to read stored job information!')
		if self._job_limit < 0 and len(self._job_map) > 0:
			self._job_limit = max(self._job_map) + 1

	def commit(self, jobnum, job_obj):
		with_file(SafeFile(os.path.join(self._path_db, 'job_%d.txt' % jobnum), 'w'),
			lambda fp: fp.writelines(self._fmt.format(self._serialize_job_obj(job_obj))))
		self._job_map[jobnum] = job_obj

	def get_job(self, jobnum):
		return self._job_map.get(jobnum)

	def get_job_persistent(self, jobnum):
		return self._job_map.get(jobnum, Job())

	def get_job_transient(self, jobnum):
		return self._job_map.get(jobnum, self._default_job_obj)

	def _create_job_obj(self, name, data):
		try:
			job = Job()
			job.state = Job.str2enum(data.pop('status'), Job.UNKNOWN)

			if 'id' in data:
				gc_id = data.pop('id')
				if not gc_id.startswith('WMSID'):  # Legacy support
					data['legacy_gc_id'] = gc_id
					if gc_id.startswith('https'):
						gc_id = 'WMSID.GLITEWMS.%s' % gc_id
					else:
						wms_id, wms_name = tuple(gc_id.split('.', 1))
						gc_id = 'WMSID.%s.%s' % (wms_name, wms_id)
				job.gc_id = gc_id

			for key in ['attempt', 'submitted', 'changed']:
				if key in data:
					setattr(job, key, data[key])
			if 'runtime' not in data:
				if 'submitted' in data and (job.submitted > 0):
					data['runtime'] = time.time() - float(job.submitted)
				else:
					data['runtime'] = 0
			for key in irange(1, job.attempt + 1):
				if ('history_' + str(key)).strip() in data:
					job.history[key] = data['history_' + str(key)]
			job.set_dict(data)
		except Exception:
			raise JobError('Unable to parse data in %s:\n%r' % (name, data))
		return job

	def _read_jobs(self, job_limit):
		ensure_dir_exists(self._path_db, 'job database directory', JobError)

		candidates = []
		for job_fn in fnmatch.filter(os.listdir(self._path_db), 'job_*.txt'):
			try:  # 2xsplit is faster than regex
				jobnum = int(job_fn.split(".")[0].split("_")[1])
			except Exception:
				clear_current_exception()
				continue
			candidates.append((jobnum, job_fn))

		(job_map, max_job_len) = ({}, len(candidates))
		activity = Activity('Reading job infos')
		idx = 0
		for (jobnum, job_fn) in sorted(candidates):
			idx += 1
			if jobnum >= job_limit >= 0:
				self._log.info('Stopped reading job infos at job #%d out of %d available job files, ' +
					'since the limit of %d jobs is reached', jobnum, len(candidates), job_limit)
				break
			try:
				job_fn_full = os.path.join(self._path_db, job_fn)
				data = self._fmt.parse(SafeFile(job_fn_full).iter_close())
				job_obj = self._create_job_obj(job_fn_full, data)
			except Exception:
				raise JobError('Unable to process job file %r' % job_fn_full)
			job_map[jobnum] = job_obj
			activity.update('Reading job infos %d [%d%%]' % (idx, (100.0 * idx) / max_job_len))
		activity.finish()
		return job_map

	def _serialize_job_obj(self, job_obj):
		data = job_obj.get_dict_full()
		for key, value in job_obj.history.items():
			data['history_' + str(key)] = value
		if job_obj.gc_id is not None:
			data['id'] = job_obj.get('legacy_gc_id') or job_obj.gc_id  # store legacy gc_id
		return data
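
A rough sketch of the per-file round trip this job database presumably performs, with illustrative job fields; the exact on-disk quoting produced by escape_strings=True is not spelled out by these examples.

from grid_control.utils import DictFormat  # assumed import path

fmt = DictFormat(escape_strings=True)
job_data = {'status': 'SUCCESS', 'attempt': 1, 'runtime': 123}   # illustrative job fields
lines = fmt.format(job_data)   # one key/value line per entry, written out by commit() via writelines()
round_trip = fmt.parse(lines)  # read back, as _read_jobs() does via SafeFile(...).iter_close()
# round_trip.get('attempt') is expected to come back as an integer, cf. the 'exitcode' == 0 check in Example #2
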
Example #19
 def __init__(self):
     OutputProcessor.__init__(self)
     self._df = DictFormat()
Example #20
class JDLWriter(object):
	def __init__(self):
		self._esc_dict = {'\\': r'\\', '\"': r'\"', '\n': r'\n'}
		self._fmt = DictFormat(' = ')

	def format(self, req_list, result=None):
		contents = self.prepare(req_list, result)
		return self._fmt.format(contents, format='%s%s%s;\n')

	def prepare(self, req_list, result=None):
		result = result or dict()
		self._format_reqs(req_list, result)
		self._format_cpus(req_list, result)
		return result

	def _escape(self, value):
		return '"' + str.join('', imap(lambda char: self._esc_dict.get(char, char), value)) + '"'

	def _format_cpus(self, req_list, result):
		for req_type, arg in req_list:
			if (req_type == WMS.CPUS) and (arg > 1):
				result['CpuNumber'] = arg

	def _format_reqs(self, req_list, result):
		req_string_list = ['other.GlueHostNetworkAdapterOutboundIP']
		for req_type, arg in req_list:
			if req_type == WMS.SOFTWARE:
				software_template_str = 'Member(%s, other.GlueHostApplicationSoftwareRunTimeEnvironment)'
				req_string_list.append(software_template_str % self._escape(arg))
			elif req_type == WMS.WALLTIME:
				if arg > 0:
					req_string_list.append('(other.GlueCEPolicyMaxWallClockTime >= %d)' % int((arg + 59) / 60))
			elif req_type == WMS.CPUTIME:
				if arg > 0:
					req_string_list.append('(other.GlueCEPolicyMaxCPUTime >= %d)' % int((arg + 59) / 60))
			elif req_type == WMS.MEMORY:
				if arg > 0:
					req_string_list.append('(other.GlueHostMainMemoryRAMSize >= %d)' % arg)
			elif req_type == WMS.STORAGE:
				req_string_list.append(self._format_reqs_storage(arg))
			elif req_type == WMS.SITES:
				req_string_list.append(self._format_reqs_sites(arg))
			elif req_type in (WMS.CPUS, WMS.DISKSPACE):
				pass  # Handled outside of "requirement" directive or GlueCE attribute not available
			else:
				raise APIError('Unknown requirement type %r or argument %r' % (WMS.enum2str(req_type), arg))
		result['Requirements'] = str.join(' && ', iidfilter(req_string_list))

	def _format_reqs_sites(self, sites):
		def _fmt_sites(site):
			return 'RegExp(%s, other.GlueCEUniqueID)' % self._escape(site)
		(blacklist, whitelist) = split_blackwhite_list(sites)
		sitereqs = lmap(lambda x: '!' + _fmt_sites(x), blacklist)
		if whitelist:
			sitereqs.append('(%s)' % str.join(' || ', imap(_fmt_sites, whitelist)))
		if sitereqs:
			return '( %s )' % str.join(' && ', sitereqs)

	def _format_reqs_storage(self, locations):
		if locations:
			location_template_str = 'Member(%s, other.GlueCESEBindGroupSEUniqueID)'
			location_iter = imap(lambda x: location_template_str % self._escape(x), locations)
			return '( %s )' % str.join(' || ', location_iter)
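
A short sketch of the final formatting step in JDLWriter.format(), relying only on the DictFormat(' = ') call shown above; the requirement expression is a trimmed-down stand-in.

from grid_control.utils import DictFormat  # assumed import path

contents = {'CpuNumber': 4,  # stand-in for the dictionary built by prepare()
	'Requirements': 'other.GlueHostNetworkAdapterOutboundIP && (other.GlueHostMainMemoryRAMSize >= 2000)'}
jdl_text = str.join('', DictFormat(' = ').format(contents, format='%s%s%s;\n'))
print(jdl_text)  # expected to yield lines of the form: CpuNumber = 4;
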
Example #21
	def __init__(self):
		self._esc_dict = {'\\': r'\\', '\"': r'\"', '\n': r'\n'}
		self._fmt = DictFormat(' = ')
Example #22
	def __init__(self):
		PartitionWriter.__init__(self)
		self._fmt = DictFormat()  # use a single instance to save time
Example #23
	def __init__(self):
		OutputProcessor.__init__(self)
		self._df = DictFormat()
Example #24
class TextFileJobDB(JobDB):
    alias_list = ['textdb']

    def __init__(self, config, job_limit=-1, job_selector=None):
        JobDB.__init__(self, config, job_limit, job_selector)
        self._path_db = config.get_work_path('jobs')
        self._fmt = DictFormat(escape_strings=True)
        try:
            self._job_map = self._read_jobs(self._job_limit)
        except Exception:
            raise JobError('Unable to read stored job information!')
        if self._job_limit < 0 and len(self._job_map) > 0:
            self._job_limit = max(self._job_map) + 1

    def commit(self, jobnum, job_obj):
        with_file(
            SafeFile(os.path.join(self._path_db, 'job_%d.txt' % jobnum), 'w'),
            lambda fp: fp.writelines(
                self._fmt.format(self._serialize_job_obj(job_obj))))
        self._job_map[jobnum] = job_obj

    def get_job(self, jobnum):
        return self._job_map.get(jobnum)

    def get_job_persistent(self, jobnum):
        return self._job_map.get(jobnum, Job())

    def get_job_transient(self, jobnum):
        return self._job_map.get(jobnum, self._default_job_obj)

    def _create_job_obj(self, name, data):
        try:
            job = Job()
            job.state = Job.str2enum(data.pop('status'), Job.UNKNOWN)

            if 'id' in data:
                gc_id = data.pop('id')
                if not gc_id.startswith('WMSID'):  # Legacy support
                    data['legacy_gc_id'] = gc_id
                    if gc_id.startswith('https'):
                        gc_id = 'WMSID.GLITEWMS.%s' % gc_id
                    else:
                        wms_id, wms_name = tuple(gc_id.split('.', 1))
                        gc_id = 'WMSID.%s.%s' % (wms_name, wms_id)
                job.gc_id = gc_id

            for key in ['attempt', 'submitted', 'changed']:
                if key in data:
                    setattr(job, key, data[key])
            if 'runtime' not in data:
                if 'submitted' in data and (job.submitted > 0):
                    data['runtime'] = time.time() - float(job.submitted)
                else:
                    data['runtime'] = 0
            for key in irange(1, job.attempt + 1):
                if ('history_' + str(key)).strip() in data:
                    job.history[key] = data['history_' + str(key)]
            job.set_dict(data)
        except Exception:
            raise JobError('Unable to parse data in %s:\n%r' % (name, data))
        return job

    def _read_jobs(self, job_limit):
        ensure_dir_exists(self._path_db, 'job database directory', JobError)

        candidates = []
        for job_fn in fnmatch.filter(os.listdir(self._path_db), 'job_*.txt'):
            try:  # 2xsplit is faster than regex
                jobnum = int(job_fn.split(".")[0].split("_")[1])
            except Exception:
                clear_current_exception()
                continue
            candidates.append((jobnum, job_fn))

        (job_map, max_job_len) = ({}, len(candidates))
        activity = Activity('Reading job infos')
        idx = 0
        for (jobnum, job_fn) in sorted(candidates):
            idx += 1
            if jobnum >= job_limit >= 0:
                self._log.info(
                    'Stopped reading job infos at job #%d out of %d available job files, '
                    + 'since the limit of %d jobs is reached', jobnum,
                    len(candidates), job_limit)
                break
            try:
                job_fn_full = os.path.join(self._path_db, job_fn)
                data = self._fmt.parse(SafeFile(job_fn_full).iter_close())
                job_obj = self._create_job_obj(job_fn_full, data)
            except Exception:
                raise JobError('Unable to process job file %r' % job_fn_full)
            job_map[jobnum] = job_obj
            activity.update('Reading job infos %d [%d%%]' %
                            (idx, (100.0 * idx) / max_job_len))
        activity.finish()
        return job_map

    def _serialize_job_obj(self, job_obj):
        data = job_obj.get_dict_full()
        for key, value in job_obj.history.items():
            data['history_' + str(key)] = value
        if job_obj.gc_id is not None:
            data['id'] = job_obj.get(
                'legacy_gc_id') or job_obj.gc_id  # store legacy gc_id
        return data