Example #1
class TextFileJobDB(JobDB):
    alias_list = ['textdb']

    def __init__(self, config, job_limit=-1, job_selector=None):
        JobDB.__init__(self, config, job_limit, job_selector)
        self._path_db = config.get_work_path('jobs')
        self._fmt = DictFormat(escape_strings=True)
        try:
            self._job_map = self._read_jobs(self._job_limit)
        except Exception:
            raise JobError('Unable to read stored job information!')
        if self._job_limit < 0 and len(self._job_map) > 0:
            self._job_limit = max(self._job_map) + 1

    def commit(self, jobnum, job_obj):
        with_file(
            SafeFile(os.path.join(self._path_db, 'job_%d.txt' % jobnum), 'w'),
            lambda fp: fp.writelines(
                self._fmt.format(self._serialize_job_obj(job_obj))))
        self._job_map[jobnum] = job_obj

    def get_job(self, jobnum):
        return self._job_map.get(jobnum)

    def get_job_persistent(self, jobnum):
        return self._job_map.get(jobnum, Job())

    def get_job_transient(self, jobnum):
        return self._job_map.get(jobnum, self._default_job_obj)

    def _create_job_obj(self, name, data):
        try:
            job = Job()
            job.state = Job.str2enum(data.pop('status'), Job.UNKNOWN)

            if 'id' in data:
                gc_id = data.pop('id')
                if not gc_id.startswith('WMSID'):  # Legacy support
                    data['legacy_gc_id'] = gc_id
                    if gc_id.startswith('https'):
                        gc_id = 'WMSID.GLITEWMS.%s' % gc_id
                    else:
                        wms_id, wms_name = tuple(gc_id.split('.', 1))
                        gc_id = 'WMSID.%s.%s' % (wms_name, wms_id)
                job.gc_id = gc_id

            for key in ['attempt', 'submitted', 'changed']:
                if key in data:
                    setattr(job, key, data[key])
            if 'runtime' not in data:
                if 'submitted' in data and (job.submitted > 0):
                    data['runtime'] = time.time() - float(job.submitted)
                else:
                    data['runtime'] = 0
            for key in irange(1, job.attempt + 1):
                if ('history_' + str(key)).strip() in data:
                    job.history[key] = data['history_' + str(key)]
            job.set_dict(data)
        except Exception:
            raise JobError('Unable to parse data in %s:\n%r' % (name, data))
        return job

    def _read_jobs(self, job_limit):
        ensure_dir_exists(self._path_db, 'job database directory', JobError)

        candidates = []
        for job_fn in fnmatch.filter(os.listdir(self._path_db), 'job_*.txt'):
            try:  # 2xsplit is faster than regex
                jobnum = int(job_fn.split(".")[0].split("_")[1])
            except Exception:
                clear_current_exception()
                continue
            candidates.append((jobnum, job_fn))

        (job_map, max_job_len) = ({}, len(candidates))
        activity = Activity('Reading job infos')
        idx = 0
        for (jobnum, job_fn) in sorted(candidates):
            idx += 1
            if jobnum >= job_limit >= 0:
                self._log.info(
                    'Stopped reading job infos at job #%d out of %d available job files, '
                    + 'since the limit of %d jobs is reached', jobnum,
                    len(candidates), job_limit)
                break
            try:
                job_fn_full = os.path.join(self._path_db, job_fn)
                data = self._fmt.parse(SafeFile(job_fn_full).iter_close())
                job_obj = self._create_job_obj(job_fn_full, data)
            except Exception:
                raise JobError('Unable to process job file %r' % job_fn_full)
            job_map[jobnum] = job_obj
            activity.update('Reading job infos %d [%d%%]' %
                            (idx, (100.0 * idx) / max_job_len))
        activity.finish()
        return job_map

    def _serialize_job_obj(self, job_obj):
        data = job_obj.get_dict_full()
        for key, value in job_obj.history.items():
            data['history_' + str(key)] = value
        if job_obj.gc_id is not None:
            data['id'] = job_obj.get(
                'legacy_gc_id') or job_obj.gc_id  # store legacy gc_id
        return data
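
TextFileJobDB keeps one plain-text file per job ('job_<n>.txt' under the 'jobs' work path): commit() serializes a Job into key/value lines via DictFormat and SafeFile, and _read_jobs() parses every matching file back into Job objects at startup. The snippet below is a minimal, self-contained sketch of the same round-trip pattern using only the Python standard library; the 'key = value' layout, the helper names, the 'jobs' directory and the sample field values are illustrative assumptions, not grid-control's DictFormat/SafeFile API.

import os

def write_job_file(path_db, jobnum, data):
    # One 'key = value' line per field, mirroring the per-job file commit() produces
    fn = os.path.join(path_db, 'job_%d.txt' % jobnum)
    with open(fn, 'w') as fp:
        fp.writelines('%s = %s\n' % (key, value) for key, value in sorted(data.items()))

def read_job_file(path_db, jobnum):
    # Parse the file back into a dict, as _read_jobs() / _create_job_obj() do per job
    data = {}
    with open(os.path.join(path_db, 'job_%d.txt' % jobnum)) as fp:
        for line in fp:
            key, _, value = line.partition(' = ')
            data[key.strip()] = value.strip()
    return data

if __name__ == '__main__':
    os.makedirs('jobs', exist_ok=True)
    # Field names/values are illustrative; the id follows the 'WMSID.<wms>.<id>' shape used above
    write_job_file('jobs', 0, {'status': 'SUBMITTED', 'attempt': 1, 'id': 'WMSID.CONDOR.1234'})
    print(read_job_file('jobs', 0))  # {'attempt': '1', 'id': 'WMSID.CONDOR.1234', 'status': 'SUBMITTED'}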
Example #2
class JDLWriter(object):
	def __init__(self):
		self._esc_dict = {'\\': r'\\', '\"': r'\"', '\n': r'\n'}
		self._fmt = DictFormat(' = ')

	def format(self, req_list, result=None):
		contents = self.prepare(req_list, result)
		return self._fmt.format(contents, format='%s%s%s;\n')

	def prepare(self, req_list, result=None):
		result = result or dict()
		self._format_reqs(req_list, result)
		self._format_cpus(req_list, result)
		return result

	def _escape(self, value):
		return '"' + str.join('', imap(lambda char: self._esc_dict.get(char, char), value)) + '"'

	def _format_cpus(self, req_list, result):
		for req_type, arg in req_list:
			if (req_type == WMS.CPUS) and (arg > 1):
				result['CpuNumber'] = arg

	def _format_reqs(self, req_list, result):
		req_string_list = ['other.GlueHostNetworkAdapterOutboundIP']
		for req_type, arg in req_list:
			if req_type == WMS.SOFTWARE:
				software_template_str = 'Member(%s, other.GlueHostApplicationSoftwareRunTimeEnvironment)'
				req_string_list.append(software_template_str % self._escape(arg))
			elif req_type == WMS.WALLTIME:
				if arg > 0:
					req_string_list.append('(other.GlueCEPolicyMaxWallClockTime >= %d)' % int((arg + 59) / 60))
			elif req_type == WMS.CPUTIME:
				if arg > 0:
					req_string_list.append('(other.GlueCEPolicyMaxCPUTime >= %d)' % int((arg + 59) / 60))
			elif req_type == WMS.MEMORY:
				if arg > 0:
					req_string_list.append('(other.GlueHostMainMemoryRAMSize >= %d)' % arg)
			elif req_type == WMS.STORAGE:
				req_string_list.append(self._format_reqs_storage(arg))
			elif req_type == WMS.SITES:
				req_string_list.append(self._format_reqs_sites(arg))
			elif req_type in (WMS.CPUS, WMS.DISKSPACE):
				pass  # Handled outside of "requirement" directive or GlueCE attribute not available
			else:
				raise APIError('Unknown requirement type %r or argument %r' % (WMS.enum2str(req_type), arg))
		result['Requirements'] = str.join(' && ', iidfilter(req_string_list))

	def _format_reqs_sites(self, sites):
		def _fmt_sites(site):
			return 'RegExp(%s, other.GlueCEUniqueID)' % self._escape(site)
		(blacklist, whitelist) = split_blackwhite_list(sites)
		sitereqs = lmap(lambda x: '!' + _fmt_sites(x), blacklist)
		if whitelist:
			sitereqs.append('(%s)' % str.join(' || ', imap(_fmt_sites, whitelist)))
		if sitereqs:
			return '( %s )' % str.join(' && ', sitereqs)

	def _format_reqs_storage(self, locations):
		if locations:
			location_template_str = 'Member(%s, other.GlueCESEBindGroupSEUniqueID)'
			location_iter = imap(lambda x: location_template_str % self._escape(x), locations)
			return '( %s )' % str.join(' || ', location_iter)
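
JDLWriter turns a list of (requirement type, value) pairs into the 'Requirements' expression of a gLite JDL file; format() then renders each entry as a 'key = value;' line via DictFormat. The sketch below reproduces just the Requirements-building pattern with plain string tags standing in for the WMS.* enum constants; the tag names and sample values are illustrative assumptions, not the actual grid-control interface.

def escape(value):
    # Same escaping idea as JDLWriter._escape: backslash, double quote and newline
    esc = {'\\': r'\\', '"': r'\"', '\n': r'\n'}
    return '"' + ''.join(esc.get(ch, ch) for ch in str(value)) + '"'

def format_requirements(req_list):
    parts = ['other.GlueHostNetworkAdapterOutboundIP']
    for req_type, arg in req_list:
        if req_type == 'SOFTWARE':
            parts.append('Member(%s, other.GlueHostApplicationSoftwareRunTimeEnvironment)' % escape(arg))
        elif req_type == 'WALLTIME' and arg > 0:
            # The Glue attribute is in minutes, so round the seconds up as the class above does
            parts.append('(other.GlueCEPolicyMaxWallClockTime >= %d)' % int((arg + 59) / 60))
        elif req_type == 'MEMORY' and arg > 0:
            parts.append('(other.GlueHostMainMemoryRAMSize >= %d)' % arg)
    return ' && '.join(parts)

print(format_requirements([('SOFTWARE', 'VO-cms-CMSSW_10_6_30'), ('WALLTIME', 7200), ('MEMORY', 2000)]))
# -> other.GlueHostNetworkAdapterOutboundIP && Member("VO-cms-CMSSW_10_6_30", ...)
#    && (other.GlueCEPolicyMaxWallClockTime >= 120) && (other.GlueHostMainMemoryRAMSize >= 2000)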