Example #1
def makeEnum(members = None, cls = None, useHash = True):
	members = members or []
	if cls:
		enumID = md5_hex(str(members) + '!' + cls.__name__)[:4]
	else:
		enumID = md5_hex(str(members))[:4]
		cls = type('Enum_%s_%s' % (enumID, str.join('_', members)), (), {})

	def getValue(idx, name):
		if useHash:
			return idx + int(enumID, 16)
		else:
			return idx
	values = lsmap(getValue, enumerate(members))

	cls.enumNames = members
	cls.enumValues = values
	enumMapNV = dict(izip(imap(str.lower, cls.enumNames), cls.enumValues))
	enumMapVN = dict(izip(cls.enumValues, cls.enumNames))
	if len(enumMapNV) != len(enumMapVN):
		raise APIError('Invalid enum definition!')
	def str2enum(cls, value, *args):
		return enumMapNV.get(value.lower(), *args)
	cls.enum2str = enumMapVN.get
	cls.str2enum = classmethod(str2enum)
	for name, value in izip(cls.enumNames, cls.enumValues):
		setattr(cls, name, value)
	return cls
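
Note: every snippet on this page pulls `izip` (together with helpers like `imap`, `ifilter`, `lmap`, `lfilter` and `ismap`) from grid-control's `python_compat` module, which bridges Python 2 and 3. A minimal sketch of such a shim, using only the standard library (the project's real module covers more names):

try:  # Python 2: the lazy variants live in itertools
    from itertools import izip, imap, ifilter
except ImportError:  # Python 3: the builtins are already lazy
    izip, imap, ifilter = zip, map, filter

def lmap(fun, iterable):  # list-returning map
    return list(map(fun, iterable))

def lfilter(cond, iterable):  # list-returning filter
    return list(filter(cond, iterable))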
Example #2
 def fill_parameter_content(self, pnum, result):
     for (psrc, psrc_len) in izip(self._psrc_list, self._psrc_max_list):
         if psrc_len is not None:
             if pnum < psrc_len:
                 psrc.fill_parameter_content(pnum, result)
         else:
             psrc.fill_parameter_content(pnum, result)
Example #3
def list_parameters(psrc, opts):
    (psp_list, need_gc_param) = get_parameters(opts, psrc)
    enabled_vn_list = opts.output.split(',')
    meta_list = lfilter(lambda k: (k in enabled_vn_list) or not opts.output,
                        psrc.get_job_metadata())
    tracked_vn_list = lmap(lambda k: k.value,
                           ifilter(lambda k: not k.untracked, meta_list))
    untracked_vn_list = lmap(lambda k: k.value,
                             ifilter(lambda k: k.untracked, meta_list))

    if opts.collapse > 0:
        (header_list, psp_list) = collapse_psp_list(psp_list, tracked_vn_list,
                                                    opts)
    else:
        header_list = [('GC_JOB_ID', '#')]
        if need_gc_param:
            header_list.append(('GC_PARAM', 'GC_PARAM'))
    if opts.active:
        header_list.append((ParameterInfo.ACTIVE, 'ACTIVE'))
    if opts.visible:
        tracked_vn_list = opts.visible.split(',')
    header_list.extend(sorted(izip(tracked_vn_list, tracked_vn_list)))
    if opts.untracked:
        header_list.extend(
            sorted(
                imap(
                    lambda n: (n, '(%s)' % n),
                    ifilter(lambda n: n not in ['GC_PARAM', 'GC_JOB_ID'],
                            untracked_vn_list))))
    ConsoleTable.create(header_list, psp_list)
Example #4
 def _do_subst(value):
     return replace_with_dict(
         value, merged_var_dict,
         ichain([
             self.get_var_alias_map().items(),
             izip(additional_var_dict, additional_var_dict)
         ]))
Example #5
File: wms.py Project: jolange/grid-control
	def _get_sandbox_file_list(self, task, sm_list):
		# Prepare all input files
		dep_list = set(ichain(imap(lambda x: x.get_dependency_list(), [task] + sm_list)))
		dep_fn_list = lmap(lambda dep: resolve_path('env.%s.sh' % dep,
			lmap(lambda pkg: get_path_share('', pkg=pkg), os.listdir(get_path_pkg()))), dep_list)
		task_config_dict = dict_union(self._remote_event_handler.get_mon_env_dict(),
			*imap(lambda x: x.get_task_dict(), [task] + sm_list))
		task_config_dict.update({'GC_DEPFILES': str.join(' ', dep_list),
			'GC_USERNAME': self._token.get_user_name(), 'GC_WMS_NAME': self._name})
		task_config_str_list = DictFormat(escape_strings=True).format(
			task_config_dict, format='export %s%s%s\n')
		vn_alias_dict = dict(izip(self._remote_event_handler.get_mon_env_dict().keys(),
			self._remote_event_handler.get_mon_env_dict().keys()))
		vn_alias_dict.update(task.get_var_alias_map())
		vn_alias_str_list = DictFormat(delimeter=' ').format(vn_alias_dict, format='%s%s%s\n')

		# Resolve wildcards in task input files
		def _get_task_fn_list():
			for fpi in task.get_sb_in_fpi_list():
				matched = glob.glob(fpi.path_abs)
				if matched != []:
					for match in matched:
						yield match
				else:
					yield fpi.path_abs
		return lchain([self._remote_event_handler.get_file_list(), dep_fn_list, _get_task_fn_list(), [
			VirtualFile('_config.sh', sorted(task_config_str_list)),
			VirtualFile('_varmap.dat', sorted(vn_alias_str_list))]])
Example #6
	def fill_parameter_content(self, pnum, result):
		for (psrc, psrc_len) in izip(self._psrc_list, self._psrc_max_list):
			if psrc_len is not None:
				if pnum < psrc_len:
					psrc.fill_parameter_content(pnum, result)
			else:
				psrc.fill_parameter_content(pnum, result)
Example #7
	def process(self, dn):
		job_info_dict = None
		try:
			job_info_dict = JobInfoProcessor.process(self, dn)
		except JobResultError:
			logger = logging.getLogger('jobs.results')
			logger.warning('Unable to process job information', exc_info=get_current_exception())
			clear_current_exception()
		if job_info_dict:
			job_data_dict = job_info_dict[JobResult.RAW]
			result = {}

			def get_items_with_key(key_prefix):
				return ifilter(lambda key_value: key_value[0].startswith(key_prefix), job_data_dict.items())

			# parse old job info data format for files
			old_fmt_header = [FileInfo.Hash, FileInfo.NameLocal, FileInfo.NameDest, FileInfo.Path]
			for (file_key, file_data) in get_items_with_key('FILE'):
				file_idx = file_key.replace('FILE', '') or '0'
				result[int(file_idx)] = dict(izip(old_fmt_header, file_data.strip('"').split('  ')))
			# parse new job info data format
			for (file_key, file_data) in get_items_with_key('OUTPUT_FILE'):
				(file_idx, file_prop) = file_key.replace('OUTPUT_FILE_', '').split('_')
				if isinstance(file_data, str):
					file_data = file_data.strip('"')
				file_prop = file_prop.lower().replace('dest', 'namedest').replace('local', 'namelocal')
				result.setdefault(int(file_idx), {})[FileInfo.str2enum(file_prop)] = file_data
			return list(result.values())
Example #8
	def _parseTime(self, time_str):
		result = 0
		entry_map = {'yea': 365 * 24 * 60 * 60, 'day': 24 * 60 * 60, 'hou': 60 * 60, 'min': 60, 'sec': 1}
		tmp = time_str.split()
		for (entry, value) in izip(imap(lambda x: x[:3], tmp[1::2]), imap(int, tmp[::2])):
			result += entry_map[entry] * value
		return result
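
The slicing in `_parseTime` expects integer values at even token positions and unit words at odd positions, with each unit matched on its first three letters. A standalone sketch of the same pairing logic (the input string is made up):

entry_map = {'yea': 365 * 24 * 60 * 60, 'day': 24 * 60 * 60,
             'hou': 60 * 60, 'min': 60, 'sec': 1}

def parse_time(time_str):
    tokens = time_str.split()  # e.g. ['1', 'days', '12', 'hours']
    # pair each value (even positions) with its unit word (odd positions)
    return sum(entry_map[unit[:3]] * int(value)
               for value, unit in zip(tokens[::2], tokens[1::2]))

print(parse_time('1 days 12 hours'))  # 129600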
Example #9
    def discover(self):
        tags = ['h_vmem', 'h_cpu', 's_rt']
        reqs = dict(izip(tags, [WMS.MEMORY, WMS.CPUTIME, WMS.WALLTIME]))
        parser = dict(izip(tags, [int, parseTime, parseTime]))

        proc = LocalProcess(self._configExec, '-sql')
        for queue in imap(str.strip, proc.stdout.iter(timeout=10)):
            proc_q = LocalProcess(self._configExec, '-sq', queue)
            queueInfo = {'name': queue}
            for line in proc_q.stdout.iter(timeout=10):
                attr, value = lmap(str.strip, line.split(' ', 1))
                if (attr in tags) and (value != 'INFINITY'):
                    queueInfo[reqs[attr]] = parser[attr](value)
            proc_q.status_raise(timeout=0)
            yield queueInfo
        proc.status_raise(timeout=0)
Example #10
 def _statusReturnLineRead(self, line):
     try:
         statusReturnValues = line.split()
         # transform output string to dictionary
         jobinfo = dict(izip(self.statusReturnKeys, statusReturnValues))
         # extract GC and WMS ID, check for consistency
         jobID, wmsID = jobinfo['GCID@WMSID'].split('@')
         if (wmsID != jobinfo['wmsid']):
             raise BackendError(
                 "Critical! Unable to match jobs in queue! \n CondorID: %s	Expected: %s \n%s"
                 % (jobinfo['wmsid'], wmsID, line))
         jobinfo['jobid'] = int(jobID)
         del jobinfo['GCID@WMSID']
         # extract Host and Queue data
         if "@" in jobinfo["RemoteHost"]:
             jobinfo['dest'] = jobinfo["RemoteHost"].split(
                 "@")[1] + ': /' + jobinfo.get("Queue", "")
         else:
             jobinfo['dest'] = jobinfo["RemoteHost"]
         del jobinfo["RemoteHost"]
         if "Queue" in jobinfo:
             del jobinfo["Queue"]
         # convert status to appropriate format
         status = self._statusMap[jobinfo['status']]
         jobinfo['status'] = self._humanMap[jobinfo['status']]
         return (jobinfo['jobid'], jobinfo['wmsid'], status, jobinfo)
     except Exception:
         raise BackendError('Error reading job info:\n%s' % line)
Example #11
 def emit(self, record):
     fp = open(self._fn, self._mode)
     try:
         try:
             for idx, instance in enumerate(GCLogHandler.config_instances):
                 fp.write('-' * 70 + '\nConfig instance %d\n' % idx +
                          '=' * 70 + '\n')
                 instance.write(fp)
         except Exception:
             fp.write('-> unable to display configuration!\n')
             clear_current_exception()
     finally:
         if GCLogHandler.config_instances:
             fp.write('\n' + '*' * 70 + '\n')
     if make_enum.enum_list:
         fp.write('\nList of enums\n')
         for enum in make_enum.enum_list:
             fp.write('\t%s\n' % str.join(
                 '|',
                 imap(lambda name_value: '%s:%s' % name_value,
                      izip(enum.enum_name_list, enum.enum_value_list))))
         fp.write('\n' + '*' * 70 + '\n')
     fp.write('\n')
     fp.close()
     logging.FileHandler.emit(self, record)
     sys.stderr.write(
         '\nIn case this is caused by a bug, please send the log file:\n' +
         '\t%r\n' % self._fn + 'to [email protected]\n')
Example #12
 def process(self, dn):
     try:
         jobInfo = JobInfoProcessor.process(self, dn)
     except Exception:
         logging.getLogger('wms').warning(sys.exc_info()[1])
         jobInfo = None
     if jobInfo:
         jobData = jobInfo[JobResult.RAW]
         result = {}
         # parse old job info data format for files
         oldFileFormat = [
             FileInfoProcessor.Hash, FileInfoProcessor.NameLocal,
             FileInfoProcessor.NameDest, FileInfoProcessor.Path
         ]
         for (fileKey, fileData) in ifilter(
                 lambda key_value: key_value[0].startswith('FILE'),
                 jobData.items()):
             fileIdx = fileKey.replace('FILE', '').rjust(1, '0')
             result[int(fileIdx)] = dict(
                 izip(oldFileFormat,
                      fileData.strip('"').split('  ')))
         # parse new job info data format
         for (fileKey, fileData) in ifilter(
                 lambda key_value: key_value[0].startswith('OUTPUT_FILE'),
                 jobData.items()):
             (fileIdx, fileProperty) = fileKey.replace('OUTPUT_FILE_',
                                                       '').split('_')
             if isinstance(fileData, str):
                 fileData = fileData.strip('"')
             result.setdefault(
                 int(fileIdx),
                 {})[FileInfoProcessor.str2enum(fileProperty)] = fileData
         return list(result.values())
Example #13
	def _get_sandbox_file_list(self, task, sm_list):
		# Prepare all input files
		dep_list = set(ichain(imap(lambda x: x.get_dependency_list(), [task] + sm_list)))
		dep_fn_list = lmap(lambda dep: resolve_path('env.%s.sh' % dep,
			lmap(lambda pkg: get_path_share('', pkg=pkg), os.listdir(get_path_pkg()))), dep_list)
		task_config_dict = dict_union(self._remote_event_handler.get_mon_env_dict(),
			*imap(lambda x: x.get_task_dict(), [task] + sm_list))
		task_config_dict.update({'GC_DEPFILES': str.join(' ', dep_list),
			'GC_USERNAME': self._token.get_user_name(), 'GC_WMS_NAME': self._name})
		task_config_str_list = DictFormat(escape_strings=True).format(
			task_config_dict, format='export %s%s%s\n')
		vn_alias_dict = dict(izip(self._remote_event_handler.get_mon_env_dict().keys(),
			self._remote_event_handler.get_mon_env_dict().keys()))
		vn_alias_dict.update(task.get_var_alias_map())
		vn_alias_str_list = DictFormat(delimeter=' ').format(vn_alias_dict, format='%s%s%s\n')

		# Resolve wildcards in task input files
		def _get_task_fn_list():
			for fpi in task.get_sb_in_fpi_list():
				matched = glob.glob(fpi.path_abs)
				if matched != []:
					for match in matched:
						yield match
				else:
					yield fpi.path_abs
		return lchain([self._remote_event_handler.get_file_list(), dep_fn_list, _get_task_fn_list(), [
			VirtualFile('_config.sh', sorted(task_config_str_list)),
			VirtualFile('_varmap.dat', sorted(vn_alias_str_list))]])
Example #14
	def fillParameterInfo(self, pNum, result):
		for (psource, maxN) in izip(self._psourceList, self._psourceMaxList):
			if maxN is not None:
				if pNum < maxN:
					psource.fillParameterInfo(pNum, result)
			else:
				psource.fillParameterInfo(pNum, result)
Example #15
    def __init__(self, config, name, task, eventhandler):
        JobManager.__init__(self, config, name, task, eventhandler)

        # Job defect heuristic (not persistent!) - remove jobs, which cause errors when doing status queries
        self._defect_tries = config.getInt(['kick offender', 'defect tries'],
                                           10,
                                           onChange=None)
        (self._defect_counter, self._defect_raster) = ({}, 0)

        # job verification heuristic - launch jobs in chunks of increasing size if enough jobs succeed
        self._verify = False
        self._verifyChunks = config.getList('verify chunks', [-1],
                                            onChange=None,
                                            parseItem=int)
        self._verifyThresh = config.getList(
            ['verify reqs', 'verify threshold'], [0.5],
            onChange=None,
            parseItem=float)
        if self._verifyChunks:
            self._verify = True
            self._verifyThresh += [self._verifyThresh[-1]] * (
                len(self._verifyChunks) - len(self._verifyThresh))
            self._log_user_time.log(logging.INFO1, 'Verification mode active')
            self._log_user_time.log(
                logging.INFO1,
            'Submission is capped unless the success ratio of a chunk of jobs is sufficient.'
            )
            self._log_user_time.debug(
                'Enforcing the following (chunksize x ratio) sequence:')
            self._log_user_time.debug(
                str.join(
                    ' > ',
                    imap(lambda tpl: '%d x %4.2f' % (tpl[0], tpl[1]),
                         izip(self._verifyChunks, self._verifyThresh))))
        self._unreachableGoal = False
Example #16
	def discover(self):
		tags = ['h_vmem', 'h_cpu', 's_rt']
		reqs = dict(izip(tags, [WMS.MEMORY, WMS.CPUTIME, WMS.WALLTIME]))
		parser = dict(izip(tags, [int, parseTime, parseTime]))

		proc = LocalProcess(self._configExec, '-sql')
		for queue in imap(str.strip, proc.stdout.iter(timeout = 10)):
			proc_q = LocalProcess(self._configExec, '-sq', queue)
			queueInfo = {'name': queue}
			for line in proc_q.stdout.iter(timeout = 10):
				attr, value = lmap(str.strip, line.split(' ', 1))
				if (attr in tags) and (value != 'INFINITY'):
					queueInfo[reqs[attr]] = parser[attr](value)
			proc_q.status_raise(timeout = 0)
			yield queueInfo
		proc.status_raise(timeout = 0)
Example #17
	def _acceptLumi(self, block, fi, idxRuns, idxLumi):
		if (idxRuns is None) or (idxLumi is None):
			return True
		fi_meta = fi[DataProvider.Metadata]
		for (run, lumi) in izip(fi_meta[idxRuns], fi_meta[idxLumi]):
			if selectLumi((run, lumi), self._lumi_filter.lookup(block[DataProvider.Nickname], is_selector = False)):
				return True
Example #18
	def _get_just_fun_dict(self, head, align_str):
		just_fun_dict = {'l': str.ljust, 'r': str.rjust, 'c': str.center}
		# just_fun = {id1: str.center, id2: str.rjust, ...}

		def _get_key_format(head_entry, align_str):
			return (head_entry[0], just_fun_dict[align_str])
		return dict(ismap(_get_key_format, izip(head, align_str)))
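
`ismap` acts like `itertools.starmap` here, unpacking each `(head_entry, align_char)` pair into the key function. The same dictionary with plain stdlib tools (header and alignment values are invented for illustration):

from itertools import starmap

just_fun_dict = {'l': str.ljust, 'r': str.rjust, 'c': str.center}
head = [('id1', 'Column 1'), ('id2', 'Column 2')]
align_str = 'cr'

def get_key_format(head_entry, align_char):
    return (head_entry[0], just_fun_dict[align_char])

print(dict(starmap(get_key_format, zip(head, align_str))))
# maps 'id1' -> str.center and 'id2' -> str.rjust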
Example #19
	def _statusReturnLineRead(self,line):
		try:
			statusReturnValues = line.split()
			# transform output string to dictionary
			jobinfo = dict(izip(self.statusReturnKeys, statusReturnValues))
			# extract GC and WMS ID, check for consistency
			jobID,wmsID=jobinfo['GCID@WMSID'].split('@')
			if (wmsID != jobinfo['wmsid']):
				raise BackendError("Critical! Unable to match jobs in queue! \n CondorID: %s	Expected: %s \n%s" % ( jobinfo['wmsid'], wmsID, line ))
			jobinfo['jobid']=int(jobID)
			del jobinfo['GCID@WMSID']
			# extract Host and Queue data
			if "@" in jobinfo["RemoteHost"]:
				jobinfo['dest'] = jobinfo["RemoteHost"].split("@")[1] + ': /' + jobinfo.get("Queue","")
			else:
				jobinfo['dest'] = jobinfo["RemoteHost"]
			del jobinfo["RemoteHost"]
			if "Queue" in jobinfo:
				del jobinfo["Queue"]
			# convert status to appropriate format
			status = self._statusMap[jobinfo['status']]
			jobinfo['status'] = self._humanMap[jobinfo['status']]
			return ( jobinfo['jobid'], jobinfo['wmsid'], status, jobinfo )
		except Exception:
			raise BackendError('Error reading job info:\n%s' % line)
Example #20
 def __init__(self, config, name, job_db, task=None):
     ConsoleReport.__init__(self, config, name, job_db, task)
     self._js_str_dict = {}
     for (js_name, js_enum) in izip(Job.enum_name_list,
                                    Job.enum_value_list):
         self._js_str_dict[js_enum] = 'Jobs  %9s:%%8d  %%3d%%%%' % js_name
     self._height = 4 + int((len(Job.enum_name_list) + 1) / 2)
Example #21
 def _bound_metric(self, time_step_list, metric_list, lim_low, lim_high):
     time_step_list_truncated = []
     metric_list_truncated = []
     for time_step, metric in izip(time_step_list, metric_list):
         if lim_low < time_step < lim_high:
             metric_list_truncated.append(metric)
             time_step_list_truncated.append(time_step)
     return (time_step_list_truncated, metric_list_truncated)
Example #22
    def _get_just_fun_dict(self, head, fmtString):
        justFunDict = {'l': str.ljust, 'r': str.rjust, 'c': str.center}

        # justFun = {id1: str.center, id2: str.rjust, ...}
        def getKeyFormat(headEntry, fmtString):
            return (headEntry[0], justFunDict[fmtString])

        return dict(ismap(getKeyFormat, izip(head, fmtString)))
Example #23
	def _bound_metric(self, time_step_list, metric_list, lim_low, lim_high):
		time_step_list_truncated = []
		metric_list_truncated = []
		for time_step, metric in izip(time_step_list, metric_list):
			if lim_low < time_step < lim_high:
				metric_list_truncated.append(metric)
				time_step_list_truncated.append(time_step)
		return (time_step_list_truncated, metric_list_truncated)
Example #24
    def getQueues(self):
        queues = {}
        tags = ['h_vmem', 'h_cpu', 's_rt']
        reqs = dict(izip(tags, [WMS.MEMORY, WMS.CPUTIME, WMS.WALLTIME]))
        parser = dict(izip(tags, [int, parseTime, parseTime]))

        proc = LocalProcess(self._configExec, '-sql')
        for queue in imap(str.strip, proc.stdout.iter(timeout=10)):
            queues[queue] = dict()
            proc_q = LocalProcess(self._configExec, '-sq', queue)
            for line in proc_q.stdout.iter(timeout=10):
                attr, value = lmap(str.strip, line.split(' ', 1))
                if (attr in tags) and (value != 'INFINITY'):
                    queues[queue][reqs[attr]] = parser[attr](value)
            proc_q.status_raise(timeout=0)
        proc.status_raise(timeout=0)
        return queues
Example #25
	def _query(self, api, **kwargs):
		key = (self._url, api, tuple(kwargs.items()))
		if key not in SiteDB.queryCache:
			SiteDB.queryCache[key] = self._gjrc.get(api = api, params = kwargs or None)
		data = SiteDB.queryCache[key]
		columns = data['desc']['columns']
		for row in data['result']:
			yield dict(izip(columns, row))
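
The last line is the most common `izip` idiom on this page: zipping a list of column names against each result row to build a record dictionary. In isolation, with made-up data:

columns = ['site', 'resource', 'fqdn']
row = ['T2_DE_DESY', 'SE', 'dcache-se.desy.de']
print(dict(zip(columns, row)))
# {'site': 'T2_DE_DESY', 'resource': 'SE', 'fqdn': 'dcache-se.desy.de'}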
Example #26
	def truncateData(self, timeStep, overAllBandwidth, truncFront, truncBack):
		truncatedTimeStep = []
		truncatedOverAllBandwidth = []
		for currentTimeStep, thisBw in izip(timeStep, overAllBandwidth):
			if (currentTimeStep > truncFront) and (currentTimeStep < truncBack):
				truncatedOverAllBandwidth.append(thisBw)
				truncatedTimeStep.append(currentTimeStep)
		return (truncatedTimeStep, truncatedOverAllBandwidth)
Example #27
	def discover(self):
		active = False
		keys = [WMS.MEMORY, WMS.CPUTIME, WMS.WALLTIME]
		parser = dict(izip(keys, [int, parse_time, parse_time]))
		proc = LocalProcess(self._exec, '-q')
		for line in proc.stdout.iter(timeout=10):
			if line.startswith('-'):
				active = True
			elif line.startswith(' '):
				active = False
			elif active:
				fields = lmap(str.strip, line.split()[:4])
				queue_dict = {'name': fields[0]}
				for key, value in ifilter(lambda k_v: not k_v[1].startswith('-'), izip(keys, fields[1:])):
					queue_dict[key] = parser[key](value)
				yield queue_dict
		proc.status_raise(timeout=0)
Example #28
	def getQueues(self):
		queues = {}
		tags = ['h_vmem', 'h_cpu', 's_rt']
		reqs = dict(izip(tags, [WMS.MEMORY, WMS.CPUTIME, WMS.WALLTIME]))
		parser = dict(izip(tags, [int, parseTime, parseTime]))

		proc = LocalProcess(self._configExec, '-sql')
		for queue in imap(str.strip, proc.stdout.iter(timeout = 10)):
			queues[queue] = dict()
		proc_q = LocalProcess(self._configExec, '-sq', queue)
			for line in proc_q.stdout.iter(timeout = 10):
				attr, value = lmap(str.strip, line.split(' ', 1))
				if (attr in tags) and (value != 'INFINITY'):
					queues[queue][reqs[attr]] = parser[attr](value)
			proc_q.status_raise(timeout = 0)
		proc.status_raise(timeout = 0)
		return queues
Example #29
 def _accept_lumi(self, block, fi, idx_runs, idx_lumi, lumi_filter):
     if (idx_runs is None) or (idx_lumi is None):
         return True
     return any(
         imap(
             lambda run_lumi: select_lumi(run_lumi, lumi_filter),
             izip(fi[DataProvider.Metadata][idx_runs],
                  fi[DataProvider.Metadata][idx_lumi])))
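
The `any`/`imap`/`izip` chain above is equivalent to a generator expression over paired run/lumi metadata; a self-contained sketch with invented values standing in for the metadata and the filter:

runs = [1, 1, 2]
lumis = [10, 11, 12]
selected = {(1, 11)}  # stands in for the lumi_filter lookup
print(any((run, lumi) in selected
          for run, lumi in zip(runs, lumis)))  # True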
Example #30
 def _query(self, api, **kwargs):
     key = (self._url, api, tuple(kwargs.items()))
     if key not in SiteDB.query_cache:
         SiteDB.query_cache[key] = self._gjrc.get(api=api,
                                                  params=kwargs or None)
     data = SiteDB.query_cache[key]
     columns = data['desc']['columns']
     for row in data['result']:
         yield dict(izip(columns, row))
Example #31
	def _match_lookup_dict_key(self, lookup_value_list):
		for lookup_dict_key in self._lookup_order:
			match = True
			lookup_info_iter = izip(lookup_value_list, lookup_dict_key, self._lookup_matcher_list)
			for (lookup_value, lookup_expr, lookup_matcher) in lookup_info_iter:
				if lookup_value is not None:
					match = match and (lookup_matcher.matcher(lookup_value, lookup_expr) > 0)
			if match:
				return lookup_dict_key
Example #32
	def getQueues(self):
		(queues, active) = ({}, False)
		keys = [WMS.MEMORY, WMS.CPUTIME, WMS.WALLTIME]
		parser = dict(izip(keys, [int, parseTime, parseTime]))
		proc = LocalProcess(self.statusExec, '-q')
		for line in proc.stdout.iter(timeout = 10):
			if line.startswith('-'):
				active = True
			elif line.startswith(' '):
				active = False
			elif active:
				fields = lmap(str.strip, line.split()[:4])
				queueInfo = {}
				for key, value in ifilter(lambda k_v: not k_v[1].startswith('-'), izip(keys, fields[1:])):
					queueInfo[key] = parser[key](value)
				queues[fields[0]] = queueInfo
		proc.status_raise(timeout = 0)
		return queues
Example #33
	def _parse(self, proc):
		status_iter = proc.stdout.iter(self._timeout)
		head = lmap(lambda x: x.strip('%').lower(), next(status_iter, '').split())
		for entry in imap(str.strip, status_iter):
			job_info = dict(izip(head, ifilter(lambda x: x != '', entry.split(None, len(head) - 1))))
			job_info[CheckInfo.WMSID] = job_info.pop('pid')
			job_info[CheckInfo.RAW_STATUS] = job_info.pop('stat')
			job_info.update({CheckInfo.QUEUE: 'localqueue', CheckInfo.WN: 'localhost'})
			yield job_info
Example #34
	def _iter_datasource_items(self, item, metadata_dict, entries, location_list, obj_dict):
		for block in self._source.get_block_list_cached(show_stats=False):
			metadata_keys = block.get(DataProvider.Metadata, [])
			for fi in block[DataProvider.FileList]:
				metadata_dict['SRC_DATASET'] = block[DataProvider.Dataset]
				metadata_dict['SRC_BLOCK'] = block[DataProvider.BlockName]
				metadata_dict.update(dict(izip(metadata_keys, fi.get(DataProvider.Metadata, []))))
				yield (fi[DataProvider.URL], metadata_dict, fi[DataProvider.NEntries],
					block[DataProvider.Locations], obj_dict)
Example #35
	def getQueues(self):
		(queues, active) = ({}, False)
		keys = [WMS.MEMORY, WMS.CPUTIME, WMS.WALLTIME]
		parser = dict(izip(keys, [int, parseTime, parseTime]))
		proc = LocalProcess(self.statusExec, '-q')
		for line in proc.stdout.iter(timeout = 10):
			if line.startswith('-'):
				active = True
			elif line.startswith(' '):
				active = False
			elif active:
				fields = lmap(str.strip, line.split()[:4])
				queueInfo = {}
				for key, value in ifilter(lambda k_v: not k_v[1].startswith('-'), izip(keys, fields[1:])):
					queueInfo[key] = parser[key](value)
				queues[fields[0]] = queueInfo
		proc.status_raise(timeout = 0)
		return queues
Example #36
 def truncateData(self, timeStep, overAllBandwidth, truncFront, truncBack):
     truncatedTimeStep = []
     truncatedOverAllBandwidth = []
     for currentTimeStep, thisBw in izip(timeStep, overAllBandwidth):
         if (currentTimeStep > truncFront) and (currentTimeStep <
                                                truncBack):
             truncatedOverAllBandwidth.append(thisBw)
             truncatedTimeStep.append(currentTimeStep)
     return (truncatedTimeStep, truncatedOverAllBandwidth)
Example #37
    def _get_just_fun_dict(self, head, align_str):
        just_fun_dict = {'l': str.ljust, 'r': str.rjust, 'c': str.center}

        # just_fun = {id1: str.center, id2: str.rjust, ...}

        def _get_key_format(head_entry, align_str):
            return (head_entry[0], just_fun_dict[align_str])

        return dict(ismap(_get_key_format, izip(head, align_str)))
Example #38
	def matchRule(self, src):
		srcValues = lmap(lambda key: src.get(key, None), self._lookup_keys)
		for lookupValues in self._lookup_order:
			match = True
			for (sval, lval, lmatch) in izip(srcValues, lookupValues, self._lookup_functions):
				if sval is not None:
					match = match and (lmatch.matcher(sval, lval) > 0)
			if match:
				return lookupValues
Example #39
	def _init_psrc_max(self):
		self._psrc_info_list = []
		psrc_group_size = 1
		for (psrc, psrc_max) in izip(self._psrc_list, self._psrc_max_list):
			self._psrc_info_list.append((psrc, psrc_max, psrc_group_size))
			if psrc_max:
				psrc_group_size *= psrc_max
		psrc_max_list = lfilter(lambda n: n is not None, self._psrc_max_list)
		if psrc_max_list:
			return reduce(lambda a, b: a * b, psrc_max_list)
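
Pairing each source with the running group size presumably lets a combined parameter number be decomposed into per-source indices later; the return value is the cross-product size over all finite sources. The size computation in isolation (lengths are hypothetical, None marks an unbounded source):

from functools import reduce

psrc_max_list = [3, None, 4]
finite_list = [n for n in psrc_max_list if n is not None]
if finite_list:
    print(reduce(lambda a, b: a * b, finite_list))  # 12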
Example #40
def list_block_metadata(datasets, blocks):
	for block in blocks:
		if len(datasets) > 1:
			print('Dataset: %s' % block[DataProvider.Dataset])
		print('Blockname: %s' % block[DataProvider.BlockName])
		mkdict = lambda x: dict(izip(block[DataProvider.Metadata], x[DataProvider.Metadata]))
		metadata = utils.QM(block[DataProvider.FileList], mkdict(block[DataProvider.FileList][0]), {})
		for fileInfo in block[DataProvider.FileList]:
			utils.intersectDict(metadata, mkdict(fileInfo))
		print_metadata(metadata.items(), max([0] + lmap(len, metadata.keys())))
Example #41
	def getVarMapping(self):
		# Transient variables
		transients = ['GC_DATE', 'GC_TIMESTAMP', 'GC_GUID'] # these variables are determined on the WN
		# Alias vars: Eg. __MY_JOB__ will access $GC_JOB_ID - used mostly for compatibility
		alias = {'DATE': 'GC_DATE', 'TIMESTAMP': 'GC_TIMESTAMP', 'GUID': 'GC_GUID',
			'MY_JOBID': 'GC_JOB_ID', 'MY_JOB': 'GC_JOB_ID', 'JOBID': 'GC_JOB_ID', 'GC_JOBID': 'GC_JOB_ID',
			'CONF': 'GC_CONF', 'TASK_ID': 'GC_TASK_ID'}
		varNames = self.getVarNames() + transients
		alias.update(dict(izip(varNames, varNames))) # include reflexive mappings
		return alias
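
`dict(izip(names, names))` builds an identity ('reflexive') mapping, so every real variable name also resolves to itself next to the explicit aliases:

var_names = ['GC_DATE', 'GC_GUID']
print(dict(zip(var_names, var_names)))
# {'GC_DATE': 'GC_DATE', 'GC_GUID': 'GC_GUID'}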
Example #42
	def get_var_alias_map(self):
		# Transient variables
		transients = ['GC_DATE', 'GC_TIMESTAMP', 'GC_GUID']  # these variables are determined on the WN
		# Alias vars: Eg. __MY_JOB__ will access $GC_JOB_ID - used mostly for compatibility
		var_alias_map = {'DATE': 'GC_DATE', 'TIMESTAMP': 'GC_TIMESTAMP', 'GUID': 'GC_GUID',
			'MY_JOBID': 'GC_JOB_ID', 'MY_JOB': 'GC_JOB_ID', 'JOBID': 'GC_JOB_ID', 'GC_JOBID': 'GC_JOB_ID',
			'CONF': 'GC_CONF', 'TASK_ID': 'GC_TASK_ID'}
		var_name_list = self._get_var_name_list() + transients
		var_alias_map.update(dict(izip(var_name_list, var_name_list)))  # include reflexive mappings
		return var_alias_map
Example #43
 def matchRule(self, src):
     srcValues = lmap(lambda key: src.get(key, None), self._lookup_keys)
     for lookupValues in self._lookup_order:
         match = True
         for (sval, lval, lmatch) in izip(srcValues, lookupValues,
                                          self._lookup_functions):
             if sval is not None:
                 match = match and (lmatch.matcher(sval, lval) > 0)
         if match:
             return lookupValues
Example #44
 def discover(self):
     active = False
     keys = [WMS.MEMORY, WMS.CPUTIME, WMS.WALLTIME]
     parser = dict(izip(keys, [int, parseTime, parseTime]))
     proc = LocalProcess(self._exec, '-q')
     for line in proc.stdout.iter(timeout=10):
         if line.startswith('-'):
             active = True
         elif line.startswith(' '):
             active = False
         elif active:
             fields = lmap(str.strip, line.split()[:4])
             queueInfo = {'name': fields[0]}
             for key, value in ifilter(
                     lambda k_v: not k_v[1].startswith('-'),
                     izip(keys, fields[1:])):
                 queueInfo[key] = parser[key](value)
             yield queueInfo
     proc.status_raise(timeout=0)
Example #45
	def getVarMapping(self):
		# Transient variables
		transients = ['GC_DATE', 'GC_TIMESTAMP', 'GC_GUID'] # these variables are determined on the WN
		# Alias vars: Eg. __MY_JOB__ will access $GC_JOB_ID - used mostly for compatibility
		alias = {'DATE': 'GC_DATE', 'TIMESTAMP': 'GC_TIMESTAMP', 'GUID': 'GC_GUID',
			'MY_JOBID': 'GC_JOB_ID', 'MY_JOB': 'GC_JOB_ID', 'JOBID': 'GC_JOB_ID', 'GC_JOBID': 'GC_JOB_ID',
			'CONF': 'GC_CONF', 'TASK_ID': 'GC_TASK_ID'}
		varNames = self.getVarNames() + transients
		alias.update(dict(izip(varNames, varNames))) # include reflexive mappings
		return alias
Example #46
 def _init_psrc_max(self):
     self._psrc_info_list = []
     psrc_group_size = 1
     for (psrc, psrc_max) in izip(self._psrc_list, self._psrc_max_list):
         self._psrc_info_list.append((psrc, psrc_max, psrc_group_size))
         if psrc_max:
             psrc_group_size *= psrc_max
     psrc_max_list = lfilter(lambda n: n is not None, self._psrc_max_list)
     if psrc_max_list:
         return reduce(lambda a, b: a * b, psrc_max_list)
Example #47
 def _acceptLumi(self, block, fi, idxRuns, idxLumi):
     if (idxRuns is None) or (idxLumi is None):
         return True
     fi_meta = fi[DataProvider.Metadata]
     for (run, lumi) in izip(fi_meta[idxRuns], fi_meta[idxLumi]):
         if selectLumi(
             (run, lumi),
                 self._lumi_filter.lookup(block[DataProvider.Nickname],
                                          is_selector=False)):
             return True
Example #48
	def initMaxParameters(self):
		self.quickFill = []
		prev = 1
		for (psource, maxN) in izip(self._psourceList, self._psourceMaxList):
			self.quickFill.append((psource, maxN, prev))
			if maxN:
				prev *= maxN
		maxList = lfilter(lambda n: n is not None, self._psourceMaxList)
		if maxList:
			return reduce(lambda a, b: a * b, maxList)
Example #49
    def show_report(self, job_db, jobnum_list):
        js_dict = self._get_job_state_dict(job_db, jobnum_list)
        self._start_time = self._start_time or time.time()
        self._start_js_dict = self._start_js_dict or js_dict

        def _add_column(output_list, js_value, name, color, spacing=1):
            output_list.append(
                (color + ANSI.bold + '%6d' % js_dict[js_value] + ANSI.reset,
                 color + name + ANSI.reset, color + ANSI.bold +
                 '%5.1f%%' % _make_per(js_value) + ANSI.reset))
            if spacing > 0:
                output_list += [(' ' * spacing, '>', ' ' * spacing)]

        def _make_per(value):
            return js_dict[value] / float(js_dict[None]) * 100

        (dt_start, dt_div, dt_delim) = (time.time() - self._start_time, 60, ':'
                                        )  # mmm:ss format
        if dt_start > 24 * 60 * 60:  # > 24 h
            (dt_start, dt_div, dt_delim) = (dt_start / (60 * 60), 24., 'd'
                                            )  # DDDdhh format
        elif dt_start > 60 * 60:  # > 60 min
            (dt_start, dt_div, dt_delim) = (dt_start / 60, 60, 'h'
                                            )  # HHHhmm format
        output_list = [
            (' ' * 6,
             '%3d%s%02d' % (dt_start / dt_div, dt_delim, dt_start % dt_div),
             ' ' * 6)
        ]
        _add_column(output_list, Job.INIT, '  INIT', '')
        _add_column(output_list,
                    JobClass.ATWMS,
                    'QUEUED',
                    ANSI.color_yellow,
                    spacing=2)
        _add_column(output_list, JobClass.ATWMS, 'RUNNING', ANSI.color_cyan)
        _add_column(output_list,
                    JobClass.ATWMS,
                    '  DONE',
                    ANSI.color_blue,
                    spacing=0)
        output_list += [('/', '|', '\\')]

        def _get_status_str(js_value, name, color):
            return color + name + ANSI.reset + ANSI.bold + color + ' %6d %5.1f%%' % (
                js_dict[js_value], _make_per(js_value)) + ANSI.reset

        output_list += [(
            _get_status_str(Job.SUCCESS, 'SUCCESS', ANSI.color_green),
            ANSI.back_white + ANSI.color_black +
            str(BasicProgressBar(0, js_dict[None], 21, js_dict[Job.SUCCESS])) +
            ANSI.reset,
            _get_status_str(JobClass.FAILING, 'FAILING', ANSI.color_red))]
        for entry in izip(*output_list):
            self._show_line(str.join(' ', entry))
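
The closing `izip(*output_list)` transposes the collected per-column tuples into display lines, one printed row per tuple position:

output_list = [('a1', 'a2'), ('b1', 'b2'), ('c1', 'c2')]
for line_parts in zip(*output_list):
    print(' '.join(line_parts))
# a1 b1 c1
# a2 b2 c2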
Example #50
def list_parameters(opts, psource):
    (result, needGCParam) = get_parameters(opts, psource)
    enabledOutput = opts.output.split(',')
    output = lfilter(lambda k: not opts.output or k in enabledOutput,
                     psource.getJobKeys())
    stored = lfilter(lambda k: not k.untracked, output)
    untracked = lfilter(lambda k: k.untracked, output)

    if opts.collapse > 0:
        result_old = result
        result = {}
        result_nicks = {}
        head = [('COLLATE_JOBS', '# of jobs')]
        if 'DATASETSPLIT' in stored:
            stored.remove('DATASETSPLIT')
            if opts.collapse == 1:
                stored.append('DATASETNICK')
                head.append(('DATASETNICK', 'DATASETNICK'))
            elif opts.collapse == 2:
                head.append(('COLLATE_NICK', '# of nicks'))
        for pset in result_old:
            if ('DATASETSPLIT' in pset) and (opts.collapse == 1):
                pset.pop('DATASETSPLIT')
            nickname = None
            if ('DATASETNICK' in pset) and (opts.collapse == 2):
                nickname = pset.pop('DATASETNICK')
            h = md5_hex(repr(lmap(lambda key: pset.get(str(key)), stored)))
            result.setdefault(h, []).append(pset)
            result_nicks.setdefault(h, set()).add(nickname)

        def doCollate(h):
            tmp = result[h][0]
            tmp['COLLATE_JOBS'] = len(result[h])
            tmp['COLLATE_NICK'] = len(result_nicks[h])
            return tmp

        result = lmap(doCollate, result)
    else:
        head = [('GC_JOB_ID', '#')]
        if needGCParam:
            head.append(('GC_PARAM', 'GC_PARAM'))
    if opts.active:
        head.append((ParameterInfo.ACTIVE, 'ACTIVE'))
    if opts.visible:
        stored = opts.visible.split(',')
    head.extend(sorted(izip(stored, stored)))
    if opts.untracked:
        head.extend(
            sorted(
                imap(
                    lambda n: (n, '(%s)' % n),
                    ifilter(lambda n: n not in ['GC_PARAM', 'GC_JOB_ID'],
                            untracked))))
    utils.vprint('')
    utils.printTabular(head, result)
Example #51
	def _set_subelement_layout(self):  # recalculate layout of subelements
		height_list = lmap(lambda element: element.get_height(), self._element_list)
		height_total = sum(imap(lambda element_height: element_height or 0, height_list))
		pos = self._layout_pos
		for (element, element_height) in izip(self._element_list, height_list):
			if element_height is None:
				element_height = max(0, self._layout_height - height_total)
			element.set_layout(pos, min(self._layout_height - pos, element_height),
				self._layout_width, self._set_subelement_layout)  # call this function if height changes
			pos += element_height
		self._subelements_height = pos - self._layout_pos
Example #52
	def _parse(self, proc):
		status_iter = proc.stdout.iter(self._timeout)
		next(status_iter)
		tmpHead = [CheckInfo.WMSID, 'user', CheckInfo.RAW_STATUS, CheckInfo.QUEUE, 'from', CheckInfo.WN, 'job_name']
		for line in ifilter(identity, status_iter):
			try:
				tmp = line.split()
				job_info = dict(izip(tmpHead, tmp[:7]))
				job_info['submit_time'] = str.join(' ', tmp[7:10])
				yield job_info
			except Exception:
				raise BackendError('Error reading job info:\n%s' % line)
Example #53
def create_dbs3_json_files(opts, block_info, block_dump):
	block_size = 0
	dataset_type = set()
	for file_info in block_info[DataProvider.FileList]:
		metadata_info = dict(izip(block_info[DataProvider.Metadata], file_info[DataProvider.Metadata]))
		if metadata_info['CMSSW_DATATYPE']:  # this is not always correctly filled
			dataset_type.add(metadata_info['CMSSW_DATATYPE'])
		file_size = metadata_info['SE_OUTPUT_SIZE']
		lfn = file_info[DataProvider.URL]

		# add file information
		block_dump['files'].append({
			'logical_file_name': lfn, 'file_size': file_size,
			'check_sum': metadata_info['SE_OUTPUT_HASH_CRC32'],
			'md5': metadata_info['SE_OUTPUT_HASH_MD5'],
			'adler32': 'NOTSET',
			'file_lumi_list': lmap(lambda run_lumi:
				{'run_num': run_lumi[0], 'lumi_section_num': run_lumi[1]}, metadata_info['CMSSW_LUMIS']),
			'event_count': metadata_info['CMSSW_EVENTS_WRITE'],
			'file_type': 'EDM',
			'auto_cross_section': 0.0,
		})

		# add file parentage information
		if not opts.no_parents:
			block_dump['file_parent_list'].extend(imap(lambda parent_lfn:
				{'logical_file_name': lfn, 'parent_logical_file_name': parent_lfn},
				metadata_info['CMSSW_PARENT_LFN']))

		# fill file / dataset configurations
		dataset_conf_dict = {
			'release_version': metadata_info['CMSSW_VERSION'],
			'pset_hash': metadata_info['CMSSW_CONFIG_HASH'],
			'app_name': 'cmsRun',
			'output_module_label': 'crab2_mod_label',
			'global_tag': metadata_info.get('CMSSW_GLOBALTAG', opts.globaltag)
		}
		if opts.unique_cfg:
			dataset_conf_dict['pset_hash'] = md5_hex(dataset_conf_dict['pset_hash'] +
				block_info[DataProvider.Dataset])
		if dataset_conf_dict not in block_dump['dataset_conf_list']:
			block_dump['dataset_conf_list'].append(dataset_conf_dict)

		# file configurations also specifies lfn
		file_conf_dict = dict(dataset_conf_dict)
		file_conf_dict['lfn'] = lfn
		block_dump['file_conf_list'].append(file_conf_dict)

		# update block size for block summary information
		block_size += file_size
	return (block_size, dataset_type)
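
The `file_lumi_list` entry flattens `(run, lumi)` pairs into the dictionaries DBS3 expects; the mapping in isolation, with hypothetical lumi metadata:

lumi_pairs = [(1, 10), (1, 11)]
print([{'run_num': run, 'lumi_section_num': lumi}
       for run, lumi in lumi_pairs])
# [{'run_num': 1, 'lumi_section_num': 10}, {'run_num': 1, 'lumi_section_num': 11}]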
Example #54
	def parseStatus(self, status):
		next(status)
		tmpHead = ['id', 'user', 'status', 'queue', 'from', 'dest_host', 'job_name']
		for jobline in status:
			if jobline != '':
				try:
					tmp = jobline.split()
					jobinfo = dict(izip(tmpHead, tmp[:7]))
					jobinfo['submit_time'] = str.join(' ', tmp[7:10])
					jobinfo['dest'] = 'N/A'
					if jobinfo['dest_host'] != '-':
						jobinfo['dest'] = '%s/%s' % (jobinfo['dest_host'], jobinfo['queue'])
					yield jobinfo
				except Exception:
					raise BackendError('Error reading job info:\n%s' % jobline)
Example #55
def list_parameters(opts, psource):
	(result, needGCParam) = get_parameters(opts, psource)
	enabledOutput = opts.output.split(',')
	output = lfilter(lambda k: not opts.output or k in enabledOutput, psource.getJobKeys())
	stored = lfilter(lambda k: not k.untracked, output)
	untracked = lfilter(lambda k: k.untracked, output)

	if opts.collapse > 0:
		result_old = result
		result = {}
		result_nicks = {}
		head = [('COLLATE_JOBS', '# of jobs')]
		if 'DATASETSPLIT' in stored:
			stored.remove('DATASETSPLIT')
			if opts.collapse == 1:
				stored.append('DATASETNICK')
				head.append(('DATASETNICK', 'DATASETNICK'))
			elif opts.collapse == 2:
				head.append(('COLLATE_NICK', '# of nicks'))
		for pset in result_old:
			if ('DATASETSPLIT' in pset) and (opts.collapse == 1):
				pset.pop('DATASETSPLIT')
			nickname = None
			if ('DATASETNICK' in pset) and (opts.collapse == 2):
				nickname = pset.pop('DATASETNICK')
			h = md5_hex(repr(lmap(pset.get, stored)))
			result.setdefault(h, []).append(pset)
			result_nicks.setdefault(h, set()).add(nickname)

		def doCollate(h):
			tmp = result[h][0]
			tmp['COLLATE_JOBS'] = len(result[h])
			tmp['COLLATE_NICK'] = len(result_nicks[h])
			return tmp
		result = lmap(doCollate, result)
	else:
		head = [('GC_JOB_ID', '#')]
		if needGCParam:
			head.append(('GC_PARAM', 'GC_PARAM'))
	if opts.active:
		head.append((ParameterInfo.ACTIVE, 'ACTIVE'))
	if opts.visible:
		stored = opts.visible.split(',')
	head.extend(sorted(izip(stored, stored)))
	if opts.untracked:
		head.extend(sorted(imap(lambda n: (n, '(%s)' % n), ifilter(lambda n: n not in ['GC_PARAM', 'GC_JOB_ID'], untracked))))
	utils.vprint('')
	utils.printTabular(head, result)
Example #56
	def _parse(self, proc):
		tmpHead = [CheckInfo.WMSID, 'user', 'group', 'job_name', CheckInfo.QUEUE, 'partition',
			'nodes', 'cpu_time', 'wall_time', 'memory', 'queue_time', CheckInfo.RAW_STATUS]
		status_iter = ifilter(identity, proc.stdout.iter(self._timeout))
		next(status_iter)
		next(status_iter)
		for line in status_iter:
			tmp = lmap(lambda x: x.strip(), line.replace('\x1b(B', '').replace('\x1b[m', '').split())
			job_info = dict(izip(tmpHead, tmp[:12]))
			if len(tmp) > 12:
				job_info['start_time'] = tmp[12]
			if len(tmp) > 13:
				job_info['kill_time'] = tmp[13]
			if len(tmp) > 14:
				job_info[CheckInfo.WN] = tmp[14]
			yield job_info
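
A closing note on a property many of these snippets rely on: `izip`/`zip` stops at the shortest input, so zipping a fixed header against a possibly shorter row silently drops the missing trailing fields instead of raising:

head = ['id', 'user', 'status']
row = ['1234', 'alice']
print(dict(zip(head, row)))  # {'id': '1234', 'user': 'alice'}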