def _getSectionKey(self, section):
		tmp = section.split()
		assert(len(tmp) > 0)
		(curSection, curNames, curTags) = (tmp[0], [], {})
		for token in tmp[1:]:
			if ':' in token:
				tag_entry = token.split(':')
				assert(len(tag_entry) == 2)
				curTags[tag_entry[0]] = tag_entry[1]
			elif token:
				curNames.append(token)

		def myIndex(src, value):
			try:
				return src.index(value)
			except Exception:
				return None
		idxClass = myIndex(self._cfgClassSections, curSection)
		idxSection = myIndex(self._cfgSections, curSection)
		if (not self._cfgClassSections) and (not self._cfgSections):
			idxSection = 0
		if (idxClass is not None) or (idxSection is not None): # Section is selected by class or manually
			idxNames = tuple(imap(lambda n: myIndex(self._cfgNames, n), curNames))
			if None not in idxNames: # All names in current section are selected
				curTagNames = lfilter(lambda tn: tn in curTags, self._cfgTagsOrder)
				curTagNamesLeft = lfilter(lambda tn: tn not in self._cfgTagsOrder, curTags)
				idxTags = lmap(lambda tn: myIndex(self._cfgTags, (tn, curTags[tn])), curTagNames)
				if (None not in idxTags) and not curTagNamesLeft:
					return (idxClass, idxSection, idxNames, idxTags)
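All of these snippets lean on grid-control's python_compat module. A minimal sketch of the helpers they use, assuming they are thin Python 2/3 wrappers (the real module covers more corner cases):

# Sketch only - assumes python_compat merely bridges the Python 2/3 builtins
try:
	from itertools import ifilter, imap, izip  # Python 2
except ImportError:
	(imap, ifilter, izip) = (map, filter, zip)  # Python 3: builtins are lazy already

def lmap(fun, *iterables):
	return list(imap(fun, *iterables))  # eager variant - always returns a list

def lfilter(fun, iterable):
	return list(ifilter(fun, iterable))  # eager variant - always returns a list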
Example #2
def _sort_python_compat_lines(fn):
    output_line_list = []
    output_set = set()
    import_section = False
    for line in SafeFile(fn).iter_close():
        if line.startswith('from') or line.startswith('import'):
            import_section = True
        elif line.strip():
            if import_section:
                output_list = lfilter(lambda x: x.strip() != '', output_set)
                output_list.sort(
                    key=lambda l: (not l.startswith('import'),
                                   ('python_compat' in l), 'testfwk' not in l,
                                   lmap(lambda x: x.split('.'), l.split())))
                for output_line in output_list:
                    output_line_list.append(output_line)
                output_line_list.append('\n\n')
                output_set = set()
            import_section = False
        if not import_section:
            output_line_list.append(line)
        else:
            output_set.add(line)
    if import_section:
        output_list = lfilter(lambda x: x.strip() != '', output_set)
        output_list.sort(
            key=lambda l: (not l.startswith('import'), 'python_compat' not in
                           l, lmap(lambda x: x.split('.'), l.split())))
        for output_line in output_list:
            output_line_list.append(output_line)

    fp = open(fn, 'w')
    try:
        for output_line in output_line_list:
            fp.write(output_line)
    finally:
        fp.close()  # close the file even if a write fails
Example #3
    def _checkJobList(self, wms, jobList):
        if self._defect_tries:
            nDefect = len(self._defect_counter)  # Waiting list gets larger in case reported == []
            waitList = self._sample(self._defect_counter,
                                    nDefect - max(1, int(nDefect / 2**self._defect_raster)))
            jobList = lfilter(lambda x: x not in waitList, jobList)

        (change, timeoutList, reported) = JobManager._checkJobList(self, wms, jobList)
        for jobNum in reported:
            self._defect_counter.pop(jobNum, None)

        if self._defect_tries and (change is not None):
            self._defect_raster = utils.QM(reported, 1, self._defect_raster + 1)  # make 'raster' iteratively smaller
            for jobNum in ifilter(lambda x: x not in reported, jobList):
                self._defect_counter[jobNum] = self._defect_counter.get(jobNum, 0) + 1
            kickList = lfilter(lambda jobNum: self._defect_counter[jobNum] >= self._defect_tries,
                               self._defect_counter)
            for jobNum in set(kickList + utils.QM((len(reported) == 0) and (len(jobList) == 1),
                                                  jobList, [])):
                timeoutList.append(jobNum)
                self._defect_counter.pop(jobNum)

        return (change, timeoutList, reported)
Example #4
	def makeJDL(self, jobNum, module):
		cfgPath = os.path.join(self._jobPath, 'job_%d.var' % jobNum)
		sbIn = lmap(lambda d_s_t: d_s_t[1], self._getSandboxFilesIn(module))
		sbOut = lmap(lambda d_s_t: d_s_t[2], self._getSandboxFilesOut(module))
		wcList = lfilter(lambda x: '*' in x, sbOut)
		if len(wcList):
			self._writeJobConfig(cfgPath, jobNum, module, {'GC_WC': str.join(' ', wcList)})
			sandboxOutJDL = lfilter(lambda x: x not in wcList, sbOut) + ['GC_WC.tar.gz']
		else:
			self._writeJobConfig(cfgPath, jobNum, module, {})
			sandboxOutJDL = sbOut
		# Warn about too large sandboxes
		sbSizes = lmap(os.path.getsize, sbIn)
		if sbSizes and (self._warnSBSize > 0) and (sum(sbSizes) > self._warnSBSize * 1024 * 1024):
			if not utils.getUserBool('Sandbox is very large (%d bytes) and can cause issues with the WMS! Do you want to continue?' % sum(sbSizes), False):
				sys.exit(os.EX_OK)
			self._warnSBSize = 0

		reqs = self.brokerSite.brokerAdd(module.getRequirements(jobNum), WMS.SITES)
		formatStrList = lambda strList: '{ %s }' % str.join(', ', imap(lambda x: '"%s"' % x, strList))
		contents = {
			'Executable': '"gc-run.sh"',
			'Arguments': '"%d"' % jobNum,
			'StdOutput': '"gc.stdout"',
			'StdError': '"gc.stderr"',
			'InputSandbox': formatStrList(sbIn + [cfgPath]),
			'OutputSandbox': formatStrList(sandboxOutJDL),
			'VirtualOrganisation': '"%s"' % self.vo,
			'Rank': '-other.GlueCEStateEstimatedResponseTime',
			'RetryCount': 2
		}
		return self._jdl_writer.format(reqs, contents)
	def _get_section_key(self, section):
		tmp = section.split()
		if not tmp:
			raise ConfigError('Invalid config section %r' % section)
		(cur_section, cur_name_list, cur_tag_map) = (tmp[0], [], {})
		for token in tmp[1:]:
			if ':' in token:
				tag_entry = token.split(':')
				if len(tag_entry) != 2:
					raise ConfigError('Invalid config tag in section %r' % section)
				cur_tag_map[tag_entry[0]] = tag_entry[1]
			elif token:
				cur_name_list.append(token)

		class_section_idx = safe_index(self._class_section_list, cur_section)
		section_idx = safe_index(self._section_list, cur_section)
		if (not self._class_section_list) and (not self._section_list):
			section_idx = 0
		if (class_section_idx is not None) or (section_idx is not None):
			# Section is selected by class or manually
			name_idx_tuple = tuple(imap(lambda n: safe_index(self._section_name_list, n), cur_name_list))
			if None not in name_idx_tuple:  # All names in current section are selected
				cur_tag_name_list = lfilter(cur_tag_map.__contains__, self._section_tag_order)
				left_tag_name_list = lfilter(lambda tn: tn not in self._section_tag_order, cur_tag_map)
				tag_tuple_list = imap(lambda tn: (tn, cur_tag_map[tn]), cur_tag_name_list)
				tag_idx_tuple = tuple(imap(lambda tt: safe_index(self._section_tag_list, tt), tag_tuple_list))
				if (None not in tag_idx_tuple) and not left_tag_name_list:
					return (class_section_idx, section_idx, name_idx_tuple, tag_idx_tuple)
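For reference, a sketch of the safe_index helper used above, assuming it behaves like the inline myIndex from the first example:

def safe_index(src, value):
	# Position of value in src, or None if it is missing
	try:
		return src.index(value)
	except Exception:
		return None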
	def _getSubmissionJobs(self, maxsample):
		# Get list of submittable jobs
		readyList = self.jobDB.getJobs(ClassSelector(JobClass.READY))
		retryOK = readyList
		defaultJob = Job()
		if self._job_retries >= 0:
			retryOK = lfilter(lambda x: self.jobDB.get(x, defaultJob).attempt - 1 < self._job_retries, readyList)
		modOK = lfilter(self._task.canSubmit, readyList)
		jobList = set.intersection(set(retryOK), set(modOK))

		if self._showBlocker and readyList and not jobList: # No submission but ready jobs
			err = []
			err += utils.QM((len(retryOK) > 0) and (len(modOK) == 0), [], ['have hit their maximum number of retries'])
			err += utils.QM((len(retryOK) == 0) and (len(modOK) > 0), [], ['are vetoed by the task module'])
			self._log_user_time.warning('All remaining jobs %s!', str.join(utils.QM(retryOK or modOK, ' or ', ' and '), err))
		self._showBlocker = not (len(readyList) > 0 and len(jobList) == 0)

		# Determine number of jobs to submit
		submit = len(jobList)
		if self._njobs_inqueue > 0:
			submit = min(submit, self._njobs_inqueue - self.jobDB.getJobsN(ClassSelector(JobClass.ATWMS)))
		if self._njobs_inflight > 0:
			submit = min(submit, self._njobs_inflight - self.jobDB.getJobsN(ClassSelector(JobClass.PROCESSING)))
		if self._continuous and (maxsample > 0):
			submit = min(submit, maxsample)
		submit = max(submit, 0)

		if self._do_shuffle:
			return self._sample(jobList, submit)
		return sorted(jobList)[:submit]
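Several examples use utils.QM as a ternary helper (grid-control supported Python versions that lack the inline if/else); a minimal sketch:

def QM(condition, on_true, on_false):
	# QM(c, a, b) reads as 'a if c else b' - but note that both branch
	# values are evaluated before the call, unlike the inline conditional
	if condition:
		return on_true
	return on_false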
Example #7
    def _check_get_jobnum_list(self, task, wms, jobnum_list):
        if self._defect_tries:
            num_defect = len(self._defect_counter)  # Waiting list gets larger in case reported == []
            num_wait = num_defect - max(1, int(num_defect / 2**self._defect_raster))
            jobnum_list_wait = self._sample(self._defect_counter, num_wait)
            jobnum_list = lfilter(lambda jobnum: jobnum not in jobnum_list_wait, jobnum_list)

        (change, jobnum_list_timeout, reported) = JobManager._check_get_jobnum_list(
            self, task, wms, jobnum_list)
        for jobnum in reported:
            self._defect_counter.pop(jobnum, None)

        if self._defect_tries and (change is not None):
            # make 'raster' iteratively smaller
            self._defect_raster += 1
            if reported:
                self._defect_raster = 1
            for jobnum in ifilter(lambda x: x not in reported, jobnum_list):
                self._defect_counter[jobnum] = self._defect_counter.get(jobnum, 0) + 1
            jobnum_list_kick = lfilter(
                lambda jobnum: self._defect_counter[jobnum] >= self._defect_tries,
                self._defect_counter)
            if (len(reported) == 0) and (len(jobnum_list) == 1):
                jobnum_list_kick.extend(jobnum_list)
            for jobnum in set(jobnum_list_kick):
                jobnum_list_timeout.append(jobnum)
                self._defect_counter.pop(jobnum)

        return (change, jobnum_list_timeout, reported)
Example #8
	def _check_get_jobnum_list(self, task, wms, jobnum_list):
		if self._defect_tries:
			num_defect = len(self._defect_counter)  # Waiting list gets larger in case reported == []
			num_wait = num_defect - max(1, int(num_defect / 2 ** self._defect_raster))
			jobnum_list_wait = self._sample(self._defect_counter, num_wait)
			jobnum_list = lfilter(lambda jobnum: jobnum not in jobnum_list_wait, jobnum_list)

		(change, jobnum_list_timeout, reported) = JobManager._check_get_jobnum_list(
			self, task, wms, jobnum_list)
		for jobnum in reported:
			self._defect_counter.pop(jobnum, None)

		if self._defect_tries and (change is not None):
			# make 'raster' iteratively smaller
			self._defect_raster += 1
			if reported:
				self._defect_raster = 1
			for jobnum in ifilter(lambda x: x not in reported, jobnum_list):
				self._defect_counter[jobnum] = self._defect_counter.get(jobnum, 0) + 1
			jobnum_list_kick = lfilter(lambda jobnum: self._defect_counter[jobnum] >= self._defect_tries,
				self._defect_counter)
			if (len(reported) == 0) and (len(jobnum_list) == 1):
				jobnum_list_kick.extend(jobnum_list)
			for jobnum in set(jobnum_list_kick):
				jobnum_list_timeout.append(jobnum)
				self._defect_counter.pop(jobnum)

		return (change, jobnum_list_timeout, reported)
Example #9
 def _filterListImpl(self, entries):
     strict_result = lfilter(
         lambda entry: self._matchFunction.match(entry) > 0, entries)
     if strict_result:
         return strict_result
     return lfilter(lambda entry: self._matchFunction.match(entry) >= 0,
                    entries)
Example #10
 def resync_psrc(self):
     (psrc_redo, psrc_disable, _) = self._psrc.resync_psrc()
     result_redo = set(lfilter(lambda pnum: pnum < self._max_len, psrc_redo))
     result_disable = set(lfilter(lambda pnum: pnum < self._max_len, psrc_disable))
     self._psrc_len = self._psrc.get_parameter_len()
     return (result_redo, result_disable, False)  # size can never change on-the-fly
Example #11
def list_parameters(opts, psource):
    (result, needGCParam) = get_parameters(opts, psource)
    enabledOutput = opts.output.split(',')
    output = lfilter(lambda k: not opts.output or k in enabledOutput,
                     psource.getJobKeys())
    stored = lfilter(lambda k: k.untracked == False, output)
    untracked = lfilter(lambda k: k.untracked == True, output)

    if opts.collapse > 0:
        result_old = result
        result = {}
        result_nicks = {}
        head = [('COLLATE_JOBS', '# of jobs')]
        if 'DATASETSPLIT' in stored:
            stored.remove('DATASETSPLIT')
            if opts.collapse == 1:
                stored.append('DATASETNICK')
                head.append(('DATASETNICK', 'DATASETNICK'))
            elif opts.collapse == 2:
                head.append(('COLLATE_NICK', '# of nicks'))
        for pset in result_old:
            if ('DATASETSPLIT' in pset) and (opts.collapse == 1):
                pset.pop('DATASETSPLIT')
            nickname = None
            if ('DATASETNICK' in pset) and (opts.collapse == 2):
                nickname = pset.pop('DATASETNICK')
            h = md5_hex(repr(lmap(lambda key: pset.get(str(key)), stored)))
            result.setdefault(h, []).append(pset)
            result_nicks.setdefault(h, set()).add(nickname)

        def doCollate(h):
            tmp = result[h][0]
            tmp['COLLATE_JOBS'] = len(result[h])
            tmp['COLLATE_NICK'] = len(result_nicks[h])
            return tmp

        result = lmap(doCollate, result)
    else:
        head = [('GC_JOB_ID', '#')]
        if needGCParam:
            head.append(('GC_PARAM', 'GC_PARAM'))
    if opts.active:
        head.append((ParameterInfo.ACTIVE, 'ACTIVE'))
    if opts.visible:
        stored = opts.visible.split(',')
    head.extend(sorted(izip(stored, stored)))
    if opts.untracked:
        head.extend(
            sorted(
                imap(
                    lambda n: (n, '(%s)' % n),
                    ifilter(lambda n: n not in ['GC_PARAM', 'GC_JOB_ID'],
                            untracked))))
    utils.vprint('')
    utils.printTabular(head, result)
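A sketch of the md5_hex helper that buckets parameter sets with identical tracked values, assuming it simply hex-digests its argument (the real python_compat version also papers over Python 2/3 string differences):

import hashlib

def md5_hex(value):
    # Hex digest of the repr() string built above
    return hashlib.md5(value.encode('utf-8')).hexdigest()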
Example #12
    def _get_jdl_str_list_job(self, jobnum, task, sb_in_fn_list):
        workdir = self._get_remote_output_dn(jobnum)

        # publish the WMS id for Dashboard
        environ = 'CONDOR_WMS_DASHID=https://%s:/$(Cluster).$(Process)' % self._name

        sb_out_fn_list = []
        for (_, src, target) in self._get_out_transfer_info_list(task):
            if src not in ('gc.stdout', 'gc.stderr'):
                sb_out_fn_list.append(target)

        # condor does not handle wildcards in transfer_output_files
        wildcard_list = lfilter(lambda x: '*' in x, sb_out_fn_list)
        if len(wildcard_list):
            sb_out_fn_list = lfilter(lambda x: x not in wildcard_list,
                                     sb_out_fn_list) + ['GC_WC.tar.gz']
            environ += ';GC_WC=' + ' '.join(wildcard_list)

        job_sb_in_fn_list = sb_in_fn_list + [
            os.path.join(workdir, 'job_%d.var' % jobnum)
        ]
        jdl_str_list = [
            # store matching Grid-Control and Condor ID
            '+GridControl_GCtoWMSID = "%s@$(Cluster).$(Process)"' %
            task.get_description(jobnum).job_name,
            '+GridControl_GCIDtoWMSID = "%s@$(Cluster).$(Process)"' % jobnum,
            'environment = %s' % environ,
            # condor doesn"t execute the job directly. actual job data, files and arguments
            # are accessed by the GC scripts (but need to be copied to the worker)
            'transfer_input_files = ' + str.join(', ', job_sb_in_fn_list),
            # only copy important files - stdout and stderr get remapped but transferred
            # automatically, so don't request them as they would not be found
            'transfer_output_files = ' + str.join(', ', sb_out_fn_list),
            'initialdir = ' + workdir,
            'Output = ' + os.path.join(workdir, "gc.stdout"),
            'Error = ' + os.path.join(workdir, "gc.stderr"),
            'arguments = %s ' % jobnum
        ]

        requirements = ''
        if self._user_requirements:
            requirements = '(%s)' % self._user_requirements
        if self._blacklist_nodes:
            if requirements:
                requirements += ' && '
            blacklist_nodes = [
                'Machine != "%s"' % node for node in self._blacklist_nodes
            ]
            requirements += '(%s)' % ' && '.join(blacklist_nodes)
        if requirements:
            jdl_str_list.append('Requirements = (%s)' % requirements)

        jdl_str_list.extend(self._get_jdl_req_str_list(jobnum, task))
        jdl_str_list.append('Queue\n')
        return jdl_str_list
    def cms_name_to_se(self, cms_name):
        cms_name = cms_name.replace('*', '.*')
        cms_name = cms_name.replace('%', '.*')
        cms_name_regex = re.compile(cms_name)

        psn_site_names = lfilter(lambda site: site['type'] == 'psn' and cms_name_regex.match(site['alias']), self._site_names())
        site_names = set(imap(lambda x: x['site_name'], psn_site_names))
        site_resources = lfilter(lambda x: x['site_name'] in site_names, self._site_resources())
        host_list = lfilter(lambda x: x['type'] == 'SE', site_resources)
        host_list = lmap(lambda x: x['fqdn'], host_list)
        return host_list
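The wildcard translation above maps CMS-style patterns onto regular expressions; a small illustration with a hypothetical site name:

import re

pattern = 'T2_DE_*'.replace('*', '.*').replace('%', '.*')  # -> 'T2_DE_.*'
print(bool(re.compile(pattern).match('T2_DE_DESY')))  # True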
def list_parameters(opts, psource):
	(result, needGCParam) = get_parameters(opts, psource)
	enabledOutput = opts.output.split(',')
	output = lfilter(lambda k: not opts.output or k in enabledOutput, psource.getJobKeys())
	stored = lfilter(lambda k: k.untracked == False, output)
	untracked = lfilter(lambda k: k.untracked == True, output)

	if opts.collapse > 0:
		result_old = result
		result = {}
		result_nicks = {}
		head = [('COLLATE_JOBS', '# of jobs')]
		if 'DATASETSPLIT' in stored:
			stored.remove('DATASETSPLIT')
			if opts.collapse == 1:
				stored.append('DATASETNICK')
				head.append(('DATASETNICK', 'DATASETNICK'))
			elif opts.collapse == 2:
				head.append(('COLLATE_NICK', '# of nicks'))
		for pset in result_old:
			if ('DATASETSPLIT' in pset) and (opts.collapse == 1):
				pset.pop('DATASETSPLIT')
			nickname = None
			if ('DATASETNICK' in pset) and (opts.collapse == 2):
				nickname = pset.pop('DATASETNICK')
			h = md5_hex(repr(lmap(pset.get, stored)))
			result.setdefault(h, []).append(pset)
			result_nicks.setdefault(h, set()).add(nickname)

		def doCollate(h):
			tmp = result[h][0]
			tmp['COLLATE_JOBS'] = len(result[h])
			tmp['COLLATE_NICK'] = len(result_nicks[h])
			return tmp
		result = lmap(doCollate, result)
	else:
		head = [('GC_JOB_ID', '#')]
		if needGCParam:
			head.append(('GC_PARAM', 'GC_PARAM'))
	if opts.active:
		head.append((ParameterInfo.ACTIVE, 'ACTIVE'))
	if opts.visible:
		stored = opts.visible.split(',')
	head.extend(sorted(izip(stored, stored)))
	if opts.untracked:
		head.extend(sorted(imap(lambda n: (n, '(%s)' % n), ifilter(lambda n: n not in ['GC_PARAM', 'GC_JOB_ID'], untracked))))
	utils.vprint('')
	utils.printTabular(head, result)
Example #15
    def _match_entries(self, container, option_list=None):
        key_list = container.get_options()
        if option_list is not None:
            key_list = lfilter(key_list.__contains__, option_list)

        def _get_entry_key_ordered(entry):
            return (tuple(imap(_remove_none,
                               _get_section_key_filtered(entry))), entry.order)

        def _get_section_key_filtered(entry):
            return self._get_section_key(
                entry.section.replace('!', '').strip())

        def _remove_none(key):
            if key is None:
                return -1
            return key

        def _select_sections(entry):
            return _get_section_key_filtered(entry) is not None

        result = []
        for key in key_list:
            (entries, entries_reverse) = ([], [])
            for entry in container.iter_config_entries(key, _select_sections):
                if entry.section.endswith('!'):
                    entries_reverse.append(entry)
                else:
                    entries.append(entry)
            result.extend(
                sorted(entries_reverse,
                       key=_get_entry_key_ordered,
                       reverse=True))
            result.extend(sorted(entries, key=_get_entry_key_ordered))
        return result
	def __init__(self):
		# Collect host / user / installation specific config files
		def _resolve_hostname():
			import socket
			host = socket.gethostname()
			return ignore_exception(Exception, host, lambda: socket.gethostbyaddr(host)[0])

		try:
			hostname = hang_protection(_resolve_hostname, timeout=5)
		except TimeoutException:
			clear_current_exception()
			hostname = None
			logging.getLogger('console').warning('System call to resolve hostname is hanging!')

		def _get_default_config_fn_iter():  # return possible default config files
			if hostname:  # host / domain specific
				for part_idx in irange(hostname.count('.') + 1, -1, -1):
					yield get_path_pkg('../config/%s.conf' % hostname.split('.', part_idx)[-1])
			yield '/etc/grid-control.conf'  # system specific
			yield '~/.grid-control.conf'  # user specific
			yield get_path_pkg('../config/default.conf')  # installation specific
			if os.environ.get('GC_CONFIG'):
				yield '$GC_CONFIG'  # environment specific

		config_fn_list = list(_get_default_config_fn_iter())
		log = logging.getLogger('config.sources.default')
		log.log(logging.DEBUG1, 'Possible default config files: %s', str.join(', ', config_fn_list))
		config_fn_iter = imap(lambda fn: resolve_path(fn, must_exist=False), config_fn_list)
		FileConfigFiller.__init__(self, lfilter(os.path.exists, config_fn_iter), add_search_path=False)
	def _fill_content_deep(self, config_fn, search_path_list, content_configfile):
		log = logging.getLogger(('config.%s' % get_file_name(config_fn)).rstrip('.').lower())
		log.log(logging.INFO1, 'Reading config file %s', config_fn)
		config_fn = resolve_path(config_fn, search_path_list, exception_type=ConfigError)
		config_str_list = list(SafeFile(config_fn).iter_close())

		# Single pass, non-recursive list retrieval
		tmp_content_configfile = {}
		self._fill_content_shallow(config_fn, config_str_list,
			search_path_list, tmp_content_configfile)

		def _get_list_shallow(section, option):
			for (opt, value, _) in tmp_content_configfile.get(section, []):
				if opt == option:
					for entry in parse_list(value, None):
						yield entry

		search_path_list_new = [os.path.dirname(config_fn)]
		# Add entries from include statement recursively
		for include_fn in _get_list_shallow('global', 'include'):
			self._fill_content_deep(include_fn, search_path_list + search_path_list_new, content_configfile)
		# Process all other entries in current file
		self._fill_content_shallow(config_fn, config_str_list, search_path_list, content_configfile)
		# Override entries in current config file
		for override_fn in _get_list_shallow('global', 'include override'):
			self._fill_content_deep(override_fn, search_path_list + search_path_list_new, content_configfile)
		# Filter special global options
		if content_configfile.get('global', []):
			def _ignore_includes(opt_v_s_tuple):
				return opt_v_s_tuple[0] not in ['include', 'include override']
			content_configfile['global'] = lfilter(_ignore_includes, content_configfile['global'])
		return search_path_list + search_path_list_new
Example #18
 def getEntries(self, path, metadata, events, seList, objStore):
     datacachePath = os.path.join(objStore.get('GC_WORKDIR', ''), 'datacache.dat')
     source = utils.QM((self._source == '') and os.path.exists(datacachePath),
                       datacachePath, self._source)
     if source and (source not in self._lfnMap):
         pSource = DataProvider.createInstance('ListProvider', createConfig(), source)
         for (n, fl) in imap(lambda b: (b[DataProvider.Dataset], b[DataProvider.FileList]),
                             pSource.getBlocks()):
             self._lfnMap.setdefault(source, {}).update(
                 dict(imap(lambda fi: (self.lfnTrans(fi[DataProvider.URL]), n), fl)))
     pList = set()
     for key in ifilter(lambda k: k in metadata, self._parentKeys):
         pList.update(imap(lambda pPath: self._lfnMap.get(source, {}).get(self.lfnTrans(pPath)),
                           metadata[key]))
     metadata['PARENT_PATH'] = lfilter(identity, pList)
     yield (path, metadata, events, seList, objStore)
Example #19
	def __init__(self, config, job_db, task):
		map_cat2jobs = {}
		map_cat2desc = {}
		job_config_dict = {}
		vn_list = []
		for jobnum in job_db.get_job_list():
			if task:
				job_config_dict = task.get_job_dict(jobnum)
			vn_list = lfilter(self._is_not_ignored_vn, sorted(job_config_dict.keys()))
			cat_key = str.join('|', imap(lambda vn: '%s=%s' % (vn, job_config_dict[vn]), vn_list))
			map_cat2jobs.setdefault(cat_key, []).append(jobnum)
			if cat_key not in map_cat2desc:
				map_cat2desc[cat_key] = dict(imap(lambda var: (var, job_config_dict[var]), vn_list))
		# Kill redundant keys from description - seed with last vn_list
		common_var_dict = dict(imap(lambda var: (var, job_config_dict[var]), vn_list))
		for cat_key in map_cat2desc:
			for key in list(common_var_dict.keys()):
				if key not in map_cat2desc[cat_key].keys():
					common_var_dict.pop(key)
				elif common_var_dict[key] != map_cat2desc[cat_key][key]:
					common_var_dict.pop(key)
		for cat_key in map_cat2desc:
			for common_key in common_var_dict:
				map_cat2desc[cat_key].pop(common_key)
		# Generate job-category map with efficient int keys - catNum becomes the new cat_key
		self._job2cat = {}
		self._map_cat2desc = {}
		for cat_num, cat_key in enumerate(sorted(map_cat2jobs)):
			self._map_cat2desc[cat_num] = map_cat2desc[cat_key]
			self._job2cat.update(dict.fromkeys(map_cat2jobs[cat_key], cat_num))
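The category key built in the loop above is just the variable assignment flattened into a string; a toy illustration with a hypothetical job dict:

job_config_dict = {'DATASETNICK': 'mc_qcd', 'GC_RETRY': '2'}
vn_list = sorted(job_config_dict.keys())
cat_key = str.join('|', imap(lambda vn: '%s=%s' % (vn, job_config_dict[vn]), vn_list))
# cat_key == 'DATASETNICK=mc_qcd|GC_RETRY=2'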
Example #20
    def _processReplicas(self, blockPath, replica_infos):
        def empty_with_warning(*args):
            self._log.warning(*args)
            return []

        def expanded_replica_locations(replica_infos):
            for replica_info in replica_infos:
                for entry in self._replicaLocation(replica_info):
                    yield entry

        if not replica_infos:
            return empty_with_warning(
                'Dataset block %r has no replica information!', blockPath)
        replica_infos_selected = self._phedexFilter.filterList(
            replica_infos, key=itemgetter(0))
        if not replica_infos_selected:
            return empty_with_warning(
                'Dataset block %r is not available at the selected locations!\nAvailable locations: %s',
                blockPath, str.join(', ', self._fmtLocations(replica_infos)))
        if not self._onlyComplete:
            return list(expanded_replica_locations(replica_infos_selected))
        replica_infos_complete = lfilter(lambda nn_nh_c: nn_nh_c[2],
                                         replica_infos_selected)
        if not replica_infos_complete:
            return empty_with_warning(
                'Dataset block %r is not completely available at the selected locations!\nAvailable locations: %s',
                blockPath, str.join(', ', self._fmtLocations(replica_infos)))
        return list(expanded_replica_locations(replica_infos_complete))
Example #21
def _remove_all_overlap(data):
    def _center_of_mass(data):
        wsum_x = sum(imap(lambda pt: pt['x'] * pt['weight'], data))
        wsum_y = sum(imap(lambda pt: pt['y'] * pt['weight'], data))
        sum_w = sum(imap(lambda pt: pt['weight'], data))
        return {'x': wsum_x / sum_w, 'y': wsum_y / sum_w}

    def _check_overlap(pos_a, pos_b):
        return _dist_sqr(pos_a, pos_b) < (pos_a['weight'] + pos_b['weight'])**2

    def _dist_sqr(pos_a, pos_b):
        return (pos_a['x'] - pos_b['x'])**2 + (pos_a['y'] - pos_b['y'])**2

    def _remove_overlap(fix, pos_a):
        vec = {'x': pos_a['x'] + fix['x'], 'y': pos_a['y'] + fix['y']}
        norm = math.sqrt(_dist_sqr(vec, {'x': 0, 'y': 0})) * 1000
        vec = {'x': vec['x'] / norm, 'y': vec['y'] / norm}
        for pos_ref in result:
            while _check_overlap(pos_ref, pos_a):
                pos_a['x'] = pos_a['x'] + vec['x'] * (random.random() - 0.25)
                pos_a['y'] = pos_a['y'] + vec['y'] * (random.random() - 0.25)
        return pos_a

    result = []
    data = sorted(data, key=lambda x: -x['weight'])
    for pos_ref in data:
        collisions = lfilter(lambda x: _check_overlap(x, pos_ref), result)
        if collisions:
            result.append(_remove_overlap(_center_of_mass(collisions),
                                          pos_ref))
        else:
            result.append(pos_ref)
    return result
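A toy invocation of the overlap removal with two hypothetical weighted points (assumes the math/random imports the function above relies on):

points = [{'x': 0.0, 'y': 0.0, 'weight': 0.3}, {'x': 0.5, 'y': 0.0, 'weight': 0.3}]
separated = _remove_all_overlap(points)
# the second point is nudged along a jittered direction until the
# weight circles (radius == weight) no longer intersect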
Example #22
def splitBlackWhiteList(bwfilter):
    blacklist = lmap(
        lambda x: x[1:],
        ifilter(lambda x: x.startswith('-'), QM(bwfilter, bwfilter, [])))
    whitelist = lfilter(lambda x: not x.startswith('-'),
                        QM(bwfilter, bwfilter, []))
    return (blacklist, whitelist)
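Usage is straightforward: entries prefixed with '-' land in the blacklist (prefix stripped), everything else in the whitelist:

(blacklist, whitelist) = splitBlackWhiteList(['siteA', '-siteB', 'siteC'])
# blacklist == ['siteB'], whitelist == ['siteA', 'siteC']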
Example #23
	def _fillContentWithIncludes(self, configFile, searchPaths, configContent):
		log = logging.getLogger(('config.%s' % utils.getRootName(configFile)).rstrip('.').lower())
		log.log(logging.INFO1, 'Reading config file %s', configFile)
		configFile = utils.resolvePath(configFile, searchPaths, ErrorClass = ConfigError)
		configFileLines = SafeFile(configFile).readlines()

		# Single pass, non-recursive list retrieval
		tmpConfigContent = {}
		self._fillContentSingleFile(configFile, configFileLines, searchPaths, tmpConfigContent)
		def getFlatList(section, option):
			for (opt, value, src) in tmpConfigContent.get(section, []):
				try:
					if opt == option:
						for entry in parseList(value, None):
							yield entry
				except Exception:
					raise ConfigError('Unable to parse [%s] %s from %s' % (section, option, src))

		newSearchPaths = [os.path.dirname(configFile)]
		# Add entries from include statement recursively
		for includeFile in getFlatList('global', 'include'):
			self._fillContentWithIncludes(includeFile, searchPaths + newSearchPaths, configContent)
		# Process all other entries in current file
		self._fillContentSingleFile(configFile, configFileLines, searchPaths, configContent)
		# Override entries in current config file
		for overrideFile in getFlatList('global', 'include override'):
			self._fillContentWithIncludes(overrideFile, searchPaths + newSearchPaths, configContent)
		# Filter special global options
		if configContent.get('global', []):
			configContent['global'] = lfilter(lambda opt_v_s: opt_v_s[0] not in ['include', 'include override'], configContent['global'])
		return searchPaths + newSearchPaths
Example #24
	def processBlock(self, block):
		if self._emptyFiles:
			block[DataProvider.FileList] = lfilter(lambda fi: fi[DataProvider.NEntries] != 0, block[DataProvider.FileList])
		if self._emptyBlock:
			if (block[DataProvider.NEntries] == 0) or not block[DataProvider.FileList]:
				return
		return block
Example #26
	def __new__(cls, *psrc_list):
		def _select_extensive_psrc(psrc):
			return (not isinstance(psrc, int)) and (psrc.get_parameter_len() is not None)
		psrc_list = _strip_null_sources(psrc_list)
		if len(lfilter(_select_extensive_psrc, psrc_list)) < 2:
			return ZipLongParameterSource(*psrc_list)
		return MultiParameterSource.__new__(cls, *psrc_list)
Example #27
	def _getCategoryStateSummary(self):
		(catStateDict, catDescDict, catSubcatDict) = CategoryBaseReport._getCategoryStateSummary(self)
		# Used for quick calculations
		catLenDict = {}
		for catKey in catStateDict:
			catLenDict[catKey] = sum(catStateDict[catKey].values())
		# Merge successfully completed categories
		self._mergeCats(catStateDict, catDescDict, catSubcatDict, catLenDict,
			'Completed subtasks', lfilter(lambda catKey:
				(len(catStateDict[catKey]) == 1) and (Job.SUCCESS in catStateDict[catKey]), catStateDict))
		# Next merge steps shouldn't see non-dict catKeys in catDescDict
		hiddenDesc = {}
		for catKey in ifilter(lambda catKey: not isinstance(catDescDict[catKey], dict), list(catDescDict)):
			hiddenDesc[catKey] = catDescDict.pop(catKey)
		# Merge categories till goal is reached
		self._mergeCatsWithGoal(catStateDict, catDescDict, catSubcatDict, catLenDict, hiddenDesc)
		# Remove redundant variables from description
		varKeyResult = self._getKeyMergeResults(catDescDict)
		self._clearCategoryDesc(varKeyResult, catDescDict)
		# Restore hidden descriptions
		catDescDict.update(hiddenDesc)
		# Enforce category maximum - merge categories with the least amount of jobs
		if len(catStateDict) != self._catMax:
			self._mergeCats(catStateDict, catDescDict, catSubcatDict, catLenDict, 'Remaining subtasks',
				sorted(catStateDict, key = lambda catKey: -catLenDict[catKey])[self._catMax - 1:])
		# Finalize descriptions:
		if len(catDescDict) == 1:
			catDescDict[list(catDescDict.keys())[0]] = 'All jobs'
		return (catStateDict, catDescDict, catSubcatDict)
Example #28
    def _display_setup(self, dataset_fn, head):
        if os.path.exists(dataset_fn):
            nick_name_set = set()
            for block in DataProvider.load_from_file(
                    dataset_fn).get_block_list_cached(show_stats=False):
                nick_name_set.add(block[DataProvider.Nickname])
            self._log.info('Mapping between nickname and other settings:')
            report = []

            def _get_dataset_lookup_psrc(psrc):
                is_lookup_cls = isinstance(
                    psrc,
                    ParameterSource.get_class('LookupBaseParameterSource'))
                return is_lookup_cls and ('DATASETNICK'
                                          in psrc.get_parameter_deps())

            ps_lookup = lfilter(_get_dataset_lookup_psrc,
                                self._source.get_used_psrc_list())
            for nick in sorted(nick_name_set):
                tmp = {'DATASETNICK': nick}
                for src in ps_lookup:
                    src.fill_parameter_content(None, tmp)
                tmp[1] = str.join(
                    ', ',
                    imap(os.path.basename,
                         self._nm_cfg.lookup(nick, '', is_selector=False)))
                tmp[2] = str_lumi_nice(
                    self._nm_lumi.lookup(nick, '', is_selector=False))
                report.append(tmp)
            ConsoleTable.create(head, report, 'cl')
def remove_all_overlap(data):
    dist2 = lambda a, b: (a['x'] - b['x'])**2 + (a['y'] - b['y'])**2
    check_overlap = lambda a, b: dist2(a, b) < (a['weight'] + b['weight'])**2

    def remove_overlap(fix, a):
        vec = {'x': a['x'] + fix['x'], 'y': a['y'] + fix['y']}
        norm = math.sqrt(dist2(vec, {'x': 0, 'y': 0})) * 1000
        vec = {'x': vec['x'] / norm, 'y': vec['y'] / norm}
        for pt in result:
            while check_overlap(pt, a):
                a['x'] = a['x'] + vec['x'] * (random.random() - 0.25)
                a['y'] = a['y'] + vec['y'] * (random.random() - 0.25)
        return a

    def center_of_mass(data):
        wsum_x = sum(imap(lambda pt: pt['x'] * pt['weight'], data))
        wsum_y = sum(imap(lambda pt: pt['y'] * pt['weight'], data))
        sum_w = sum(imap(lambda pt: pt['weight'], data))
        return {'x': wsum_x / sum_w, 'y': wsum_y / sum_w}

    result = []
    data = sorted(data, key=lambda x: -x['weight'])
    for pt in data:
        collisions = lfilter(lambda x: check_overlap(x, pt), result)
        if collisions:
            result.append(remove_overlap(center_of_mass(collisions), pt))
        else:
            result.append(pt)
    return result
def remove_all_overlap(data):
	dist2 = lambda a, b: (a['x'] - b['x'])**2 + (a['y'] - b['y'])**2
	check_overlap = lambda a, b: dist2(a, b) < (a['weight'] + b['weight'])**2
	def remove_overlap(fix, a):
		vec = {'x': a['x'] + fix['x'], 'y': a['y'] + fix['y']}
		norm = math.sqrt(dist2(vec, {'x': 0, 'y': 0})) * 1000
		vec = {'x': vec['x'] / norm, 'y': vec['y'] / norm}
		for pt in result:
			while check_overlap(pt, a):
				a['x'] = a['x'] + vec['x'] * (random.random() - 0.25)
				a['y'] = a['y'] + vec['y'] * (random.random() - 0.25)
		return a
	def center_of_mass(data):
		wsum_x = sum(imap(lambda pt: pt['x']*pt['weight'], data))
		wsum_y = sum(imap(lambda pt: pt['y']*pt['weight'], data))
		sum_w = sum(imap(lambda pt: pt['weight'], data))
		return {'x': wsum_x / sum_w, 'y': wsum_y / sum_w}

	result = []
	data = sorted(data, key = lambda x: -x['weight'])
	for pt in data:
		collisions = lfilter(lambda x: check_overlap(x, pt), result)
		if collisions:
			result.append(remove_overlap(center_of_mass(collisions), pt))
		else:
			result.append(pt)
	return result
Example #32
	def _broker(self, reqs, items):
		if not self._item_list_discovered:
			return FilterBroker._broker(self, reqs, self._item_list_start)  # Use user constrained items

		# Match items which fulfill the requirements
		def _matcher(props):
			for key, value in reqs:
				if props.get(key) is None:
					continue
				if value >= props[key]:
					return False
			return True
		# Apply sort order and give matching entries as preselection to FilterBroker
		items = lfilter(lambda x: _matcher(self._item_list_discovered[x]),
			self._item_list_start or self._item_list_sorted)
		return FilterBroker._broker(self, reqs, lfilter(items.__contains__, self._item_list_sorted))
    def __init__(self):
        # Collect host / user / installation specific config files
        def resolve_hostname():
            import socket
            host = socket.gethostname()
            try:
                return socket.gethostbyaddr(host)[0]
            except Exception:
                return host

        try:
            host = hang_protection(resolve_hostname, timeout=5)
            hostCfg = lmap(lambda c: utils.pathPKG('../config/%s.conf' % host.split('.', c)[-1]),
                           irange(host.count('.') + 1, -1, -1))
        except TimeoutException:
            sys.stderr.write('System call to resolve hostname is hanging!\n')
            sys.stderr.flush()
            hostCfg = []
        defaultCfg = [
            '/etc/grid-control.conf', '~/.grid-control.conf',
            utils.pathPKG('../config/default.conf')
        ]
        if os.environ.get('GC_CONFIG'):
            defaultCfg.append('$GC_CONFIG')
        log = logging.getLogger('config.default')
        log.log(logging.DEBUG1, 'Possible default config files: %s',
                str.join(', ', defaultCfg))
        fqConfigFiles = lmap(lambda p: utils.resolvePath(p, mustExist=False),
                             hostCfg + defaultCfg)
        FileConfigFiller.__init__(self,
                                  lfilter(os.path.exists, fqConfigFiles),
                                  addSearchPath=False)
Example #34
	def _get_possible_merge_categories(self, map_cat2desc):
		# Get dictionary with categories that will get merged when removing a variable
		def _eq_dict(dict_a, dict_b, key):
			# Merge parameters to reach category goal - NP hard problem, so be greedy and quick!
			dict_a = dict(dict_a)
			dict_b = dict(dict_b)
			dict_a.pop(key)
			dict_b.pop(key)
			return dict_a == dict_b

		var_key_result = {}
		cat_key_search_dict = {}
		for cat_key in map_cat2desc:
			for var_key in map_cat2desc[cat_key]:
				if var_key not in cat_key_search_dict:
					cat_key_search = set(map_cat2desc.keys())
				else:
					cat_key_search = cat_key_search_dict[var_key]
				if cat_key_search:
					matches = lfilter(lambda ck: _eq_dict(map_cat2desc[cat_key],
						map_cat2desc[ck], var_key), cat_key_search)
					if matches:
						cat_key_search_dict[var_key] = cat_key_search.difference(set(matches))
						var_key_result.setdefault(var_key, []).append(matches)
		return var_key_result
Example #35
def list_parameters(psrc, opts):
    (psp_list, need_gc_param) = get_parameters(opts, psrc)
    enabled_vn_list = opts.output.split(',')
    meta_list = lfilter(lambda k: (k in enabled_vn_list) or not opts.output,
                        psrc.get_job_metadata())
    tracked_vn_list = lmap(lambda k: k.value,
                           ifilter(lambda k: not k.untracked, meta_list))
    untracked_vn_list = lmap(lambda k: k.value,
                             ifilter(lambda k: k.untracked, meta_list))

    if opts.collapse > 0:
        (header_list, psp_list) = collapse_psp_list(psp_list, tracked_vn_list,
                                                    opts)
    else:
        header_list = [('GC_JOB_ID', '#')]
        if need_gc_param:
            header_list.append(('GC_PARAM', 'GC_PARAM'))
    if opts.active:
        header_list.append((ParameterInfo.ACTIVE, 'ACTIVE'))
    if opts.visible:
        tracked_vn_list = opts.visible.split(',')
    header_list.extend(sorted(izip(tracked_vn_list, tracked_vn_list)))
    if opts.untracked:
        header_list.extend(
            sorted(
                imap(
                    lambda n: (n, '(%s)' % n),
                    ifilter(lambda n: n not in ['GC_PARAM', 'GC_JOB_ID'],
                            untracked_vn_list))))
    ConsoleTable.create(header_list, psp_list)
Example #36
 def freezeConfig(self, writeConfig=True):
     self._curContainer.setReadOnly()
     # Inform the user about unused options
     unused = lfilter(
         lambda entry: ('!' not in entry.section) and not entry.accessed,
         self._view.iterContent())
     log = logging.getLogger('config.freeze')
     if unused:
         log.log(logging.INFO1, 'There are %s unused config options!',
                 len(unused))
     for entry in unused:
         log.log(logging.INFO1, '\t%s', entry.format(printSection=True))
     if writeConfig or not os.path.exists(self._oldCfgPath):
         if not os.path.exists(os.path.dirname(self._oldCfgPath)):
             os.makedirs(os.path.dirname(self._oldCfgPath))
         # Write user friendly, flat config file and config file with saved settings
         self._write_file(self._flatCfgPath,
                          printDefault=False,
                          printUnused=False,
                          printMinimal=True)
         self._write_file(
             self._oldCfgPath,
             printDefault=True,
             printUnused=True,
             printMinimal=True,
             printSource=True,
             message=
             '; ==> DO NOT EDIT THIS FILE! <==\n; This file is used to find config changes!\n'
         )
Example #37
    def _getKeyMergeResults(self, catDescDict):
        # Merge parameters to reach category goal - NP hard problem, so be greedy and quick!
        def eqDict(a, b, k):
            a = dict(a)
            b = dict(b)
            a.pop(k)
            b.pop(k)
            return a == b

        varKeyResult = {}
        catKeySearchDict = {}
        for catKey in catDescDict:
            for varKey in catDescDict[catKey]:
                if varKey not in catKeySearchDict:
                    catKeySearch = set(catDescDict.keys())
                else:
                    catKeySearch = catKeySearchDict[varKey]
                if catKeySearch:
                    matches = lfilter(
                        lambda ck: eqDict(catDescDict[catKey], catDescDict[ck],
                                          varKey), catKeySearch)
                    if matches:
                        catKeySearchDict[varKey] = catKeySearch.difference(
                            set(matches))
                        varKeyResult.setdefault(varKey, []).append(matches)
        return varKeyResult
Example #38
	def _processCfg(self, tar, cfg):
		cfgSummary = {}
		cfgContent = bytes2str(tar.extractfile('%s/config' % cfg).read())
		cfgHashResult = bytes2str(tar.extractfile('%s/hash' % cfg).read()).splitlines()
		cfgHash = cfgHashResult[-1].strip()
		cfgSummary = {'CMSSW_CONFIG_FILE': cfg, 'CMSSW_CONFIG_HASH': cfgHash}
		cfgSummary['CMSSW_CONFIG_CONTENT'] = self._cfgStore.setdefault(cfgSummary[self._mergeKey], cfgContent)
		# Read global tag from config file - first from hash file, then from config file
		if cfgHash not in self._gtStore:
			gtLines = lfilter(lambda x: x.startswith('globaltag:'), cfgHashResult)
			if gtLines:
				self._gtStore[cfgHash] = gtLines[-1].split(':')[1].strip()
		if cfgHash not in self._gtStore:
			try:
				cfgContentEnv = utils.execWrapper(cfgContent)
				self._gtStore[cfgHash] = cfgContentEnv['process'].GlobalTag.globaltag.value()
			except Exception:
				self._gtStore[cfgHash] = 'unknown:All'
		cfgSummary['CMSSW_GLOBALTAG'] = self._gtStore[cfgHash]
		# Get annotation from config content
		def searchConfigFile(key, regex, default):
			# Store the first capture group (quotes stripped) or the default;
			# search the config content itself, not a match object
			match = re.compile(regex).search(cfgContent)
			if match:
				cfgSummary[key] = match.group(1).strip('\"\' ')
			else:
				cfgSummary[key] = default
		searchConfigFile('CMSSW_ANNOTATION', r'.*annotation.*=.*cms.untracked.string.*\((.*)\)', None)
		searchConfigFile('CMSSW_DATATIER', r'.*dataTier.*=.*cms.untracked.string.*\((.*)\)', 'USER')
		cfgReport = xml.dom.minidom.parseString(bytes2str(tar.extractfile('%s/report.xml' % cfg).read()))
		evRead = sum(imap(lambda x: int(readTag(x, 'EventsRead')), cfgReport.getElementsByTagName('InputFile')))
		return (cfgSummary, cfgReport, evRead)
Example #39
	def format(self, printSection = False, printDefault = False, default = noDefault, source = '', wraplen = 33):
		if (self.value == noDefault) or (not printDefault and (self.value == default)):
			return ''
		if printSection:
			prefix = '[%s] %s' % (self.section, self.option)
		else:
			prefix = self.option
		prefix += ' %s' % self.opttype

		line_list = lfilter(lambda x: x != '', imap(str.strip, self.value.strip().splitlines()))
		if not line_list:
			line_list = [prefix] # just prefix - without trailing whitespace
		elif len(line_list) > 1:
			line_list = [prefix] + line_list # prefix on first line - rest on other lines
		else:
			line_list = [prefix + ' ' + line_list[0]] # everything on one line

		result = ''
		for line in line_list:
			if not result: # first line:
				if source and (len(line) >= wraplen):
					result += '; source: ' + source + '\n'
				elif source:
					result = line.ljust(wraplen) + '  ; ' + source + '\n'
					continue
			else:
				result += '\t'
			result += line + '\n'
		return result.rstrip()
Example #40
	def _match_entries(self, container, option_list=None):
		key_list = container.get_options()
		if option_list is not None:
			key_list = lfilter(key_list.__contains__, option_list)

		def _get_entry_key_ordered(entry):
			return (tuple(imap(_remove_none, _get_section_key_filtered(entry))), entry.order)

		def _get_section_key_filtered(entry):
			return self._get_section_key(entry.section.replace('!', '').strip())

		def _remove_none(key):
			if key is None:
				return -1
			return key

		def _select_sections(entry):
			return _get_section_key_filtered(entry) is not None

		result = []
		for key in key_list:
			(entries, entries_reverse) = ([], [])
			for entry in container.iter_config_entries(key, _select_sections):
				if entry.section.endswith('!'):
					entries_reverse.append(entry)
				else:
					entries.append(entry)
			result.extend(sorted(entries_reverse, key=_get_entry_key_ordered, reverse=True))
			result.extend(sorted(entries, key=_get_entry_key_ordered))
		return result
Example #41
    def _process_replica_list(self, block_path, replica_infos):
        def _empty_with_warning(error_msg, *args):
            self._log.warning('Dataset block %r ' + error_msg, block_path,
                              *args)
            return []

        def _expanded_replica_locations(replica_infos):
            for replica_info in replica_infos:
                for entry in self._iter_replica_locations(replica_info):
                    yield entry

        if not replica_infos:
            return _empty_with_warning('has no replica information!')
        replica_infos_selected = self._phedex_filter.filter_list(
            replica_infos, key=itemgetter(0))
        if not replica_infos_selected:
            return _empty_with_warning(
                'is not available at the selected locations!\n' +
                'Available locations: %s',
                str.join(', ', self._iter_formatted_locations(replica_infos)))
        if not self._only_complete:
            return list(_expanded_replica_locations(replica_infos_selected))
        replica_infos_complete = lfilter(lambda nn_nh_c: nn_nh_c[2],
                                         replica_infos_selected)
        if not replica_infos_complete:
            return _empty_with_warning(
                'is not completely available at the selected locations!\n' +
                'Available locations: %s',
                str.join(', ', self._iter_formatted_locations(replica_infos)))
        return list(_expanded_replica_locations(replica_infos_complete))
Example #42
    def _matchEntries(self, container, option_list=None):
        key_list = container.getKeys()
        if option_list is not None:
            key_list = lfilter(lambda key: key in key_list, option_list)

        result = []
        getFilteredSectionKey = lambda entry: self._getSectionKey(
            entry.section.replace('!', '').strip())

        def removeNone(key):
            if key is None:
                return -1
            return key

        getOrderedEntryKey = lambda entry: (tuple(
            imap(removeNone, getFilteredSectionKey(entry))), entry.order)
        for key in key_list:
            (entries, entries_reverse) = ([], [])
            for entry in container.getEntries(
                    key, lambda x: getFilteredSectionKey(x) is not None):
                if entry.section.endswith('!'):
                    entries_reverse.append(entry)
                else:
                    entries.append(entry)
            result.extend(
                sorted(entries_reverse, key=getOrderedEntryKey, reverse=True))
            result.extend(sorted(entries, key=getOrderedEntryKey))
        return result
Example #43
def str_dict_linear(mapping, keys_order=None):
	keys_sorted = sorted(mapping.keys(), key=repr)
	if keys_order is None:
		keys_order = keys_sorted
	else:
		keys_order = list(keys_order)
	keys_order.extend(lfilter(lambda x: x not in keys_order, keys_sorted))
	return str.join(', ', imap(lambda k: '%s = %s' % (k, repr(mapping.get(k))), keys_order))
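A quick example: keys from keys_order come first, remaining keys follow in repr-sorted order:

print(str_dict_linear({'b': 1, 'a': 2}, keys_order=['b']))
# -> b = 1, a = 2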
Example #44
 def cancelJobs(self, wmsJobIdList):
     if not len(wmsJobIdList):
         return  # end the generator early (PEP 479: don't raise StopIteration here)
     activity = Activity('Canceling jobs')
     assert not bool(
         lfilter(lambda htcid: htcid.scheddURI != self._schedd.getURI(),
                 self._splitGcRequests(wmsJobIdList))
     ), 'Bug! Got jobs at Schedds %s, but servicing only Schedd %s' % (
         lfilter(
             lambda itr: itr.scheddURI != self._schedd.getURI(),
             self._splitGcRequests(wmsJobIdList)), self._schedd.getURI())
     canceledJobs = self._schedd.cancelJobs(
         self._splitGcRequests(wmsJobIdList))
     # Yield ( jobNum, wmsID) for canceled jobs
     for htcJobID in canceledJobs:
         yield (htcJobID.gcJobNum, self._createGcId(htcJobID))
     activity.finish()
Example #45
 def _getJobsOutput(self, wmsJobIdList):
     if not len(wmsJobIdList):
         return  # end the generator early (PEP 479: don't raise StopIteration here)
     activity = Activity('Fetching jobs')
     assert not bool(
         lfilter(lambda htcid: htcid.scheddURI != self._schedd.getURI(),
                 self._splitGcRequests(wmsJobIdList))
     ), 'Bug! Got jobs at Schedds %s, but servicing only Schedd %s' % (
         lfilter(
             lambda itr: itr.scheddURI != self._schedd.getURI(),
             self._splitGcRequests(wmsJobIdList)), self._schedd.getURI())
     returnedJobs = self._schedd.getJobsOutput(
         self._splitGcRequests(wmsJobIdList))
     # Yield (jobNum, outputPath) per retrieved job
     for htcID in returnedJobs:
         yield (htcID.gcJobNum, self.getSandboxPath(htcID.gcJobNum))
     activity.finish()
Example #46
def resolve_install_path(path):
	os_path_list = UniqueList(os.environ['PATH'].split(os.pathsep))
	result = resolve_paths(path, os_path_list, True, PathError)
	result_exe = lfilter(lambda fn: os.access(fn, os.X_OK), result)  # filter executable files
	if not result_exe:
		raise PathError('Files matching %s:\n\t%s\nare not executable!' % (
			path, str.join('\n\t', result)))
	return result_exe[0]
	def __init__(self, arg, **kwargs):
		predef = {
			'TODO': 'SUBMITTED,WAITING,READY,QUEUED,UNKNOWN',
			'ALL': str.join(',', Job.enum_name_list)
		}
		self._state_list = []
		for selector_str in predef.get(arg.upper(), arg).split(','):
			state_name_list = lfilter(re.compile('^%s.*' % selector_str.upper()).match, Job.enum_name_list)
			self._state_list.extend(imap(Job.str2enum, state_name_list))
Example #48
def _translate_pa2pspi_list(padapter):
	# Reduces parameter adapter output to essential information for diff - faster than keying
	meta_iter = ifilter(lambda k: not k.untracked, padapter.get_job_metadata())
	meta_list = sorted(meta_iter, key=lambda k: k.value)

	for psp in padapter.iter_jobs():  # Translates parameter space point into hash
		psp_item_iter = imap(lambda meta: (meta.value, psp.get(meta.value)), meta_list)
		hash_str = md5_hex(repr(lfilter(itemgetter(1), psp_item_iter)))
		yield (psp[ParameterInfo.ACTIVE], hash_str, psp['GC_PARAM'])
	def report(self, jobNum):
		info = self.source.getJobInfo(jobNum)
		keys = lfilter(lambda k: not k.untracked, self.source.getJobKeys())
		result = utils.filterDict(info, kF = lambda k: k in keys)
		if self.dataSplitter:
			result.pop('DATASETSPLIT')
			result['Dataset'] = info.get('DATASETNICK', info.get('DATASETPATH', None))
		elif not keys:
			result[' '] = 'All jobs'
		return result
Example #50
	def processBlock(self, block):
		if self._emptyFiles:
			n_files = len(block[DataProvider.FileList])
			block[DataProvider.FileList] = lfilter(lambda fi: fi[DataProvider.NEntries] != 0, block[DataProvider.FileList])
			self._removedFiles += n_files - len(block[DataProvider.FileList])
		if self._emptyBlock:
			if (block[DataProvider.NEntries] == 0) or not block[DataProvider.FileList]:
				self._removedBlocks += 1
				return
		return block
Example #51
	def _init_psrc_max(self):
		self._psrc_info_list = []
		psrc_group_size = 1
		for (psrc, psrc_max) in izip(self._psrc_list, self._psrc_max_list):
			self._psrc_info_list.append((psrc, psrc_max, psrc_group_size))
			if psrc_max:
				psrc_group_size *= psrc_max
		psrc_max_list = lfilter(lambda n: n is not None, self._psrc_max_list)
		if psrc_max_list:
			return reduce(lambda a, b: a * b, psrc_max_list)
	def initMaxParameters(self):
		self.quickFill = []
		prev = 1
		for (psource, maxN) in izip(self._psourceList, self._psourceMaxList):
			self.quickFill.append((psource, maxN, prev))
			if maxN:
				prev *= maxN
		maxList = lfilter(lambda n: n is not None, self._psourceMaxList)
		if maxList:
			return reduce(lambda a, b: a * b, maxList)
	def _checkJobList(self, wms, jobList):
		if self._defect_tries:
			nDefect = len(self._defect_counter) # Waiting list gets larger in case reported == []
			waitList = self._sample(self._defect_counter, nDefect - max(1, int(nDefect / 2**self._defect_raster)))
			jobList = lfilter(lambda x: x not in waitList, jobList)

		(change, timeoutList, reported) = JobManager._checkJobList(self, wms, jobList)
		for jobNum in reported:
			self._defect_counter.pop(jobNum, None)

		if self._defect_tries and (change is not None):
			self._defect_raster = utils.QM(reported, 1, self._defect_raster + 1) # make 'raster' iteratively smaller
			for jobNum in ifilter(lambda x: x not in reported, jobList):
				self._defect_counter[jobNum] = self._defect_counter.get(jobNum, 0) + 1
			kickList = lfilter(lambda jobNum: self._defect_counter[jobNum] >= self._defect_tries, self._defect_counter)
			for jobNum in set(kickList + utils.QM((len(reported) == 0) and (len(jobList) == 1), jobList, [])):
				timeoutList.append(jobNum)
				self._defect_counter.pop(jobNum)

		return (change, timeoutList, reported)
	def process(self, pNum, splitInfo, result):
		locations = self._filter.filterList(splitInfo.get(DataSplitter.Locations))
		if self._preference:
			if not locations: # [] or None
				locations = self._preference
			elif any(imap(lambda x: x in self._preference, locations)): # preferred location available
				locations = lfilter(lambda x: x in self._preference, locations)
		if self._reqs and (locations is not None):
			result[ParameterInfo.REQS].append((WMS.STORAGE, locations))
		if self._disable:
			result[ParameterInfo.ACTIVE] = result[ParameterInfo.ACTIVE] and (locations != [])
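To close, a toy walk-through of the location-preference logic in this last example, with hypothetical site lists:

locations = ['SiteA', 'SiteB']
preference = ['SiteB', 'SiteC']
if any(imap(lambda x: x in preference, locations)):  # preferred location available
	locations = lfilter(lambda x: x in preference, locations)
# locations == ['SiteB']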