def _displaySetup(self, dsPath, head):
	# Print a table mapping each dataset nickname to its configured
	# config files and lumi filter (plus any basic lookup parameters)
	if not os.path.exists(dsPath):
		return
	nick_set = set()
	for block in DataProvider.loadFromFile(dsPath).getBlocks():
		nick_set.add(block[DataProvider.Nickname])
	log = logging.getLogger('user')
	log.info('Mapping between nickname and other settings:')
	(ps_basic, ps_nested) = self._pfactory.getLookupSources()
	if ps_nested:
		# Nested sources may hold several values per nickname - not shown here
		log.info('This list doesn\'t show "nickname constants" with multiple values!')
	rows = []
	for nick in sorted(nick_set):
		row = {'DATASETNICK': nick}
		for source in ps_basic:
			source.fillParameterInfo(None, row)
		cfg_files = self._nmCfg.lookup(nick, '', is_selector=False)
		row[1] = str.join(', ', imap(os.path.basename, cfg_files))
		row[2] = formatLumiNice(self._nmLumi.lookup(nick, '', is_selector=False))
		rows.append(row)
	utils.printTabular(head, rows, 'cl')
def display(self):
	# Summarize per-category job counts: one row per category, one
	# column per description key plus the four summary state columns
	(catStateDict, catDescDict, _) = CategoryBaseReport._getCategoryStateSummary(self)
	state_names = {Job.SUCCESS: 'SUCCESS', Job.FAILED: 'FAILED',
		Job.RUNNING: 'RUNNING', Job.DONE: 'RUNNING'}
	rows = []
	header_keys = set()
	for catKey in catDescDict:
		row = dict(catDescDict[catKey])
		header_keys.update(row.keys())
		for stateKey in catStateDict[catKey]:
			# Anything not explicitly mapped counts as WAITING
			name = state_names.get(stateKey, 'WAITING')
			row[name] = row.get(name, 0) + catStateDict[catKey][stateKey]
		rows.append(row)
	state_order = ['WAITING', 'RUNNING', 'FAILED', 'SUCCESS']
	count_fmt = dict.fromkeys(state_order, lambda x: '%7d' % parseStr(x, int, 0))
	utils.printTabular(lmap(lambda x: (x, x), sorted(header_keys) + state_order),
		rows, 'c' * len(header_keys), fmt=count_fmt)
def _displaySetup(self, dsPath, head):
	# Print a table mapping dataset nicknames to config files and lumi filters
	if not os.path.exists(dsPath):
		return
	nick_set = set()
	for block in DataProvider.loadFromFile(dsPath).getBlocks():
		nick_set.add(block[DataProvider.Nickname])
	utils.vprint('Mapping between nickname and other settings:\n', -1)
	rows = []
	for nick in sorted(nick_set):
		lumi_entries = formatLumi(self._nmLumi.lookup(nick, '', is_selector=False))
		if len(lumi_entries) > 4:
			# Collapse long lumi lists to "first ... last (count)"
			lumi_str = '%s ... %s (%d entries)' % (lumi_entries[0],
				lumi_entries[-1], len(lumi_entries))
		else:
			lumi_str = str.join(', ', lumi_entries)
		cfg_files = self._nmCfg.lookup(nick, '', is_selector=False)
		row = {0: nick, 1: str.join(', ', imap(os.path.basename, cfg_files)), 2: lumi_str}
		lookup_vars = {'DATASETNICK': nick}
		for source in self._pm.lookupSources:
			source.fillParameterInfo(None, lookup_vars)
		row.update(lookup_vars)
		rows.append(row)
	utils.printTabular(head, rows, 'cl')
	utils.vprint(level=-1)
def instrumentCfgQueue(self, cfgFiles, fragment, mustPrepare = False):
	"""Check config files for the needed GC variables and append the
	instrumentation fragment where necessary.

	cfgFiles: list of config file paths; fragment: path of the fragment
	to append; mustPrepare: raise ConfigError if no file ends up
	instrumented. Interface unchanged; fix: the original leaked every
	file handle it opened and read each config file twice in doInstrument.
	"""
	def readFile(fn):
		# Read a whole file, guaranteeing the handle is closed
		fp = open(fn, 'r')
		try:
			return fp.read()
		finally:
			fp.close()

	def isInstrumented(cfgName):
		# A file counts as instrumented if it references every needed
		# variable in either __VAR__ or @VAR@ form
		cfg = readFile(cfgName)
		for tag in self.neededVars():
			if (not '__%s__' % tag in cfg) and (not '@%s@' % tag in cfg):
				return False
		return True

	def doInstrument(cfgName):
		# Read once and reuse for both checks (original re-read the file)
		content = readFile(cfgName)
		if not isInstrumented(cfgName) or 'customise_for_gc' not in content:
			utils.vprint('Instrumenting...', os.path.basename(cfgName), -1)
			fragment_data = readFile(fragment)
			fp = open(cfgName, 'a')
			try:
				fp.write(fragment_data)
			finally:
				fp.close()
		else:
			utils.vprint('%s already contains customise_for_gc and all needed variables' % os.path.basename(cfgName), -1)

	# Display the instrumentation status of all files (paths shown
	# relative to their common prefix)
	cfgStatus = []
	comPath = os.path.dirname(os.path.commonprefix(cfgFiles))
	for cfg in cfgFiles:
		cfgStatus.append({0: cfg.split(comPath, 1)[1].lstrip('/'), 1: str(isInstrumented(cfg)), 2: cfg})
	utils.printTabular([(0, 'Config file'), (1, 'Instrumented')], cfgStatus, 'lc')

	for cfg in cfgFiles:
		if self.prepare or not isInstrumented(cfg):
			if self.prepare or utils.getUserBool('Do you want to prepare %s for running over the dataset?' % cfg, True):
				doInstrument(cfg)
	# any() replaces 'True in map(...)' - short-circuits, same result
	if mustPrepare and not any(map(isInstrumented, cfgFiles)):
		raise ConfigError('A config file must use %s to work properly!' %
			str.join(', ', map(lambda x: '@%s@' % x, self.neededVars())))
def _cfgFindUninitialized(self, config, cfgFiles, autoPrepare, mustPrepare):
	# Decide which config files must be copied into the work dir and/or
	# instrumented; returns list of (source, work-dir target, doPrepare)
	commonPath = os.path.dirname(os.path.commonprefix(cfgFiles))
	todo = []
	status = []
	for cfg in cfgFiles:
		workCfg = config.getWorkPath(os.path.basename(cfg))
		inWorkDir = os.path.exists(workCfg)
		if inWorkDir:
			# A work-dir copy exists - inspect that copy instead
			instrumented = self._cfgIsInstrumented(workCfg)
		else:
			instrumented = self._cfgIsInstrumented(cfg)
		prepare = (mustPrepare or autoPrepare) and not instrumented
		# Copy when missing from the work dir or when it needs preparation
		if prepare or not inWorkDir:
			todo.append((cfg, workCfg, prepare))
		status.append({1: cfg.split(commonPath, 1)[1].lstrip('/'),
			2: inWorkDir, 3: instrumented, 4: prepare})
	utils.vprint('', -1)
	utils.printTabular([(1, 'Config file'), (2, 'Work dir'),
		(3, 'Instrumented'), (4, 'Scheduled')], status, 'lccc')
	utils.vprint('', -1)
	return todo
def display(self):
	"""Render hierarchical per-category job state statistics as a table."""
	stateMap = dict(self._stateMap)

	def transform(data, label, level):
		# Recursively flatten the nested stats dict into table rows;
		# yields the separator markers '=' and '-' between sections
		if None in data:
			# The None key carries the aggregated totals for this level;
			# pop it so the loop below only sees real sub-categories
			total = data.pop(None)
			if (len(data) > 1):
				for result in self._get_entry(stateMap, total, ['Total']):
					yield result
				yield '='
		for idx, entry in enumerate(sorted(data)):
			if level == 1:
				# Leaf level: emit the actual entry rows
				for result in self._get_entry(stateMap, data[entry], [entry] + label):
					yield result
			else:
				for result in transform(data[entry], [entry] + label, level - 1):
					yield result
			if idx != len(data) - 1:
				yield '-'

	stats = self._getHierachicalStats()
	displayStates = lmap(itemgetter(1), self._stateMap)
	header = [('', 'Category')] + lzip(displayStates, displayStates)
	# Left-aligned category column, centered column per job state
	printTabular(header, transform(stats, [], len(self._idxList)),
		fmtString = 'l' + 'c'*len(stateMap), fmt = {'': lambda x: str.join(' ', x)})
	return 0
def _cfgFindUninitialized(self, config, cfgFiles, autoPrepare, mustPrepare):
	# Collect the config files that still need copying/instrumentation;
	# returns a list of (source path, work-dir path, doPrepare) tuples
	base = os.path.dirname(os.path.commonprefix(cfgFiles))
	pending = []
	table = []
	for cfg in cfgFiles:
		target = config.getWorkPath(os.path.basename(cfg))
		targetExists = os.path.exists(target)
		if targetExists:
			# Prefer the already-copied file when checking instrumentation
			instrumented = self._cfgIsInstrumented(target)
		else:
			instrumented = self._cfgIsInstrumented(cfg)
		needPrepare = (mustPrepare or autoPrepare) and not instrumented
		# Schedule a copy when the target is missing or needs preparation
		if needPrepare or not targetExists:
			pending.append((cfg, target, needPrepare))
		table.append({1: cfg.split(base, 1)[1].lstrip('/'),
			2: targetExists, 3: instrumented, 4: needPrepare})
	utils.vprint('', -1)
	utils.printTabular([(1, 'Config file'), (2, 'Work dir'),
		(3, 'Instrumented'), (4, 'Scheduled')], table, 'lccc')
	utils.vprint('', -1)
	return pending
def __init__(self, config, name):
	# Build the report header incrementally - a column is only added
	# for settings that are actually configured
	head = [(0, "Nickname")]

	# Mapping between nickname and config files:
	cfgList = config.get("nickname config", "")
	self.nmCfg = config.getDict(
		"nickname config", {},
		parser=lambda x: map(str.strip, x.split(",")),
		str=lambda x: str.join(",", x)
	)[0]
	if cfgList:
		# 'nickname config' supersedes the plain 'config file' option
		if "config file" in config.getOptions():
			raise ConfigError("Please use 'nickname config' instead of 'config file'")
		allConfigFiles = utils.flatten(self.nmCfg.values())
		config.set("config file", str.join("\n", allConfigFiles))
		head.append((1, "Config file"))

	# Mapping between nickname and constants: each listed variable gets
	# its own per-nickname value dictionary and its own report column
	self.nmCName = map(str.strip, config.get("nickname constants", "").split())
	self.nmConst = {}
	for var in self.nmCName:
		tmp = config.getDict(var, {})[0]
		for (nick, value) in tmp.items():
			if value:
				self.nmConst.setdefault(nick, {})[var] = value
			else:
				self.nmConst.setdefault(nick, {})[var] = ""
		head.append((var, var))

	# Mapping between nickname and lumi filter:
	if "lumi filter" in config.getOptions():
		raise ConfigError("Please use 'nickname lumi filter' instead of 'lumi filter'")
	lumiParse = lambda x: formatLumi(parseLumiFilter(x))
	self.nmLumi = config.getDict("nickname lumi filter", {}, parser=lumiParse)[0]
	if self.nmLumi:
		# Propagate the per-nickname lumi filter into each dataset section
		for dataset in config.get("dataset", "").splitlines():
			(datasetNick, datasetProvider, datasetExpr) = DataProvider.parseDatasetExpr(config, dataset, None)
			config.set(
				"dataset %s" % datasetNick,
				"lumi filter",
				str.join(",", utils.flatten(fromNM(self.nmLumi, datasetNick, []))),
			)
		# The None key holds the default filter for unmatched nicknames
		config.set("lumi filter", str.join(",", self.nmLumi.get(None, [])))
		head.append((2, "Lumi filter"))

	utils.vprint("Mapping between nickname and other settings:\n", -1)

	def report():
		# NOTE(review): dict.keys() concatenation relies on Python 2 list
		# semantics - keys() returns views in Python 3
		for nick in sorted(set(self.nmCfg.keys() + self.nmConst.keys() + self.nmLumi.keys())):
			tmp = {
				0: nick,
				1: str.join(", ", map(os.path.basename, self.nmCfg.get(nick, ""))),
				2: self.displayLumi(self.nmLumi.get(nick, "")),
			}
			yield utils.mergeDicts([tmp, self.nmConst.get(nick, {})])

	utils.printTabular(head, report(), "cl")
	utils.vprint(level=-1)
	# Parent initialization happens last so the derived config (set()
	# calls above) is already in place
	CMSSW.__init__(self, config, name)
def displayPluginList(clsList):
	# Show plugin names sorted case-insensitively; add an alias column
	# only when at least one plugin actually has an alias
	header = [('Name', 'Name')]
	fmtString = 'l'
	if any(entry['Alias'] for entry in clsList):
		header.append(('Alias', 'Alternate names'))
		fmtString = 'rl'
	utils.printTabular(header,
		sorted(clsList, key=lambda x: x['Name'].lower()), fmtString=fmtString)
def display(self):
	# List the effective configuration variables for each selected job:
	# task-level values overlaid with transient and per-job values
	taskConfig = self._task.getTaskConfig()
	header = lzip(taskConfig, taskConfig)
	for key in self._task.getTransientVars():
		header.append((key, '<%s>' % key))
	seen_vars = set()
	rows = []
	for jobNum in self._jobDB.getJobs(self._selector):
		jobConfig = self._task.getJobConfig(jobNum)
		seen_vars.update(jobConfig)
		row = dict(taskConfig)
		row.update(self._task.getTransientVars())
		row.update(jobConfig)
		rows.append(row)
	printTabular(sorted(header + lzip(seen_vars, seen_vars)), rows)
def display(self):
	# One row per job that has already left the INIT state, followed by
	# any extra detail rows contributed by _add_details
	rows = []
	for jobNum in self._jobs:
		jobObj = self._jobDB.getJob(jobNum)
		if not jobObj or (jobObj.state == Job.INIT):
			continue
		rows.append({0: jobNum, 1: Job.enum2str(jobObj.state), 2: jobObj.gcID})
		self._add_details(rows, jobObj)
	utils.printTabular(lzip(irange(3),
		['Job', 'Status / Attempt', 'Id / Destination']), rows, 'rcl')
def display(self):
	# Tabulate task, transient and per-job variables for selected jobs
	base_vars = self._task.getTaskConfig()
	columns = lzip(base_vars, base_vars)
	columns.extend([(key, '<%s>' % key) for key in self._task.getTransientVars()])
	job_vars = set()
	table = []
	for jobNum in self._jobDB.getJobs(self._selector):
		jobConfig = self._task.getJobConfig(jobNum)
		job_vars.update(jobConfig)
		# Layering order: task config < transient vars < job config
		merged = dict(base_vars)
		merged.update(self._task.getTransientVars())
		merged.update(jobConfig)
		table.append(merged)
	printTabular(sorted(columns + lzip(job_vars, job_vars)), table)
def display(self):
	# Aggregate job states per category and print one row per category
	(catStateDict, catDescDict, _) = CategoryBaseReport._getCategoryStateSummary(self)
	state_label = {Job.SUCCESS: 'SUCCESS', Job.FAILED: 'FAILED',
		Job.RUNNING: 'RUNNING', Job.DONE: 'RUNNING'}
	columns = set()
	rows = []
	for catKey in catDescDict:
		row = dict(catDescDict[catKey])
		columns.update(row.keys())
		for stateKey in catStateDict[catKey]:
			# Unmapped states are lumped together as WAITING
			label = state_label.get(stateKey, 'WAITING')
			row[label] = row.get(label, 0) + catStateDict[catKey][stateKey]
		rows.append(row)
	state_columns = ['WAITING', 'RUNNING', 'FAILED', 'SUCCESS']
	utils.printTabular(lmap(lambda x: (x, x), sorted(columns) + state_columns),
		rows, 'c' * len(columns),
		fmt = dict.fromkeys(state_columns, lambda x: '%7d' % parseStr(x, int, 0)))
def display(self):
	# Tabulate job number, state and WMS id; in verbose mode also list
	# the attempt history, otherwise just the last known destination
	rows = []
	for jobNum in self._jobs:
		jobObj = self._jobDB.get(jobNum)
		if not jobObj or (jobObj.state == Job.INIT):
			continue
		rows.append({0: jobNum, 1: Job.enum2str(jobObj.state), 2: jobObj.wmsId})
		if utils.verbosity() > 0:
			# Most recent attempts first
			attempts = jobObj.history.items()
			attempts.reverse()
			for (attempt, dest) in attempts:
				if dest != 'N/A':
					rows.append({1: attempt, 2: ' -> ' + dest})
		elif jobObj.get('dest', 'N/A') != 'N/A':
			rows.append({2: ' -> ' + jobObj.get('dest')})
	utils.printTabular(lzip(irange(3),
		['Job', 'Status / Attempt', 'Id / Destination']), rows, 'rcl')
def display(self):
	# Tabulate job number, state and WMS id; in verbose mode also list
	# the attempt history, otherwise just the last known destination
	rows = []
	for jobNum in self._jobs:
		jobObj = self._jobDB.get(jobNum)
		if not jobObj or (jobObj.state == Job.INIT):
			continue
		rows.append({0: jobNum, 1: Job.enum2str(jobObj.state), 2: jobObj.wmsId})
		if utils.verbosity() > 0:
			# Most recent attempts first
			attempts = jobObj.history.items()
			attempts.reverse()
			for (attempt, dest) in attempts:
				if dest != "N/A":
					rows.append({1: attempt, 2: " -> " + dest})
		elif jobObj.get("dest", "N/A") != "N/A":
			rows.append({2: " -> " + jobObj.get("dest")})
	utils.printTabular(zip(range(3),
		["Job", "Status / Attempt", "Id / Destination"]), rows, "rcl")
	utils.vprint()
def _displaySetup(self, dsPath, head):
	# Print the nickname -> settings mapping for an existing dataset cache
	if not os.path.exists(dsPath):
		return
	nick_set = set()
	for block in DataProvider.loadFromFile(dsPath).getBlocks(show_stats = False):
		nick_set.add(block[DataProvider.Nickname])
	log = logging.getLogger('user')
	log.info('Mapping between nickname and other settings:')
	(ps_basic, ps_nested) = self._pfactory.getLookupSources()
	if ps_nested:
		# Nested sources may hold several values per nickname - not shown here
		log.info('This list doesn\'t show "nickname constants" with multiple values!')
	rows = []
	for nick in sorted(nick_set):
		row = {'DATASETNICK': nick}
		for source in ps_basic:
			source.fillParameterInfo(None, row)
		row[1] = str.join(', ', imap(os.path.basename,
			self._nmCfg.lookup(nick, '', is_selector = False)))
		row[2] = formatLumiNice(self._nmLumi.lookup(nick, '', is_selector = False))
		rows.append(row)
	utils.printTabular(head, rows, 'cl')
def _displaySetup(self, dsPath, head):
	# Print nickname -> (config files, lumi filter, lookup vars) table
	if not os.path.exists(dsPath):
		return
	nicknames = set()
	for block in DataProvider.loadFromFile(dsPath).getBlocks():
		nicknames.add(block[DataProvider.Nickname])
	utils.vprint('Mapping between nickname and other settings:\n', -1)
	table = []
	for nick in sorted(nicknames):
		entries = formatLumi(self._nmLumi.lookup(nick, '', is_selector = False))
		if len(entries) > 4:
			# Abbreviate long lumi lists as "first ... last (count)"
			lumi_display = '%s ... %s (%d entries)' % (entries[0], entries[-1], len(entries))
		else:
			lumi_display = str.join(', ', entries)
		cfg_list = self._nmCfg.lookup(nick, '', is_selector = False)
		row = {0: nick, 1: str.join(', ', imap(os.path.basename, cfg_list)), 2: lumi_display}
		params = {'DATASETNICK': nick}
		for source in self._pm.lookupSources:
			source.fillParameterInfo(None, params)
		row.update(params)
		table.append(row)
	utils.printTabular(head, table, 'cl')
	utils.vprint(level = -1)
def display(self):
	# Per-job overview: number, state, WMS id; verbose mode appends the
	# attempt history, otherwise only the last destination is shown
	table = []
	for jobNum in self._jobs:
		job = self._jobDB.get(jobNum)
		if not job or (job.state == Job.INIT):
			continue
		table.append({0: jobNum, 1: Job.enum2str(job.state), 2: job.wmsId})
		if utils.verbosity() > 0:
			# Reverse so newest attempts are listed first
			past = job.history.items()
			past.reverse()
			for (attempt, dest) in past:
				if dest != 'N/A':
					table.append({1: attempt, 2: ' -> ' + dest})
		elif job.get('dest', 'N/A') != 'N/A':
			table.append({2: ' -> ' + job.get('dest')})
	utils.printTabular(lzip(irange(3),
		['Job', 'Status / Attempt', 'Id / Destination']), table, 'rcl')
def printDatasetOverview(self, blocks):
	# Show a summary table of the given dataset blocks
	(header, rows, formatters) = self.getDatasetOverviewInfo(blocks)
	utils.vprint('Using the following datasets:', -1)
	utils.vprint(level = -1)
	utils.printTabular(header, rows, 'rcl', fmt = formatters)
	utils.vprint(level = -1)
fail.add(jobNum) printError(events, splitInfo[DataSplitter.NEntries], 'Inconsistent number of events') printError(skip, splitInfo[DataSplitter.Skipped], 'Inconsistent number of skipped events') printError(files, splitInfo[DataSplitter.FileList], 'Inconsistent list of files') except Exception: logging.warning('Job %d was never initialized!', jobNum) if fail: logging.warning('Failed: ' + str.join('\n', imap(str, fail))) if (opts.partition_list is not None) or opts.partition_list_invalid or opts.partition_check: if len(args) != 1: utils.exitWithUsage(parser.usage('part')) splitter = DataSplitter.loadPartitionsForScript(args[0]) if opts.partition_list_invalid: utils.printTabular([(0, 'Job')], partition_invalid(splitter)) if opts.partition_list is not None: if opts.partition_list in ('', 'all'): keyStrings = DataSplitter.enumNames else: keyStrings = opts.partition_list.split(',') keyList = lmap(DataSplitter.str2enum, keyStrings) if None in keyList: logging.warning('Available keys: %r', DataSplitter.enumNames) utils.printTabular([('jobNum', 'Job')] + lzip(keyList, keyStrings), partition_list(splitter, keyList)) if opts.partition_check: logging.info('Checking %d jobs...', splitter.getMaxJobs()) partition_check(splitter)