def __init__(self, db, args=None):
    Logger.__init__(self)
    self.__db = db
    if args:
        run_parser = argparse.ArgumentParser(
            description='Run pilot command lines.', add_help=False)
        run_parser.add_argument('-n', '--node', action='store', dest='node',
                                required=False, default=socket.gethostname(),
                                help="The node name registered into the database.")
        run_parser.add_argument('-m', '--master', action='store_true', dest='master',
                                required=False,
                                help="This is the master node. Exactly one node must be the master.")
        parent = argparse.ArgumentParser(description='', add_help=False)
        subparser = parent.add_subparsers(dest='option')
        subparser.add_parser('run', parents=[run_parser])
        args.add_parser('pilot', parents=[parent])
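# Hedged usage sketch (not part of the original source): how a subcommand
# module like the one above plugs into a top-level CLI. The 'args' argument
# is assumed to be the subparsers action returned by add_subparsers(), so
# 'pilot run -n node01' selects the nested parser.
import argparse, socket

top = argparse.ArgumentParser(description='top-level command line')  # hypothetical entry point
commands = top.add_subparsers(dest='mode')

pilot = commands.add_parser('pilot')
pilot_sub = pilot.add_subparsers(dest='option')
run = pilot_sub.add_parser('run')
run.add_argument('-n', '--node', default=socket.gethostname())

parsed = top.parse_args(['pilot', 'run', '-n', 'node01'])
print(parsed.mode, parsed.option, parsed.node)  # -> pilot run node01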
def __init__(self, outputFile, **kw):
    Logger.__init__(self, **kw)
    if not outputFile.endswith('.root'):
        outputFile += '.root'
    from Gaugi.utilities import retrieve_kw
    # Use this property to rebuild the storegate from a root file
    self._restoreStoreGate = retrieve_kw(kw, 'restoreStoreGate', False)
    filterDirs = retrieve_kw(kw, 'filterDirs', None)
    # Create a TFile object to hold everything
    from ROOT import TFile
    from Gaugi import expandPath
    outputFile = expandPath(outputFile)
    if self._restoreStoreGate:
        import os.path
        if not os.path.exists(outputFile):
            raise ValueError("File '%s' does not exist" % outputFile)
        self._file = TFile(outputFile, "read")
    else:
        self._file = TFile(outputFile, "recreate")
    self._currentDir = ""
    self._objects = dict()
    self._dirs = list()
    import os
    self._outputFile = os.path.abspath(outputFile)
    if self._restoreStoreGate:
        retrievedObjs = self.__restore(self._file, filterDirs=filterDirs)
        for name, obj in retrievedObjs:
            self._dirs.append(name)
            self._objects[name] = obj
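# Hedged usage sketch (assumptions: the constructor above belongs to Gaugi's
# StoreGate and methods like mkdir/addHistogram/write exist with these
# signatures - verify against the actual class before relying on this).
from Gaugi import StoreGate
from ROOT import TH1F

sg = StoreGate('monitoring')  # creates/overwrites monitoring.root
sg.mkdir('/shifter')                                           # assumed API
sg.addHistogram(TH1F('et', ';E_{T} [GeV];Counts', 50, 0., 100.))  # assumed API
sg.write()                                                     # assumed flush-to-disk call

# later, rebuild the whole store from the file on disk:
sg_restored = StoreGate('monitoring', restoreStoreGate=True)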
def __init__(self, transientAttrs=set(), toPublicAttrs=set(), **kw):
    "Initialize streamer and declare transient variables."
    Logger.__init__(self, kw)
    self.transientAttrs = set(transientAttrs) | {'_readVersion'}
    self.toPublicAttrs = set(toPublicAttrs)
    from Gaugi import checkForUnusedVars
    checkForUnusedVars(kw, self._logger.warning)
def __setstate__(self, d):
    """Rebuild the compiled ignoreAttrs patterns from their strings."""
    import re
    v = d.pop('ignoreAttrs')
    self.__dict__['ignoreAttrs'] = [re.compile(s) for s in v]
    Logger.__setstate__(self, d)
def __init__(self, generator, etbins, etabins, x_bin_size, y_bin_size, ymin, ymax,
             false_alarm_limit=0.5, level=LoggingLevel.INFO,
             xmin_percentage=1, xmax_percentage=99, plot_stage='Internal',
             palette=kBlackBody, xmin=None, xmax=None):
    # init base class
    Logger.__init__(self, level=level)
    self.__generator = generator
    self.__etbins = etbins
    self.__etabins = etabins
    self.__ymin = ymin
    self.__ymax = ymax
    self.__x_bin_size = x_bin_size
    self.__y_bin_size = y_bin_size
    self.__false_alarm_limit = false_alarm_limit
    self.__xmin_percentage = xmin_percentage
    self.__xmax_percentage = xmax_percentage
    self.__plot_stage = plot_stage
    self.__xmin = xmin
    self.__xmax = xmax
    self.__palette = palette
def __init__(self, cls, id, queue_size=1):
    Logger.__init__(self)
    Process.__init__(self)
    self._queue = Queue(queue_size)
    self._cls = cls
    self._id = id
    self._is_alive_event = Event()
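# Hedged sketch (not from the original source): the multiprocessing pattern
# the constructor above appears to follow - a Process subclass that reports
# results through a bounded Queue and signals liveness with an Event. The
# Worker class and its payload are hypothetical.
from multiprocessing import Process, Queue, Event

class Worker(Process):
    def __init__(self, payload, queue_size=1):
        Process.__init__(self)
        self._queue = Queue(queue_size)
        self._payload = payload
        self._is_alive_event = Event()

    def run(self):
        self._is_alive_event.set()          # announce the worker started
        self._queue.put(self._payload * 2)  # stand-in for real work

if __name__ == '__main__':
    w = Worker(21)
    w.start()
    print(w._queue.get())  # -> 42
    w.join()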
def __init__(self, pattern_generator, crossval, **kw):
    Logger.__init__(self)
    self.__pattern_generator = pattern_generator
    self.crossval = crossval
    self.optimizer = retrieve_kw(kw, 'optimizer', 'adam')
    self.loss = retrieve_kw(kw, 'loss', 'binary_crossentropy')
    self.epochs = retrieve_kw(kw, 'epochs', 1000)
    self.batch_size = retrieve_kw(kw, 'batch_size', 1024)
    self.callbacks = retrieve_kw(kw, 'callbacks', [])
    self.metrics = retrieve_kw(kw, 'metrics', [])
    self.sorts = retrieve_kw(kw, 'sorts', range(1))
    self.inits = retrieve_kw(kw, 'inits', 1)
    job_auto_config = retrieve_kw(kw, 'job', None)
    self.__verbose = retrieve_kw(kw, 'verbose', True)
    self.__class_weight = retrieve_kw(kw, 'class_weight', False)
    self.__save_history = retrieve_kw(kw, 'save_history', True)
    self.decorators = retrieve_kw(kw, 'decorators', [])

    # read the job configuration from file
    if job_auto_config:
        if type(job_auto_config) is str:
            MSG_INFO(self, 'Reading job configuration from: %s', job_auto_config)
            from saphyra.core.readers import JobReader
            job = JobReader().load(job_auto_config)
        else:
            job = job_auto_config
        # retrieve sort/init lists from file
        self.sorts = job.getSorts()
        self.inits = job.getInits()
        self.__models, self.__id_models = job.getModels()
        self.__jobId = job.id()

    # get model and tag from model file or lists
    models = retrieve_kw(kw, 'models', None)
    if models:
        self.__models = models
        self.__id_models = list(range(len(models)))
        self.__jobId = 0

    self.__outputfile = retrieve_kw(kw, 'outputFile', None)
    if self.__outputfile:
        from saphyra.core.readers.versions import TunedData_v1
        self.__tunedData = TunedData_v1()

    checkForUnusedVars(kw)
    from saphyra import Context
    self.__context = Context()
    self.__index_from_cv = None
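# Hedged sketch (an assumption, not the Gaugi source): retrieve_kw as used
# above behaves like dict.pop with a default, so checkForUnusedVars can
# later warn about any keyword the constructor never consumed.
def retrieve_kw(kw, key, default=None):
    """Pop 'key' from the kwargs dict, falling back to 'default'."""
    return kw.pop(key, default)

def check_for_unused_vars(kw, handler=print):
    for key in kw:
        handler("Keyword argument '%s' was never used!" % key)

kw = dict(epochs=10, typo_key=1)
epochs = retrieve_kw(kw, 'epochs', 1000)  # -> 10, removed from kw
check_for_unused_vars(kw)                 # warns about 'typo_key'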
def __init__(self, etbins, etabins, fill_colors=fill_colors, line_colors=line_colors):
    Logger.__init__(self)
    self.__etbins = etbins
    self.__etabins = etabins
    self.__these_fill_colors = fill_colors
    self.__these_line_colors = line_colors
    self.__hist = {}
def __init__(self, t):
    Logger.__init__(self)
    import collections
    self._containers = collections.OrderedDict()
    self._decorations = dict()
    self._current_entry = NotSet
    self._tree = t
def __init__(self, db, args=None):
    Logger.__init__(self)
    self.__db = db
    if args:
        # Register a dataset using the dataset CLI
        registry_parser = argparse.ArgumentParser(
            description='Dataset registry command lines.', add_help=False)
        registry_parser.add_argument(
            '-d', '--dataset', action='store', dest='datasetname', required=True,
            help="The dataset name used to register into the database. (e.g: user.jodafons...)")
        registry_parser.add_argument(
            '-p', '--path', action='store', dest='path', required=True,
            help="The path to the dataset")

        # Delete a dataset using the dataset CLI
        unregistry_parser = argparse.ArgumentParser(
            description='Dataset unregistry command lines.', add_help=False)
        unregistry_parser.add_argument(
            '-d', '--dataset', action='store', dest='datasetname', required=True,
            help="The dataset name to be removed")

        # List datasets using the dataset CLI
        list_parser = argparse.ArgumentParser(
            description='Dataset list command lines.', add_help=False)
        list_parser.add_argument(
            '-u', '--user', action='store', dest='username', required=False,
            default=config['username'],
            help="List all datasets for a selected user.")

        parent = argparse.ArgumentParser(description='', add_help=False)
        subparser = parent.add_subparsers(dest='option')
        # Datasets
        subparser.add_parser('registry', parents=[registry_parser])
        subparser.add_parser('unregistry', parents=[unregistry_parser])
        subparser.add_parser('list', parents=[list_parser])
        args.add_parser('castor', parents=[parent])
def __init__(self, nthreads, **kw):
    Logger.__init__(self, **kw)
    self._nthreads = nthreads
    self._nFilesPerJob = 20
    self._skip_these_keys = ["features", "etBins", "etaBins", "etBinIdx", "etaBinIdx"]
    import re
    self._pat = re.compile(r'.+(?P<binID>et(?P<etBinIdx>\d+).eta(?P<etaBinIdx>\d+))\..+$')
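# Hedged sketch (not from the original source): what the bin-ID pattern
# above extracts from a typical binned file name. The file name below is
# made up for illustration.
import re

pat = re.compile(r'.+(?P<binID>et(?P<etBinIdx>\d+).eta(?P<etaBinIdx>\d+))\..+$')
m = pat.match('sample.mc16.et2_eta0.npz')
if m:
    print(m.group('binID'), m.group('etBinIdx'), m.group('etaBinIdx'))
    # -> et2_eta0 2 0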
def __init__(self, command, njobs, maxJobs, output):
    Logger.__init__(self)
    self.process_pipe = []
    self.output_to_merge = []
    import random
    import time
    random.seed(time.time())
    self._base_id = random.randrange(100000)
    self._jobList = list(range(njobs))
    self._maxJobs = maxJobs
    self._command = command
    self._output = output
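# Hedged sketch (an assumption about how the attributes above are used): a
# minimal "at most maxJobs concurrent subprocesses" polling loop, which is
# the pattern process_pipe/_jobList/_maxJobs suggest. The echo command is a
# placeholder for the real command line.
import subprocess, time

job_list = list(range(5))
max_jobs = 2
process_pipe = []

while job_list or process_pipe:
    # launch new jobs while there is both work and a free slot
    while job_list and len(process_pipe) < max_jobs:
        job_id = job_list.pop(0)
        proc = subprocess.Popen(['echo', 'job %d' % job_id])
        process_pipe.append((job_id, proc))
    # reap whatever finished
    for job_id, proc in process_pipe[:]:
        if proc.poll() is not None:
            process_pipe.remove((job_id, proc))
    time.sleep(0.1)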
def __init__(self, db, args=None):
    Logger.__init__(self)
    self.__db = db
    if args:
        create_parser = argparse.ArgumentParser(
            description='User create command lines.', add_help=False)
        create_parser.add_argument('-n', '--name', action='store', dest='name',
                                   required=True, help="The name of the user.")
        create_parser.add_argument('-e', '--email', action='store', dest='email',
                                   required=True, help="The user email.")

        delete_parser = argparse.ArgumentParser(
            description='User remove command lines.', add_help=False)
        delete_parser.add_argument('-n', '--name', action='store', dest='name',
                                   required=True, help="The user name to be removed")

        # List all users using the user CLI
        list_parser = argparse.ArgumentParser(
            description='List all users command lines.', add_help=False)
        list_parser.add_argument('-u', '--user', action='store', dest='name',
                                 required=False,
                                 help="List all attributes for this user")

        init_parser = argparse.ArgumentParser(
            description='Initialize the database.', add_help=False)

        parent = argparse.ArgumentParser(description='', add_help=False)
        subparser = parent.add_subparsers(dest='option')
        # Users
        subparser.add_parser('create', parents=[create_parser])
        subparser.add_parser('delete', parents=[delete_parser])
        subparser.add_parser('list', parents=[list_parser])
        subparser.add_parser('init', parents=[init_parser])
        args.add_parser('user', parents=[parent])
def __init__(self):
    Logger.__init__(self)
    self._idx = 0
    self._is_hlt = False
    self._decoration = dict()
    self._tree = None
    self._event = None
    self._context = None
    # this is used for metadata properties
    self._useMetadataParams = False
    self._metadataParams = {}
    self._branches = list()  # hold all branches from the body class
def __init__(self, fList):
    Logger.__init__(self)
    from Gaugi import csvStr2List
    from Gaugi import expandFolders
    self.fList = csvStr2List(fList)
    self.fList = expandFolders(self.fList)
    self.process_pipe = []
    self.output_stack = []
    import random
    import time
    random.seed(time.time())
    self._base_id = random.randrange(100000)
def __init__(self, verbose=False, save_the_best=False, patience=False, **kw):
    super(Callback, self).__init__()
    Logger.__init__(self, **kw)
    self.__verbose = verbose
    self.__patience = patience
    self.__ipatience = 0
    self.__best_sp = 0.0
    self.__save_the_best = save_the_best
    self.__best_weights = NotSet
    self.__best_epoch = 0
    self._validation_data = NotSet
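# Hedged sketch (an assumption, not the original implementation): how the
# patience/best-weights state above is typically consumed in a Keras
# callback that early-stops on a monitored metric. Class and metric names
# are hypothetical.
from tensorflow.keras.callbacks import Callback

class EarlyStopOnMetric(Callback):
    def __init__(self, monitor='val_sp', patience=10, save_the_best=True):
        super().__init__()
        self.monitor = monitor
        self.patience = patience
        self.save_the_best = save_the_best
        self.ipatience = 0
        self.best = 0.0
        self.best_weights = None

    def on_epoch_end(self, epoch, logs=None):
        current = (logs or {}).get(self.monitor, 0.0)
        if current > self.best:
            self.best = current
            self.ipatience = 0
            if self.save_the_best:
                self.best_weights = self.model.get_weights()
        else:
            self.ipatience += 1
            if self.ipatience >= self.patience:
                self.model.stop_training = True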
def __init__(self, name):
    Logger.__init__(self)
    self._name = name
    # flags
    self._wtd = StatusWTD.DISABLE
    self._status = StatusTool.ENABLE
    self._initialized = StatusTool.NOT_INITIALIZED
    self._finalized = StatusTool.NOT_FINALIZED
    # services and EDMs
    self._context = NotSet
    self._storegateSvc = NotSet
    self._dataframe = NotSet
    # property
    self.__property = {}
def __init__(self, ignoreAttrs=set(), toProtectedAttrs=set(), ignoreRawChildren=False, **kw):
    """
    -> ignoreAttrs: do not consider these attributes on the dictionary values.
    -> toProtectedAttrs: change public attributes to protected or private
       attributes. That is, suppose the dictionary value is 'val' and the class
       value should be _val or __val, then add toProtectedAttrs = ['_val'] or '__val'.
    -> ignoreRawChildren: do not attempt to convert raw children to higher level objects.
    """
    Logger.__init__(self, kw)
    ignoreAttrs = list(set(ignoreAttrs) | RawDictCnv.baseAttrs)
    import re
    self.ignoreAttrs = [re.compile(ignoreAttr) for ignoreAttr in ignoreAttrs]
    self.toProtectedAttrs = set(toProtectedAttrs)
    self.ignoreRawChildren = ignoreRawChildren
    from Gaugi import checkForUnusedVars
    checkForUnusedVars(kw, self._logger.warning)
def __init__(self, generator, etbins, etabins, x_bin_size, y_bin_size, ymin, ymax,
             false_alarm_limit=0.5, level=LoggingLevel.INFO,
             xmin_percentage=1, xmax_percentage=99):
    # init base class
    Logger.__init__(self, level=level)
    self.__generator = generator
    self.__etbins = etbins
    self.__etabins = etabins
    self.__ymin = ymin
    self.__ymax = ymax
    self.__x_bin_size = x_bin_size
    self.__y_bin_size = y_bin_size
    self.__false_alarm_limit = false_alarm_limit
    self.__xmin_percentage = xmin_percentage
    self.__xmax_percentage = xmax_percentage
def __init__(self, fList, reader, nFilesPerJob, nthreads):
    Logger.__init__(self)
    from Gaugi import csvStr2List
    from Gaugi import expandFolders
    fList = csvStr2List(fList)
    self._fList = expandFolders(fList)

    def chunks(l, n):
        """Yield successive n-sized chunks from l."""
        for i in range(0, len(l), n):
            yield l[i:i + n]

    self._fList = [l for l in chunks(self._fList, nFilesPerJob)]
    self.process_pipe = []
    self._outputs = []
    self._nthreads = nthreads
    self._reader = reader
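# Hedged sketch (not from the original source): the chunking idiom above,
# shown standalone - it splits a flat file list into per-job batches.
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]

files = ['f%d.root' % i for i in range(7)]
print(list(chunks(files, 3)))
# -> [['f0.root', 'f1.root', 'f2.root'],
#     ['f3.root', 'f4.root', 'f5.root'],
#     ['f6.root']]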
def __init__(self, d={}, **kw):
    if None in self._contextManager._acceptedTypes:
        self._contextManager._acceptedTypes = TexObject,
    if (not isinstance(self, TexObjectCollection)
            and not hasattr(self, '_preamble') and not hasattr(self, '_enclosure')
            and not hasattr(self, '_body') and not hasattr(self, '_footer')
            and not hasattr(self, '_appendix')):
        raise TexException(self, 'Class %s does not write any tex code.'
                           % self.__class__.__name__)
    d.update(kw)
    Logger.__init__(self, d)
    if hasattr(self, '_body'):
        self._body = formatTex(self._body, retrieve_kw(d, 'textWidth', 80))
    self._stream = kw.pop('stream', tss)
    self._keywords = {key: val for key, val in d.items()
                      if not key.startswith('_')}
    self._keywords.update({key: val for key, val in self.__dict__.items()
                           if not key.startswith('_')})
    if 'star' in self._keywords and self._keywords['star']:
        self._keywords['star'] = '*'
    else:
        self._keywords['star'] = ''
    if hasattr(self, '_assertVars'):
        for key in self._assertVars:
            if key not in self._keywords:
                raise TexException(self, "Assert var %s failed." % key)
    gcc.set(self)
    self._contextManaged = d.pop('_contextManaged', True)
    self._context = self._contextManager()
    self._isInContext = self._context is not None
    if (self._isInContext and isinstance(self._context, TexObjectCollection)
            and self._contextManaged):
        self._context += self
    if self._isInContext:
        self._stream = self._context._stream
def plotNSamples(cls, npatterns, etBins, etaBins, outname='nPatterns.pdf'):
    """Plot number of samples per bin"""
    logger = Logger.getModuleLogger("PlotNSamples")
    from ROOT import TCanvas, gROOT, kTRUE, kFALSE, TH2I, TText
    gROOT.SetBatch(kTRUE)
    c1 = TCanvas("plot_patterns_signal", "a", 0, 0, 800, 400)
    c1.Draw()
    shape = [len(etBins) - 1, len(etaBins) - 1]
    histo1 = TH2I("text_stats",
                  "#color[4]{Signal}/#color[2]{Background} available statistics",
                  shape[0], 0, shape[0], shape[1], 0, shape[1])
    histo1.SetStats(kFALSE)
    histo1.Draw("TEXT")
    histo1.SetXTitle("E_{T}")
    histo1.SetYTitle("#eta")
    histo1.GetXaxis().SetTitleSize(0.04)
    histo1.GetYaxis().SetTitleSize(0.04)
    histo1.GetXaxis().SetLabelSize(0.04)
    histo1.GetYaxis().SetLabelSize(0.04)
    histo1.GetXaxis().SetTickSize(0)
    histo1.GetYaxis().SetTickSize(0)
    ttest = TText()
    ttest.SetTextAlign(22)
    for etBin in range(shape[0]):
        for etaBin in range(shape[1]):
            key = 'et%d_eta%d' % (etBin, etaBin)
            ttest.SetTextColor(4)
            ttest.DrawText(.5 + etBin, .75 + etaBin,
                           's: ' + str(npatterns['sgnPattern_' + key]))
            ttest.SetTextColor(2)
            ttest.DrawText(.5 + etBin, .25 + etaBin,
                           'b: ' + str(npatterns['bkgPattern_' + key]))
            try:
                histo1.GetYaxis().SetBinLabel(
                    etaBin + 1, '#bf{%d} : %.2f->%.2f'
                    % (etaBin, etaBins[etaBin], etaBins[etaBin + 1]))
            except Exception:
                logger.error("Couldn't retrieve eta bin %d boundaries.", etaBin)
                histo1.GetYaxis().SetBinLabel(etaBin + 1, str(etaBin))
            try:
                histo1.GetXaxis().SetBinLabel(
                    etBin + 1, '#bf{%d} : %d->%d [GeV]'
                    % (etBin, etBins[etBin], etBins[etBin + 1]))
            except Exception:
                logger.error("Couldn't retrieve et bin %d boundaries.", etBin)
                histo1.GetXaxis().SetBinLabel(etBin + 1, str(etBin))
    c1.SetGrid()
    c1.Update()
    c1.SaveAs(outname)
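# Hedged usage sketch (not from the original source): the inputs
# plotNSamples expects. The owner class and the counts are hypothetical;
# only the key scheme follows the code above.
etBins = [15, 20, 30, 40, 50]          # GeV edges -> 4 et bins
etaBins = [0.0, 0.8, 1.37, 1.54, 2.5]  # 4 eta bins
npatterns = {}
for et in range(len(etBins) - 1):
    for eta in range(len(etaBins) - 1):
        key = 'et%d_eta%d' % (et, eta)
        npatterns['sgnPattern_' + key] = 1000  # made-up statistics
        npatterns['bkgPattern_' + key] = 5000
# PlotHelper.plotNSamples(npatterns, etBins, etaBins)  # hypothetical owner class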
def __init__(self, **kw):
    Logger.__init__(self)
    self.epochs = retrieve_kw(kw, 'epochs', 1000)
    self.batch_size = retrieve_kw(kw, 'batch_size', 1024)
    self.lambda_disco = retrieve_kw(kw, 'lambda_disco', 300)
    self.callbacks = retrieve_kw(kw, 'callbacks', [])
    self.metrics = retrieve_kw(kw, 'metrics', [])
    job_auto_config = retrieve_kw(kw, 'job', None)
    self.sorts = retrieve_kw(kw, 'sorts', range(1))
    self.inits = retrieve_kw(kw, 'inits', 1)
    self.__verbose = retrieve_kw(kw, 'verbose', True)
    self.__model_generator = retrieve_kw(kw, 'model_generator', None)
    self.total = 100000
    self.background_percent = 0.99
    self.test_size = 0.3

    # read the job configuration from file
    if job_auto_config:
        if type(job_auto_config) is str:
            MSG_INFO(self, 'Reading job configuration from: %s', job_auto_config)
            from saphyra.core.readers import JobReader
            job = JobReader().load(job_auto_config)
        else:
            job = job_auto_config
        # retrieve sort/init lists from file
        self.sorts = job.getSorts()
        self.inits = job.getInits()
        self.__models, self.__id_models = job.getModels()
        self.__jobId = job.id()

    # get model and tag from model file or lists
    models = retrieve_kw(kw, 'models', None)
    if models:
        self.__models = models
        self.__id_models = list(range(len(models)))
        self.__jobId = 0

    checkForUnusedVars(kw)
def watchLock(filename):
    logger = Logger.getModuleLogger("watchLock")
    lockFileName = os.path.join(os.path.dirname(filename),
                                '.' + os.path.basename(filename) + '.lock')
    firstMsg = True
    while os.path.exists(lockFileName):
        if firstMsg:
            logger.warning("Waiting for another process to unlock file %s...",
                           lockFileName)
            firstMsg = False
        sleep(1)
    lockFile = LockFile(lockFileName)
    return lockFile
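# Hedged usage sketch (assumptions: LockFile creates the lock file on
# construction and exposes some release/delete call - the 'release' method
# name below is hypothetical; check the actual LockFile class).
lock = watchLock('/tmp/shared.db')  # blocks until no other process holds it
try:
    pass  # read/modify '/tmp/shared.db' safely here
finally:
    lock.release()  # hypothetical: remove the .lock file so others proceed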
def __getstate__(self):
    """Makes logger invisible for pickle"""
    odict = Logger.__getstate__(self)
    if 'ignoreAttrs' in odict:
        s = odict['ignoreAttrs']

        def getStr(c):
            try:
                return c.pattern
            except AttributeError:
                return c

        odict['ignoreAttrs'] = [getStr(v) for v in s]
    return odict
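# Hedged sketch (not from the original source): the round trip that
# __getstate__ above and the __setstate__ shown earlier implement - compiled
# regexes are stored as plain pattern strings, which keeps the pickled state
# portable and human-readable, and recompiled on load.
import pickle
import re

patterns = [re.compile(r'^_'), re.compile(r'.*Svc$')]
as_strings = [p.pattern for p in patterns]                  # __getstate__ direction
payload = pickle.dumps(as_strings)
restored = [re.compile(s) for s in pickle.loads(payload)]   # __setstate__ direction
print(restored[0].match('_private') is not None)            # -> True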
from prometheus import EventATLAS
from prometheus.enumerations import Dataframe as DataframeEnum
from Gaugi.messenger import LoggingLevel, Logger
from Gaugi import ToolSvc, ToolMgr
import argparse

mainLogger = Logger.getModuleLogger("job")

parser = argparse.ArgumentParser()
parser.add_argument('-i', '--inputFiles', action='store', dest='inputFiles',
                    required=True, nargs='+',
                    help="The input files that will be used to generate the plots")
parser.add_argument('-o', '--outputFile', action='store', dest='outputFile',
                    required=False, default=None,
                    help="The output store name.")
parser.add_argument('-n', '--nov', action='store', dest='nov', required=False,
#!/usr/bin/env python3
from Gaugi.messenger import LoggingLevel, Logger
from Gaugi import GeV
import argparse
import sys, os

mainLogger = Logger.getModuleLogger("pythia")

parser = argparse.ArgumentParser()

#
# Mandatory arguments
#
parser.add_argument('-i', '--mainFile', action='store', dest='mainFile',
                    required=False, default="",
                    help="The main pythia file configuration")
parser.add_argument('-o', '--outputFile', action='store', dest='outputFile',
                    required=True,
                    help="The event file generated by pythia.")
parser.add_argument('--filter',
def __init__(self, name):
    Logger.__init__(self)
    import collections
    self._name = name
    self._tools = collections.OrderedDict()
#!/usr/bin/env python
from Gaugi.messenger import LoggingLevel, Logger
import argparse

mainLogger = Logger.getModuleLogger("prometheus.merge")

parser = argparse.ArgumentParser()
parser.add_argument('-i', '--inputFiles', action='store', dest='fList',
                    required=True, nargs='+',
                    help="The input files.")
parser.add_argument('-o', '--outputFile', action='store', dest='output',
                    required=True, default='merged.root',
                    help="The output file name.")
parser.add_argument('-nm', '--nFilesPerMerge', action='store', dest='nFilesPerMerge',
                    required=False, default=20, type=int,
def __init__(self, outputFile):
    Logger.__init__(self)
    if not outputFile:
        raise TexException(self, 'Cannot stream to empty file path.')
    self.outputFile = ensureExtension(outputFile, self._outputExtension)