Example No. 1
class ShiftMonitor:

    def __init__(self):
        # Suppress root warnings
        ROOT.gErrorIgnoreLevel = 7000
        # Fits and fit files
        self.fitFile = "Fits/2016/FOG.pkl"               # The fit file, can contain both HLT and L1 triggers
        self.InputFitHLT = None         # The fit information for the HLT triggers
        self.InputFitL1 = None          # The fit information for the L1 triggers
        # DBParser
        self.parser = DBParser()        # A database parser
        # Rates
        self.HLTRates = None            # HLT rates
        self.L1Rates = None             # L1 rates
        self.Rates = None               # Combined L1 and HLT rates
        self.deadTimeData = {}          # initializing deadTime dict
        # Run control
        self.runNumber = -1             # The number of the current run
        self.numBunches = [-1, -1]     # Number of [target, colliding] bunches
        # Lumisection control
        self.lastLS = 1                 # The last LS that was processed last segment
        self.currentLS = 1              # The latest LS written to the DB
        self.slidingLS = -1             # The number of LS to average over, use -1 for no sliding LS
        self.useLSRange = False         # Only look at LS in a certain range
        # Mode
        self.triggerMode = None         # The trigger mode
        self.mode = None                # Mode: cosmics, circulate, collisions, etc.
        # Columns header
        self.displayRawRates = False    # If True, display raw rates; if False, display prescaled (actual) rates
        self.pileUp = True              # derive expected rate as a function of the pileUp, and not the luminosity
        # Triggers
        self.cosmics_triggerList = "monitorlist_COSMICS.list" #default list used when in cosmics mode
        self.collisions_triggerList = "monitorlist_COLLISIONS.list" #default list used when in collision mode 
        self.triggerList = ""           # A list of all the L1 and HLT triggers we want to monitor
        self.userSpecTrigList = False   # User specified trigger list 
        self.usableHLTTriggers = []     # HLT Triggers active during the run that we have fits for (and are in the HLT trigger list if it exists)
        self.otherHLTTriggers = []      # HLT Triggers active during the run that are not usable triggers
        self.usableL1Triggers = []      # L1 Triggers active during the run that we have fits for (and are in the L1 trigger list if it exists)
        self.otherL1Triggers = []       # L1 Triggers active during the run that are not usable triggers
        self.redoTList = True           # Whether we need to update the trigger lists
        self.useAll = False             # If true, we will print out the rates for all the HLT triggers
        self.useL1 = False              # If true, we will print out the rates for all the L1 triggers
        self.totalHLTTriggers = 0       # The total number of HLT Triggers on the menu this run
        self.totalL1Triggers = 0        # The total number of L1 Triggers on the menu this run
        self.fullL1HLTMenu = []
        self.ignoreStrings = ["Calibration","L1Tech"]
        # Restrictions
        self.removeZeros = False        # If true, we don't show triggers that have zero rate
        self.requireLumi = False        # If true, we only display tables when aveLumi is not None
        # Trigger behavior
        self.percAccept = 50.0          # The acceptance for % diff
        self.devAccept = 5              # The acceptance for deviation
        self.badRates = {}              # A dictionary: [ trigger name ] { num consecutive bad , whether the trigger was bad last time we checked, rate, expected, dev }
        self.recordAllBadRates = {}     # A dictionary: [ trigger name ] < total times the trigger was bad >
        self.maxCBR = 3                 # The maximum number of consecutive DB queries a trigger may deviate from prediction before an alert is sent
        self.displayBadRates = -1       # The number of bad rates we should show in the summary. We use -1 for all
        self.usePerDiff = False         # Whether we should identify bad triggers by percent diff or deviation
        self.sortRates = True           # Whether we should sort triggers by their rates
        self.maxHLTRate = 500           # The maximum prescaled rate we allow an HLT Trigger to have
        self.maxL1Rate = 30000          # The maximum prescaled rate we allow an L1 Trigger to have
        # Other options
        self.quiet = False              # Prints fewer messages in this mode
        self.noColors = False           # Special formatting for if we want to dump the table to a file
        self.sendMailAlerts_static = True      # Whether we should send alert mails
        self.sendMailAlerts_dynamic = self.sendMailAlerts_static 
        self.sendAudioAlerts = False     # Whether we should send audio warning messages in the control room (CAUTION)
        self.isUpdating = True          # flag to determine whether or not we're receiving new LS
        self.showStreams = False        # Whether we should print stream information
        self.showPDs = False            # Whether we should print pd information
        self.totalStreams = 0           # The total number of streams
        self.maxStreamRate = 1000000    # The maximum rate we allow a "good" stream to have
        self.maxPDRate = 10000000       # The maximum rate we allow a "good" pd to have        
        self.lumi_ave = "NONE"
        self.pu_ave = "NONE"
        self.deadTimeCorrection = True  # correct the rates for dead time
        self.scale_sleeptime = 1.0      # Scales the length of time to wait before sending another query (1.0 = 60sec, 2.0 = 120sec, etc)


        self.badTriggers = {}       #[trigger name] <List of trigger infos>

    # Use: Opens a file containing a list of trigger names and adds them to the RateMonitor class's trigger list
    # Note: We do not clear the trigger list; this way we can add triggers from multiple files to the trigger list
    # -- fileName: The name of the file that trigger names are contained in
    # Returns: (void)
    def loadTriggersFromFile(self, fileName):
        try:
            file = open(fileName, 'r')
        except:
            print "File", fileName, "(a trigger list file) failed to open."
            return []

        allTriggerNames = file.read().split() # Get all the words, no argument -> split on any whitespace
        file.close()
        TriggerList = []
        for triggerName in allTriggerNames:
            # Recognize comments
            if triggerName[0] == '#': continue
            try:
                strippedName = stripVersion(str(triggerName))
                if not strippedName in TriggerList:
                    TriggerList.append(strippedName)
            except:
                print "Error parsing trigger name in file", fileName
        return TriggerList
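    # For reference, a sketch of the trigger-list file format this method expects (hypothetical
    # path names): plain text with whitespace-separated trigger names; tokens starting with '#'
    # are skipped and version suffixes are stripped on load, e.g.
    #   HLT_IsoMu24_v2
    #   HLT_Ele27_WPTight_Gsf_v1
    #   L1_SingleMu22
    #   #HLT_Photon175_v3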

    # Use: Formats the header string
    def getHeader(self):
        # Define spacing and header
        maxNameHLT = 0
        maxNameL1 = 0
        if len(self.usableHLTTriggers)>0 or len(self.otherHLTTriggers)>0:
            maxNameHLT = max([len(trigger) for trigger in self.usableHLTTriggers+self.otherHLTTriggers])
        if len(self.usableL1Triggers)>0 or len(self.otherL1Triggers)>0:
            maxNameL1 = max([len(trigger) for trigger in self.usableL1Triggers+self.otherL1Triggers])

        # Make the name spacing at least 90
        maxName = max([maxNameHLT, maxNameL1, 90])
        
        self.spacing = [maxName + 5, 14, 14, 14, 14, 14, 0]
        self.spacing[5] = max( [ 181 - sum(self.spacing), 0 ] )

        self.header = ""                # The table header
        self.header += stringSegment("* TRIGGER NAME", self.spacing[0])
        if (self.displayRawRates): self.header += stringSegment("* RAW [Hz]", self.spacing[1])
        else: self.header += stringSegment("* ACTUAL [Hz]", self.spacing[1])
        self.header += stringSegment("* EXPECTED", self.spacing[2])
        self.header += stringSegment("* % DIFF", self.spacing[3])
        self.header += stringSegment("* DEVIATION", self.spacing[4])
        self.header += stringSegment("* AVE PS", self.spacing[5])
        self.header += stringSegment("* COMMENTS", self.spacing[6])
        self.hlength = 181 #sum(self.spacing)
        
    # Use: Runs the program
    # Returns: (void)
    def run(self):
        # Load the fit and trigger list
        if self.fitFile != "":
            inputFit = self.loadFit(self.fitFile)
            for triggerName in inputFit:
                if triggerName[0:3] == "L1_":
                    if self.InputFitL1 is None: self.InputFitL1 = {}
                    self.InputFitL1[triggerName] = inputFit[triggerName]
                elif triggerName[0:4] == "HLT_":
                    if self.InputFitHLT is None: self.InputFitHLT = {}
                    self.InputFitHLT[triggerName] = inputFit[triggerName]
        
        # Sort trigger list into HLT and L1 trigger lists
        if self.triggerList != "":
            for triggerName in self.triggerList:
                if triggerName[0:3] == "L1_":
                    self.TriggerListL1.append(triggerName)
                else:
                    self.TriggerListHLT.append(triggerName)
        # If there aren't preset trigger lists, use all the triggers that we can fit
        else:
            if not self.InputFitHLT is None: self.TriggerListHLT = self.InputFitHLT.keys()
            if not self.InputFitL1 is None: self.TriggerListL1 = self.InputFitL1.keys()

        while True:
            try:
                previous_run_number = self.runNumber
                
                #self.updateRunInfo(previous_run_number)
                self.runNumber, _, _, mode = self.parser.getLatestRunInfo()
                self.triggerMode = mode[0]

## ---Run loop start---

                is_new_run = ( previous_run_number != self.runNumber or self.runNumber < 0 ) 
                if is_new_run:
                    print "Starting a new run: Run %s" % (self.runNumber)
                    self.lastLS = 1
                    self.currentLS = 0
                    self.setMode()
                    physicsActive,avgLumi,avgDeadTime,avgL1rate,PScol = self.queryDB()
                    self.redoTriggerLists()
                else:
                    physicsActive,avgLumi,avgDeadTime,avgL1rate,PScol = self.queryDB()

                self.lumi_ave = avgLumi

                #Construct (or reconstruct) trigger lists
                if self.redoTList:
                    self.redoTriggerLists()

                if len(self.HLTRates) == 0 or len(self.L1Rates) == 0:
                    self.redoTList = True

                # Make sure there is info to use        
                if len(self.HLTRates) == 0 and len(self.L1Rates) == 0:
                    print "No new information can be retrieved. Waiting... (There may be no new LS, or run active may be false)"
                    self.sleepWait()
                    continue

                trigger_table = self.getTriggerTable(physicsActive,avgLumi)

                self.checkTriggers(avgLumi,trigger_table)

                # If there are lumisection to show, print info for them
                if self.currentLS > self.lastLS:
                    self.printTable(trigger_table,physicsActive,avgLumi,avgDeadTime,avgL1rate,PScol)
                    self.isUpdating = True
                else:
                    self.isUpdating = False
                    print "Not enough lumisections. Last LS was %s, current LS is %s. Waiting." % (self.lastLS, self.currentLS)

## ---Run loop end---

                self.mailSender()
                self.sleepWait()

            except KeyboardInterrupt:
                print "Quitting. Bye."
                break
    
    # Queries the Database and updates trigger entries
    def queryDB(self):
        # Get Rates: [triggerName][LS] { raw rate, prescale }
        if not self.useLSRange:
            self.HLTRates = self.parser.getRawRates(self.runNumber, self.lastLS)
            self.L1Rates = self.parser.getL1RawRates(self.runNumber, self.lastLS)
            self.streamData = self.parser.getStreamData(self.runNumber, self.lastLS)
            self.pdData = self.parser.getPrimaryDatasets(self.runNumber, self.lastLS)
        else:
            self.HLTRates = self.parser.getRawRates(self.runNumber, self.LSRange[0], self.LSRange[1])
            self.L1Rates = self.parser.getL1RawRates(self.runNumber, self.LSRange[0], self.LSRange[1])
            self.streamData = self.parser.getStreamData(self.runNumber, self.LSRange[0], self.LSRange[1])
            self.pdData = self.parser.getPrimaryDatasets(self.runNumber, self.LSRange[0], self.LSRange[1])
        self.totalStreams = len(self.streamData.keys())
        self.Rates = {}
        self.Rates.update(self.HLTRates)
        self.Rates.update(self.L1Rates)
        self.totalHLTTriggers = len(self.HLTRates.keys())
        self.totalL1Triggers = len(self.L1Rates.keys())


        lslist = []
        for trig in self.Rates.keys():
            if len(self.Rates[trig]) > 0: lslist.append(max(self.Rates[trig]))
        # Update lastLS
        self.lastLS = self.currentLS
        # Update current LS
        if len(lslist) > 0: self.currentLS = max(lslist)

        if self.useLSRange: # Adjust runs so we only look at those in our range
            self.slidingLS = -1 # No sliding LS window
            self.lastLS = max( [self.lastLS, self.LSRange[0]] )
            self.currentLS = min( [self.currentLS, self.LSRange[1] ] )

        # Get the inst lumi
        avgDeadTime = 0
        try:
            self.deadTimeData = self.parser.getDeadTime(self.runNumber)
            avgDeadTime = 0
        except:
            self.deadTimeData = {}
            avgDeadTime = None
            print "Error getting deadtime data"
        # Get total L1 rate
        l1rate = 0
        try:
            l1rateData = self.parser.getL1rate(self.runNumber)
            avgL1rate = 0
        except:
            l1rateData = {}
            avgL1rate = None
            print "Error getting total L1 rate data"


        self.startLS = self.lastLS
        physicsActive = False
        avgLumi = 0
        PScol = -1
        if self.mode != "cosmics":
            lumiData = self.parser.getLumiInfo(self.runNumber, self.startLS, self.currentLS)
            self.numBunches = self.parser.getNumberCollidingBunches(self.runNumber)
            # Find the average lumi since we last checked
            count = 0
            # Get luminosity (only for non-cosmic runs)
            #for LS, instLumi, psi, physics in lumiData:
            for LS, instLumi, psi, physics, all_subSys_good in lumiData:
                # If we are watching a certain range, throw out other LS
                if self.useLSRange and (LS < self.LSRange[0] or LS > self.LSRange[1]): continue

                # Average our instLumi
                if not instLumi is None and physics:
                    physicsActive = True
                    PScol = psi
                    if not avgDeadTime is None and self.deadTimeData.has_key(LS): 
                        avgDeadTime += self.deadTimeData[LS]
                    else: 
                        avgDeadTime = 0
                    if not avgL1rate is None and l1rateData.has_key(LS):
                        avgL1rate += l1rateData[LS]
                    else:
                        avgL1rate = 0
                    avgLumi += instLumi
                    count += 1
            if count == 0:
                avgLumi = None
            else:
                avgLumi /= float(count)
                avgDeadTime /= float(count)
                avgL1rate /= float(count)
        else:
            count = 0
            avgLumi = None
            for LS in l1rateData.keys():
                if not avgDeadTime is None and self.deadTimeData.has_key(LS):
                    avgDeadTime += self.deadTimeData[LS]
                else:
                    avgDeadTime = 0

                if not avgL1rate is None and l1rateData.has_key(LS):
                    avgL1rate += l1rateData[LS]
                else:
                    avgL1rate = 0
                count += 1
            if not count == 0:
                avgDeadTime /= float(count)
                avgL1rate /= float(count)

        if self.numBunches[0] > 0 and not avgLumi == None:
            self.pu_ave = avgLumi/self.numBunches[0]*ppInelXsec/orbitsPerSec
        else:
            self.pu_ave = "NONE"

        return physicsActive,avgLumi,avgDeadTime,avgL1rate,PScol

    def getTriggerTable(self, physicsActive, avgLumi):
        # A list of tuples, each a row in the table: 
        #   ( { trigger, rate, predicted rate, sign of % diff, abs % diff, sign of sigma, abs sigma,
        #       ave PS, doPred, comment } )
        table = {}

        #Calculate trigger information
        #for trigger in self.triggerList:
        tmp_trigger_list = []
        tmp_trigger_list += self.HLTRates.keys()
        tmp_trigger_list += self.L1Rates.keys()
        #for trigger in self.fullL1HLTMenu:
        for trigger in tmp_trigger_list:
        #for trigger in self.Rates.keys():
            perc = 0
            dev = 0
            aveRate = 0
            properAvePSRate = 0
            avePS = 0
            avePSExpected = 0
            count = 0
            comment = ""
            doPred = physicsActive and self.mode == "collisions"

            if not self.Rates.has_key(trigger): continue

            # If cosmics, don't do predictions
            if self.mode == "cosmics": doPred = False

            # Calculate rate
            if self.mode != "cosmics" and doPred:
                if not avgLumi is None:
                    if trigger in self.TriggerListL1:
                        self.L1 = True
                    else:
                        self.L1 = False
                    expected = self.calculateRate(trigger, avgLumi)
                    if expected < 0: expected = 0                # Don't let expected value be negative
                    avePSExpected = expected
                    # Get the mean square error (standard deviation)
                    mse = self.getMSE(trigger)
                else:
                    expected = None
                    avePSExpected = None
                    mse = None

            correct_for_deadtime = self.deadTimeCorrection
            if trigger[0:3]=="L1_": correct_for_deadtime = False

            for LS in self.Rates[trigger].keys():
                if LS < self.startLS or LS > self.currentLS: continue

                prescale = self.Rates[trigger][LS][1]
                rate = self.Rates[trigger][LS][0]

                try:
                    deadTime = self.deadTimeData[LS]
                except:
                    print "trouble getting deadtime for LS: ", LS," setting DT to zero"
                    deadTime = 0.0

                if correct_for_deadtime: rate *= 1. + (deadTime/100.)
                    
                if prescale > 0: 
                    properAvePSRate += rate/prescale
                else:
                    properAvePSRate += rate

                aveRate += rate
                count += 1
                avePS += prescale

            if count > 0:
                if aveRate == 0: comment += "0 counts "
                aveRate /= count
                properAvePSRate /= count
                avePS /= count
            else:
                #comment += "PS=0"
                comment += "No rate yet "
                doPred = False

            if doPred and not avePSExpected is None and avePS > 1: 
                avePSExpected /= avePS

            # skips if we are not making predictions for this trigger and we are throwing zeros
            #if not doPred and self.removeZeros and aveRate == 0: 
            #    continue

            if self.displayRawRates:
                rate_val = aveRate
            else:
                rate_val = properAvePSRate

            # Blank the expected column when we are not making a prediction
            if not doPred or expected is None:
                avePSExpected = ""

            if doPred:
                if expected == None:
                    perc = "UNDEF"
                    dev = "UNDEF"
                    sign = 1
                else:
                    diff = aveRate - expected
                    if expected != 0: 
                        perc = 100*diff/expected
                    else: 
                        perc = "INF"

                    if mse != 0:
                        dev = diff/mse
                        if abs(dev)>1000000: dev = ">1E6"
                    else: 
                        dev = "INF"

                    if perc > 0: 
                        sign = 1
                    else:
                        sign = -1

                    if perc != "INF":
                        perc = abs(perc)

                    if dev != "INF":
                        dev = abs(dev)
            else:
                sign = "" # No prediction, so no sign of a % diff
                perc = "" # No prediction, so no % diff
                dev = ""  # No prediction, so no deviation

            table[trigger] = [rate_val,avePSExpected,sign,perc,sign,dev,avePS,doPred,comment]

        return table

    def checkTriggers(self,avgLumi,trigger_table):
        # Reset counting variable
        self.normal = 0
        self.bad = 0

        # Merge the monitored trigger list and the full L1+HLT menu, removing duplicates
        tmp_list = []
        for elem in self.triggerList:
            if not elem in tmp_list:
                tmp_list.append(elem)
        for elem in self.fullL1HLTMenu:
            if not elem in tmp_list:
                tmp_list.append(elem)

        #Check if the trigger is good or bad
        for trigger in tmp_list:
            #table[trigger] = [rate_val,avePSExpected,sign,perc,sign,dev,avePS,doPred,comment]
            #if not self.Rates.has_key(trigger): continue
            if not trigger_table.has_key(trigger): 
                print "Trigger missing from table: %s" % trigger
                continue

            properAvePSRate = trigger_table[trigger][0]
            avePSExpected = trigger_table[trigger][1]
            perc = trigger_table[trigger][3]
            dev = trigger_table[trigger][5]
            avePS = trigger_table[trigger][6]
            doPred = trigger_table[trigger][7]

            if doPred:
                # Check for bad rates.
                if self.isBadTrigger(perc, dev, properAvePSRate, trigger[0:3]=="L1_"):
                    self.bad += 1
                    # Record if a trigger was bad
                    if not self.recordAllBadRates.has_key(trigger):
                        self.recordAllBadRates[trigger] = 0
                    self.recordAllBadRates[trigger] += 1
                    # Record consecutive bad rates
                    if not self.badRates.has_key(trigger):
                        self.badRates[trigger] = [1, True, properAvePSRate, avePSExpected, dev, avePS ]
                    else:
                        last = self.badRates[trigger]
                        self.badRates[trigger] = [ last[0]+1, True, properAvePSRate, avePSExpected, dev, avePS ]
                else:
                    self.normal += 1
                    # Remove warning from badRates
                    if self.badRates.has_key(trigger): del self.badRates[trigger]
                        
            else:
                if self.isBadTrigger("", "", properAvePSRate, trigger[0:3]=="L1_"):
                    self.bad += 1
                    # Record if a trigger was bad
                    if not self.recordAllBadRates.has_key(trigger):
                        self.recordAllBadRates[trigger] = 0
                    self.recordAllBadRates[trigger] += 1
                    # Record consecutive bad rates
                    if not self.badRates.has_key(trigger):
                        self.badRates[trigger] = [ 1, True, properAvePSRate, -999, -999, -999 ]
                    else:
                        last = self.badRates[trigger]
                        self.badRates[trigger] = [ last[0]+1, True, properAvePSRate, -999, -999, -999 ]
                else:
                    self.normal += 1
                    # Remove warning from badRates
                    if self.badRates.has_key(trigger): del self.badRates[trigger]

    # Use: Prints a section of a table, i.e. all the triggers in a trigger list (like usableHLTTriggers, otherHLTTriggers, etc)
    def printTableSection(self, triggerList, trigger_table, hasPred, avgLumi=0):
        tmp_dict = {}
        # Find the correct triggers
        for trigger in trigger_table:
            if trigger in triggerList:
                tmp_dict[trigger] = trigger_table[trigger]

        sorted_list = tmp_dict.items()
        #print "Trigger_table: ",trigger_table.keys()
        #table --> [trigger]<rate_val,avePSExpected,sign,perc,sign,dev,avePS,doPred,comment>
        if hasPred:
            if self.usePerDiff:
                #Sort by % diff
                sorted_list.sort(key=lambda tup: tup[1][3], reverse = True)
            else:
                #Sort by deviation
                sorted_list.sort(key=lambda tup: tup[1][5], reverse = True)
        else:
            sorted_list.sort(key=lambda tup: tup[1][0], reverse = True)

        #for trigger, rate, pred, sign, perdiff, dsign, dev, avePS, comment in self.tableData:
        for tup in sorted_list:
            trigger = tup[0]
            rate = trigger_table[trigger][0]
            pred = trigger_table[trigger][1]
            sign = trigger_table[trigger][2]
            perdiff = trigger_table[trigger][3]
            dsign = trigger_table[trigger][4]
            dev = trigger_table[trigger][5]
            avePS = trigger_table[trigger][6]
            doPred = trigger_table[trigger][7]
            comment = trigger_table[trigger][8]

            info = stringSegment("* "+trigger, self.spacing[0])
            info += stringSegment("* "+"{0:.2f}".format(rate), self.spacing[1])

            if pred != "":
                info += stringSegment("* "+"{0:.2f}".format(pred), self.spacing[2])
            else:
                info += stringSegment("", self.spacing[2])

            if perdiff == "":
                info += stringSegment("", self.spacing[3])
            elif perdiff == "INF":
                info += stringSegment("* INF", self.spacing[3])
            else:
                info += stringSegment("* "+"{0:.2f}".format(sign*perdiff), self.spacing[3])

            if dev == "":
                info += stringSegment("", self.spacing[4])
            elif dev == "INF" or dev == ">1E6":
                info += stringSegment("* "+dev, self.spacing[4])
            else:
                info += stringSegment("* "+"{0:.2f}".format(dsign*dev), self.spacing[4])

            info += stringSegment("* "+"{0:.2f}".format(avePS), self.spacing[5])
            info += stringSegment("* "+comment, self.spacing[6])

            # Color the bad triggers with warning colors
            #if avePS != 0 and self.isBadTrigger(perdiff, dev, rate/avePS, trigger[0:3]=="L1_"):
            if avePS != 0 and self.badRates.has_key(trigger):
                if not self.noColors: write(bcolors.WARNING) # Write colored text 
                print info
                if not self.noColors: write(bcolors.ENDC)    # Stop writing colored text
            # Don't color normal triggers
            else:
                print info

    def printTable(self,trigger_table,physicsActive,avgLumi,avgDeadTime,avgL1rate,PScol):
        self.printHeader()

        hasPred = physicsActive and self.mode == "collisions"
        # Print the triggers that we can make predictions for
        anytriggers = False
        if len(self.usableHLTTriggers) > 0:
            print '*' * self.hlength
            print "Predictable HLT Triggers (ones we have a fit for)"
            print '*' * self.hlength
            anytriggers = True
        self.L1 = False
        self.printTableSection(self.usableHLTTriggers,trigger_table,hasPred,avgLumi)
        if len(self.usableL1Triggers) > 0:
            print '*' * self.hlength
            print "Predictable L1 Triggers (ones we have a fit for)"
            print '*' * self.hlength
            anytriggers = True
        self.L1 = True
        self.printTableSection(self.usableL1Triggers,trigger_table,hasPred,avgLumi)


        #check the full menu for paths deviating past thresholds
        fullMenu_fits = False
        #for trigger in self.fullL1HLTMenu: self.getTriggerData(trigger, fullMenu_fits, avgLumi)

        # Print the triggers that we can't make predictions for (only for certain modes)
        if self.useAll or self.mode != "collisions" or self.InputFitHLT is None:
            print '*' * self.hlength
            print "Unpredictable HLT Triggers (ones we have no fit for or do not try to fit)"
            print '*' * self.hlength
            self.L1 = False
            self.printTableSection(self.otherHLTTriggers,trigger_table,False)
            self.printTableSection(self.otherL1Triggers,trigger_table,False)
            anytriggers = True
        if self.useL1:
            print '*' * self.hlength
            print "Unpredictable L1 Triggers (ones we have no fit for or do not try to fit)"
            print '*' * self.hlength
            self.L1 = True
            self.printTableSection(self.otherL1Triggers,trigger_table,False)
            anytriggers = True

        if not anytriggers:
            print '*' * self.hlength
            print "\n --- No useable triggers --- \n"

        # Print stream data
        if self.showStreams:
            print '*' * self.hlength
            streamSpacing = [ 50, 20, 25, 25, 25, 25 ]
            head = stringSegment("* Stream name", streamSpacing[0])
            head += stringSegment("* NLumis", streamSpacing[1])
            head += stringSegment("* Events", streamSpacing[2])
            head += stringSegment("* Stream rate [Hz]", streamSpacing[3])
            head += stringSegment("* File size [GB]", streamSpacing[4])
            head += stringSegment("* Stream bandwidth [GB/s]", streamSpacing[5])
            print head
            print '*' * self.hlength
            for name in sorted(self.streamData.keys()):
                count = 0
                streamsize = 0
                aveBandwidth = 0
                aveRate = 0
                for LS, rate, size, bandwidth in self.streamData[name]:
                    streamsize += size
                    aveRate += rate
                    aveBandwidth += bandwidth
                    count += 1
                if count > 0:
                    aveRate /= count
                    streamsize /= (count*1000000000.0)
                    aveBandwidth /= (count*1000000000.0)
                    row = stringSegment("* "+name, streamSpacing[0])
                    row += stringSegment("* "+str(int(count)), streamSpacing[1])
                    row += stringSegment("* "+str(int(aveRate*23.3*count)), streamSpacing[2])
                    row += stringSegment("* "+"{0:.2f}".format(aveRate), streamSpacing[3])
                    row += stringSegment("* "+"{0:.2f}".format(streamsize), streamSpacing[4])
                    row += stringSegment("* "+"{0:.5f}".format(aveBandwidth), streamSpacing[5])
                    if not self.noColors and aveRate > self.maxStreamRate: write(bcolors.WARNING) # Write colored text
                    print row
                    if not self.noColors and aveRate > self.maxStreamRate: write(bcolors.ENDC)    # Stop writing colored text 
                else: pass

        # Print PD data
        if self.showPDs:
            print '*' * self.hlength
            pdSpacing = [ 50, 20, 25, 25]
            head = stringSegment("* Primary Dataset name", pdSpacing[0])
            head += stringSegment("* NLumis", pdSpacing[1])
            head += stringSegment("* Events", pdSpacing[2])
            head += stringSegment("* Dataset rate [Hz]", pdSpacing[3])
            print head
            print '*' * self.hlength
            for name in self.pdData.keys():
                count = 0
                aveRate = 0
                for LS, rate in self.pdData[name]:
                    aveRate += rate
                    count += 1
                if count > 0:
                    aveRate /= count
                    row = stringSegment("* "+name, pdSpacing[0])
                    row += stringSegment("* "+str(int(count)), pdSpacing[1])
                    row += stringSegment("* "+str(int(aveRate*23.3*count)), pdSpacing[2])
                    row += stringSegment("* "+"{0:.2f}".format(aveRate), pdSpacing[3])
                    if not self.noColors and aveRate > self.maxPDRate: write(bcolors.WARNING) # Write colored text
                    print row
                    if not self.noColors and aveRate > self.maxPDRate: write(bcolors.ENDC)    # Stop writing colored text 
                else: pass

        # Closing information
        print '*' * self.hlength
        print "SUMMARY:"
        if self.mode == "collisions": print "Triggers in Normal Range: %s   |   Triggers outside Normal Range: %s" % (self.normal, self.bad)
        if self.mode == "collisions":
            print "Prescale column index:", 
            if PScol == 0:
                if not self.noColors and PScol == 0 and self.mode != "other": write(bcolors.WARNING) # Write colored text
                print PScol, "\t0 - Column 0 is an emergency column in collision mode, please select the proper column"
                if not self.noColors and PScol == 0 and self.mode != "other": write(bcolors.ENDC)    # Stop writing colored text 
            else:
                print PScol
        try:
            print "Average inst. lumi: %.0f x 10^30 cm-2 s-1" % (avgLumi)
        except:
            print "Average inst. lumi: Not available"
        try:
            print "Total L1 rate: %.0f Hz" % (avgL1rate)
        except:
            print "Total L1 rate: Not available"
        try:
            print "Average dead time: %.2f %%" % (avgDeadTime)
        except:
            print "Average dead time: Not available"
        try: 
            print "Average PU: %.2f" % (self.pu_ave)
        except: 
            print "Average PU: %s" % (self.pu_ave)
        print '*' * self.hlength

    def updateRunInfo(self, previous_run):
        #updates:
        #run number
        #trigger mode (and trigger lists)

        #self.runNumber, self.triggerMode = self.parser.getLatestRunInfo()
        self.runNumber, _, _, mode = self.parser.getLatestRunInfo()
        self.triggerMode = mode[0]
        
        is_new_run = ( previous_run != self.runNumber or self.runNumber < 0 ) 
        if is_new_run:
            print "Starting a new run: Run %s" % (self.runNumber)
            self.lastLS = 1
            self.currentLS = 0
            self.setMode()
            self.redoTriggerLists()
        
    def setMode(self):
        self.sendMailAlerts_dynamic = self.sendMailAlerts_static
        try:
            self.triggerMode = self.parser.getTriggerMode(self.runNumber)[0]
        except:
            self.triggerMode = "Other"
        if self.triggerMode.find("cosmics") > -1:
            self.mode = "cosmics"
        elif self.triggerMode.find("circulate") > -1:
            self.mode = "circulate"
        elif self.triggerMode.find("collisions") > -1:
            self.mode = "collisions"
        elif self.triggerMode == "MANUAL":
            self.mode = "MANUAL"
        elif self.triggerMode.find("highrate") > -1:
            self.mode = "other"
            #self.maxHLTRate = 100000
            #self.maxL1Rate = 100000
        else: self.mode = "other"

    # Use: Remakes the trigger lists
    def redoTriggerLists(self):
        self.redoTList = False
        # Reset the trigger lists
        self.usableHLTTriggers = []
        self.otherHLTTriggers = []
        self.usableL1Triggers = []
        self.otherL1Triggers = []
        self.fullL1HLTMenu = []
        # Reset bad rate records
        self.badRates = {}           # A dictionary: [ trigger name ] { num consecutive bad, trigger bad last check, rate, expected, dev }
        self.recordAllBadRates = {}  # A dictionary: [ trigger name ] < total times the trigger was bad >

        #set trigger lists automatically based on mode
        if not self.useAll and not self.userSpecTrigList:
            if self.mode == "cosmics" or self.mode == "circulate":
                self.triggerList = self.loadTriggersFromFile(self.cosmics_triggerList)
                print "monitoring triggers in: ", self.cosmics_triggerList
            elif self.mode == "collisions":
                self.triggerList = self.loadTriggersFromFile(self.collisions_triggerList)
                print "monitoring triggers in: ", self.collisions_triggerList
            else:
                self.triggerList = ""
                print "No lists to monitor: trigger mode not recognized"

            self.TriggerListL1 = []
            self.TriggerListHLT = []
            for triggerName in self.triggerList:
                if triggerName[0:3]=="L1_":
                    self.TriggerListL1.append(triggerName)
                elif triggerName[0:4]=="HLT_":
                    self.TriggerListHLT.append(triggerName)

        # Re-make trigger lists
        #print self.InputFitHLT.keys()
        for trigger in self.HLTRates.keys():
            if (not self.InputFitHLT is None and self.InputFitHLT.has_key(trigger)) and \
            (len(self.TriggerListHLT) !=0 and trigger in self.TriggerListHLT):
                self.usableHLTTriggers.append(trigger)
            elif trigger[0:4] == "HLT_" and (self.triggerList == "" or trigger in self.TriggerListHLT):
                self.otherHLTTriggers.append(trigger)
            elif (trigger[0:4] == "HLT_"): self.fullL1HLTMenu.append(trigger) 

        for trigger in self.L1Rates.keys():
            if (not self.InputFitL1 is None and self.InputFitL1.has_key(trigger)) and \
            (len(self.TriggerListL1) != 0 and trigger in self.TriggerListL1):
                self.usableL1Triggers.append(trigger)
            elif trigger[0:3] == "L1_" and (self.triggerList =="" or trigger in self.TriggerListL1):
                self.otherL1Triggers.append(trigger)
            elif (trigger[0:3] == "L1_"): self.fullL1HLTMenu.append(trigger)
                        
        self.getHeader()
        
    # Use: Prints the table header
    def printHeader(self):
        print "\n\n", '*' * self.hlength
        print "INFORMATION:"
        print "Run Number: %s" % (self.runNumber)
        print "LS Range: %s - %s" % (self.startLS, self.currentLS)
        print "Latest LHC Status: %s" % self.parser.getLHCStatus()[1]
        print "Number of colliding bunches: %s" % self.numBunches[0]
        print "Trigger Mode: %s (%s)" % (self.triggerMode, self.mode)
        print "Number of HLT Triggers: %s \nNumber of L1 Triggers: %s" % (self.totalHLTTriggers, self.totalL1Triggers)
        print "Number of streams:", self.totalStreams
        print '*' * self.hlength
        print self.header
        
    # Use: Returns whether a given trigger is bad
    # Returns: Whether the trigger is bad
    def isBadTrigger(self, perdiff, dev, psrate, isL1):
        if psrate == 0.0: return False
        if ( (self.usePerDiff and perdiff!="INF" and perdiff!="" and abs(perdiff)>self.percAccept) or (dev!="INF" and dev!="" and (dev==">1E6" or abs(dev)>self.devAccept)))\
        or (perdiff!="INF" and perdiff!="" and abs(perdiff)>self.percAccept and dev!="INF" and dev!="" and abs(dev)>self.devAccept)\
        or (isL1 and psrate>self.maxL1Rate)\
        or (not isL1 and psrate>self.maxHLTRate): return True
        
        return False
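    # An illustrative sketch with the default thresholds (hypothetical numbers): an HLT path
    # measured at 120 Hz against an expectation of 100 Hz has perdiff = 20 and, assuming a fit
    # MSE of 10 Hz, dev = 2, so it is not flagged (percAccept = 50, devAccept = 5); the same
    # path at a prescaled rate of 900 Hz is flagged regardless, since it exceeds maxHLTRate.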
            
    def mailSender(self):
        if self.displayBadRates != 0:
            count = 0
            if self.displayBadRates != -1: write("First %s triggers that are bad: " % (self.displayBadRates)) 
            elif len(self.badRates) > 0 : write("All triggers deviating past thresholds from fit and/or L1 rate > %s Hz, HLT rate > %s Hz: " %(self.maxL1Rate,self.maxHLTRate))
            for trigger in self.badRates:
                if self.badRates[trigger][1]:
                    count += 1
                    write(trigger)
                    if count != self.displayBadRates-1:
                        write(", ")
                if count == self.displayBadRates:
                    write(".....")
                    break
            print ""

        # Print warnings for triggers that have been repeatedly misbehaving
        mailTriggers = [] # A list of triggers that we should mail alerts about
        for trigger in self.badRates:
            if self.badRates[trigger][1]:
                if self.badRates[trigger][0] >= 1:
                    print "Trigger %s has been out of line for more than %.1f minutes" % (trigger, float(self.badRates[trigger][0])*self.scale_sleeptime)
                # We want to mail an alert whenever a trigger exits the acceptable threshold envelope
                if self.badRates[trigger][0] == self.maxCBR:
                    mailTriggers.append( [ trigger, self.badRates[trigger][2], self.badRates[trigger][3], self.badRates[trigger][4], self.badRates[trigger][5] ] )
        # Send mail alerts
        if len(mailTriggers) > 0 and self.isUpdating:
            if self.sendMailAlerts_static and self.sendMailAlerts_dynamic: self.sendMail(mailTriggers)
            if self.sendAudioAlerts: audioAlert()

    # Use: Sleeps and prints out waiting dots
    def sleepWait(self):
        if not self.quiet: print "Sleeping for %.1f sec before next query" % (60.0*self.scale_sleeptime)
        for iSleep in range(20):
            if not self.quiet: write(".")
            sys.stdout.flush()
            time.sleep(3.0*self.scale_sleeptime)
        sys.stdout.flush()
        print ""
            
    # Use: Loads the fit data from the fit file
    # Parameters:
    # -- fitFile: The file that the fit data is stored in (a pickle file)
    # Returns: The input fit data
    def loadFit(self, fileName):
        if fileName == "":
            return None
        InputFit = {} # Initialize InputFit (as an empty dictionary)
        # Try to open the file containing the fit info
        try:
            pkl_file = open(fileName, 'rb')
            InputFit = pickle.load(pkl_file)
            pkl_file.close()
        except:
            # File failed to open
            print "Error: could not open fit file: %s" % (fileName)
        return InputFit

    # Use: Calculates the expected rate for a trigger at a given ilumi based on our input fit
    def calculateRate(self, triggerName, ilum):
        # Make sure we have a fit for the trigger
        if not self.L1 and (self.InputFitHLT is None or not self.InputFitHLT.has_key(triggerName)):
            return 0
        elif self.L1 and ((self.InputFitL1 is None) or not self.InputFitL1.has_key(triggerName)):
            return 0
        # Get the param list
        if self.L1: paramlist = self.InputFitL1[triggerName]
        else: paramlist = self.InputFitHLT[triggerName]
        # Calculate the rate
        if paramlist[0]=="exp": funcStr = "%s + %s*expo(%s+%s*x)" % (paramlist[1], paramlist[2], paramlist[3], paramlist[4]) # Exponential
        else: funcStr = "%s+x*(%s+ x*(%s+x*%s))" % (paramlist[1], paramlist[2], paramlist[3], paramlist[4]) # Polynomial
        fitFunc = TF1("Fit_"+triggerName, funcStr)
        if self.pileUp:
            if self.numBunches[0] > 0:
                return self.numBunches[0]*fitFunc.Eval(ilum/self.numBunches[0]*ppInelXsec/orbitsPerSec)
            else:
                return 0
        return fitFunc.Eval(ilum)

    # Use: Gets the MSE of the fit
    def getMSE(self, triggerName):
        if not self.L1 and (self.InputFitHLT is None or not self.InputFitHLT.has_key(triggerName)):
            return 0
        elif self.L1 and ((self.InputFitL1 is None) or not self.InputFitL1.has_key(triggerName)):
            return 0
        if self.L1: paramlist = self.InputFitL1[triggerName]
        else: paramlist = self.InputFitHLT[triggerName]
        if self.pileUp:
            return self.numBunches[0]*paramlist[5]
        return paramlist[5] # The MSE
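    # For reference, the fit record assumed above for each trigger (inferred from the indexing;
    # the exact layout of the pickle file may differ):
    #   paramlist[0]   : fit type ("exp" for exponential, otherwise a cubic polynomial)
    #   paramlist[1:5] : the four parameters substituted into the TF1 formula string
    #   paramlist[5]   : the fit MSE, used as the sigma when computing the deviation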

    # Use: Sends an email alert
    # Parameters:
    # -- mailTriggers: A list of triggers to include in the mail, ( [ triggerName, aveRate, expected rate, deviation, avePS ] )
    # Returns: (void)
    def sendMail(self, mailTriggers):
        mail = "Run: %d, Lumisections: %s - %s \n" % (self.runNumber, self.lastLS, self.currentLS)
        try: mail += "Average inst. lumi: %.0f x 10^30 cm-2 s-1\n" % (self.lumi_ave)
        except: mail += "Average inst. lumi: %s x 10^30 cm-2 s-1\n" % (self.lumi_ave)
        
        try: mail += "Average PU: %.2f\n \n" % (self.pu_ave)
        except: mail += "Average PU: %s\n \n" % (self.pu_ave)
        
        mail += "Trigger rates deviating from acceptable and/or expected values: \n\n"

        for triggerName, rate, expected, dev, ps in mailTriggers:
        
            if self.numBunches[0] == 0:
                mail += "\n %s: Actual: %s Hz\n" % (stringSegment(triggerName, 35), rate)
            else:
                if expected > 0:
                    try: mail += "\n %s: Expected: %.1f Hz, Actual: %.1f Hz, Unprescaled Expected/nBunches: %.5f Hz, Unprescaled Actual/nBunches: %.5f Hz, Deviation: %.1f\n" % (stringSegment(triggerName, 35), expected, rate, expected*ps/self.numBunches[0], rate*ps/self.numBunches[0], dev)
                    except: mail += "\n %s: Expected: %s Hz, Actual: %s Hz, Unprescaled Expected/nBunches: %s Hz, Unprescaled Actual/nBunches: %s Hz, Deviation: %s\n" % (stringSegment(triggerName, 35), expected, rate, expected*ps/self.numBunches[0], rate*ps/self.numBunches[0], dev)
                    mail += "  *referenced fit: <https://raw.githubusercontent.com/cms-tsg-fog/RateMon/master/Fits/2016/plots/%s.png>\n" % (triggerName)                    
                else:
                    try: mail += "\n %s: Actual: %.1f Hz\n" % (stringSegment(triggerName, 35), rate)
                    except: mail += "\n %s: Actual: %s Hz\n" % (stringSegment(triggerName, 35), rate)

            try:
                wbm_url = self.parser.getWbmUrl(self.runNumber,triggerName,self.currentLS)
                if not wbm_url == "-": mail += "  *WBM rate: <%s>\n" % (wbm_url)
            except:
                print "WBM plot url query failed"

                
        mail += "\nWBM Run Summary: <https://cmswbm.web.cern.ch/cmswbm/cmsdb/servlet/RunSummary?RUN=%s> \n\n" % (self.runNumber)
        mail += "Email warnings triggered when: \n"
        mail += "   - L1 or HLT rates deviate by more than %s standard deviations from fit \n" % (self.devAccept)
        mail += "   - HLT rates > %s Hz \n" % (self.maxHLTRate)
        mail += "   - L1 rates > %s Hz \n" % (self.maxL1Rate)

        print "--- SENDING MAIL ---\n"+mail+"\n--------------------"
        mailAlert(mail)
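
A minimal usage sketch for the class above (not part of the original example; the module name in the import is hypothetical, and the fit file and monitor lists are assumed to exist alongside the script):

    from ShiftMonitorNCR import ShiftMonitor   # hypothetical module name

    monitor = ShiftMonitor()
    monitor.fitFile = "Fits/2016/FOG.pkl"      # pickle file holding the HLT and L1 fits
    monitor.displayBadRates = 10               # only list the first 10 misbehaving triggers
    monitor.scale_sleeptime = 1.0              # query the DB roughly once per minute
    monitor.run()                              # loops until interrupted with Ctrl-C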
Example No. 2
class ShiftMonitor:

    def __init__(self):
        # Suppress root warnings
        ROOT.gErrorIgnoreLevel = 7000
        # Fits and fit files
        self.fitFile = "Fits/2016/FOG.pkl"               # The fit file, can contain both HLT and L1 triggers
        #self.fitFile = "../HLT_Fit_Run275911-276244_Tot12_fit.pkl"
        #        self.fitFile = ""#fits__273013-273017/HLT_Fit_Run273013-273017_Tot12_fit.pkl"               # The fit file, can contain both HLT and L1 triggers
        self.InputFitHLT = None         # The fit information for the HLT triggers
        self.InputFitL1 = None          # The fit information for the L1 triggers
        # DBParser
        self.parser = DBParser()        # A database parser
        # Rates
        self.HLTRates = None            # HLT rates
        self.L1Rates = None             # L1 rates
        self.Rates = None               # Combined L1 and HLT rates
        self.deadTimeData = {}          # initializing deadTime dict
        # Run control
        self.lastRunNumber = -2         # The run number during the last segment
        self.runNumber = -1             # The number of the current run
        self.numBunches = [-1, -1]     # Number of [target, colliding] bunches
        # Running over a previously done run
        self.assignedNum = False        # If true, we look at the assigned run, not the latest run
        self.LSRange = []               # If we want to only look at a range of LS from the run
        ##self.simulate = False           # Simulate running through and monitoring a previous run
        # Lumisection control
        self.lastLS = 1                 # The last LS that was processed last segment
        self.currentLS = 1              # The latest LS written to the DB
        self.slidingLS = -1             # The number of LS to average over, use -1 for no sliding LS
        self.useLSRange = False         # Only look at LS in a certain range
        # Mode
        self.triggerMode = None         # The trigger mode
        self.mode = None                # Mode: cosmics, circulate, collisions, etc.
        # Columns header
        self.displayRawRates = False    # If True, display raw rates; if False, display prescaled (actual) rates
        self.pileUp = True              # derive expected rate as a function of the pileUp, and not the luminosity
        # Triggers
        self.cosmics_triggerList = "monitorlist_COSMICS.list" #default list used when in cosmics mode
        self.collisions_triggerList = "monitorlist_COLLISIONS.list" #default list used when in collision mode 
        self.triggerList = ""           # A list of all the L1 and HLT triggers we want to monitor
        self.userSpecTrigList = False   # User specified trigger list 
        self.usableHLTTriggers = []     # HLT Triggers active during the run that we have fits for (and are in the HLT trigger list if it exists)
        self.otherHLTTriggers = []      # HLT Triggers active during the run that are not usable triggers
        self.usableL1Triggers = []      # L1 Triggers active during the run that we have fits for (and are in the L1 trigger list if it exists)
        self.otherL1Triggers = []       # L1 Triggers active during the run that are not usable triggers
        self.redoTList = True           # Whether we need to update the trigger lists
        self.useAll = False             # If true, we will print out the rates for all the HLT triggers
        self.useL1 = False              # If true, we will print out the rates for all the L1 triggers
        self.totalHLTTriggers = 0       # The total number of HLT Triggers on the menu this run
        self.totalL1Triggers = 0        # The total number of L1 Triggers on the menu this run
        self.fullL1HLTMenu = []
        self.ignoreStrings = ["Calibration","L1Tech","BPTX","Bptx"]
        # Restrictions
        self.removeZeros = False        # If true, we don't show triggers that have zero rate
        # Trigger behavior
        self.percAccept = 50.0          # The acceptance for % diff
        self.devAccept = 5              # The acceptance for deviation
        self.badRates = {}              # A dictionary: [ trigger name ] { num consecutive bad , whether the trigger was bad last time we checked, rate, expected, dev }
        self.recordAllBadRates = {}     # A dictionary: [ trigger name ] < total times the trigger was bad >
        self.maxCBR = 3                 # The maximum number of consecutive DB queries a trigger may deviate from prediction before an alert is sent
        self.displayBadRates = -1       # The number of bad rates we should show in the summary. We use -1 for all
        self.usePerDiff = False         # Whether we should identify bad triggers by percent diff or deviation
        self.sortRates = True           # Whether we should sort triggers by their rates
        self.maxHLTRate = 500           # The maximum prescaled rate we allow an HLT Trigger to have
        self.maxL1Rate = 30000          # The maximum prescaled rate we allow an L1 Trigger to have
        # Other options
        self.quiet = False              # Prints fewer messages in this mode
        self.noColors = False           # Special formatting for if we want to dump the table to a file
        self.sendMailAlerts_static = True      # Whether we should send alert mails
        self.sendMailAlerts_dynamic = self.sendMailAlerts_static      
        self.sendAudioAlerts = False    # Whether we should send audio warning messages in the control room (CAUTION)
        self.isUpdating = True          # flag to determine whether or not we're receiving new LS
        self.showStreams = False         # Whether we should print stream information
        self.showPDs = False             # Whether we should print pd information
        self.totalStreams = 0           # The total number of streams
        self.maxStreamRate = 1000000    # The maximum rate we allow a "good" stream to have
        self.maxPDRate = 250            # The maximum rate we allow a "good" pd to have        
        self.lumi_ave = "NONE"
        self.pu_ave = "NONE"
        self.deadTimeCorrection = True  # correct the rates for dead time
        self.scale_sleeptime = 2.0      # Scales the length of time to wait before sending another query (1.0 = 60sec, 2.0 = 120sec, etc)

    # Use: Opens a file containing a list of trigger names and adds them to the RateMonitor class's trigger list
    # Note: We do not clear the trigger list; this way we can add triggers from multiple files to the trigger list
    # -- fileName: The name of the file that trigger names are contained in
    # Returns: (void)
    def loadTriggersFromFile(self, fileName):
        try:
            file = open(fileName, 'r')
        except:
            print "File", fileName, "(a trigger list file) failed to open."
            return []

        allTriggerNames = file.read().split() # Get all the words, no argument -> split on any whitespace
        file.close()
        TriggerList = []
        for triggerName in allTriggerNames:
            # Recognize comments
            if triggerName[0] == '#': continue
            try:
                strippedName = stripVersion(str(triggerName))
                if not strippedName in TriggerList:
                    TriggerList.append(strippedName)
            except:
                print "Error parsing trigger name in file", fileName
        return TriggerList

    # Use: Formats the header string
    # Returns: (void)
    def getHeader(self):
        # Define spacing and header
        maxNameHLT = 0
        maxNameL1 = 0
        if len(self.usableHLTTriggers)>0 or len(self.otherHLTTriggers)>0:
            maxNameHLT = max([len(trigger) for trigger in self.usableHLTTriggers+self.otherHLTTriggers])
        if len(self.usableL1Triggers)>0 or len(self.otherL1Triggers)>0:
            maxNameL1 = max([len(trigger) for trigger in self.usableL1Triggers+self.otherL1Triggers])

        # Make the name spacing at least 90
        maxName = max([maxNameHLT, maxNameL1, 90])
        
        self.spacing = [maxName + 5, 14, 14, 14, 14, 14, 0]
        self.spacing[5] = max( [ 181 - sum(self.spacing), 0 ] )

        self.header = ""                # The table header
        self.header += stringSegment("* TRIGGER NAME", self.spacing[0])
        if (self.displayRawRates): self.header += stringSegment("* RAW [Hz]", self.spacing[1])
        else: self.header += stringSegment("* ACTUAL [Hz]", self.spacing[1])
        self.header += stringSegment("* EXPECTED", self.spacing[2])
        self.header += stringSegment("* % DIFF", self.spacing[3])
        self.header += stringSegment("* DEVIATION", self.spacing[4])
        self.header += stringSegment("* AVE PS", self.spacing[5])
        self.header += stringSegment("* COMMENTS", self.spacing[6])
        self.hlength = 181 #sum(self.spacing)

    # Use: Runs the program
    # Returns: (void)
    def run(self):
        # Load the fit and trigger list
        if self.fitFile!="":
            inputFit = self.loadFit(self.fitFile)
            for triggerName in inputFit:
                if triggerName[0:3]=="L1_":
                    if self.InputFitL1 is None: self.InputFitL1 = {}
                    self.InputFitL1[stripVersion(triggerName)] = inputFit[triggerName]
                elif triggerName[0:4] =="HLT_":
                    if self.InputFitHLT is None: self.InputFitHLT = {}
                    self.InputFitHLT[stripVersion(triggerName)] = inputFit[triggerName]
        
        # Sort trigger list into HLT and L1 trigger lists
        if self.triggerList!="":
            for triggerName in self.triggerList:
                if triggerName[0:3]=="L1_":
                    self.TriggerListL1.append(triggerName)
                else:
                    self.TriggerListHLT.append(triggerName)
        # If there aren't preset trigger lists, use all the triggers that we can fit
        else:
            if not self.InputFitHLT is None: self.TriggerListHLT = self.InputFitHLT.keys()
            if not self.InputFitL1 is None: self.TriggerListL1 = self.InputFitL1.keys()

        # Get run info: the current run number, two status values we do not use here, and the trigger mode
        if not self.assignedNum:
            self.runNumber, _, _, mode = self.parser.getLatestRunInfo()
            self.triggerMode = mode[0]
            # Info message
            print "The current run number is %s." % (self.runNumber)
        # If we are observing a single run from the past
        ##elif not self.simulate:
        else:
            try:
                self.triggerMode = self.parser.getTriggerMode(self.runNumber)[0]
            except:
                self.triggerMode = "Other"
            self.getRates()
            self.runLoop()
            self.checkTriggers()
            return

        # If we are simulating a previous run
        #if self.simulate:
        #    self.simulateRun()
        #    return

        # Run as long as we can
        self.setMode()
        self.redoTList = True

        while True:
            try:
                # Check if we are still in the same run, get trigger mode
                self.lastRunNumber = self.runNumber
                self.runNumber, _, _, mode = self.parser.getLatestRunInfo()
                self.runLoop()      
                self.checkTriggers()
                self.sleepWait()
            except KeyboardInterrupt:
                print "Quitting. Bye."
                break
            
    ##def simulateRun(self):
    ##    modTime = 0
    ##    # Get the rates
    ##    self.triggerMode = self.parser.getTriggerMode(self.runNumber)[0]
    ##    # Set the trigger mode
    ##    self.setMode()
    ##    # Get the rates for the entire run
    ##    self.getRates()
    ##    # Find the max LS for that run
    ##    trig = self.Rates.keys()[0]
    ##    self.lastLS = self.currentLS
    ##    maxLS = max(self.Rates[trig].keys())
    ##    # Simulate the run
    ##    self.lastRunNumber = self.runNumber
    ##    self.lastLS = 1
    ##    while self.currentLS < maxLS:
    ##        modTime += 23.3
    ##        self.currentLS += 1
    ##        if modTime > 60 or self.currentLS == maxLS:
    ##            modTime -= 60
    ##            # Print table
    ##            self.runLoop()
    ##            # Check for bad triggers
    ##            self.checkTriggers()
    ##            # We would sleep here if this was an online run
    ##            if not self.quiet: print "Simulating 60 s of sleep..."
    ##            self.lastLS = self.currentLS
    ##    print "End of simulation"


    # Use: The main body of the main loop, checks the mode, creates trigger lists, prints table
    # Returns: (void)
    def runLoop(self):
        # Reset counting variable
        self.normal = 0
        self.bad = 0

        # If we have started a new run
        if self.lastRunNumber != self.runNumber:
            print "Starting a new run: Run %s" % (self.runNumber)
            self.lastLS = 1
            self.currentLS = 0
            # Check what mode we are in
            self.setMode()
            self.getRates()
            self.redoTriggerLists()
            
        # Get Rates: [triggerName][LS] { raw rate, prescale }
        ##if not self.simulate: self.getRates()
        self.getRates()
        #Construct (or reconstruct) trigger lists
        if self.redoTList:
            self.redoTriggerLists()

        # Make sure there is info to use
        
        if len(self.HLTRates) == 0 or len(self.L1Rates) == 0:
            self.redoTList = True
        if len(self.HLTRates) == 0 and len(self.L1Rates) == 0:
            print "No new information can be retrieved. Waiting... (There may be no new LS, or run active may be false)"
            self.redoTList = True
            return
        
        # If we are not simulating a previous run. Otherwise, we already set lastLS and currentLS
        ##if not self.simulate:
        lslist = []
        for trig in self.Rates.keys():
            if len(self.Rates[trig])>0: lslist.append(max(self.Rates[trig]))
        # Update lastLS
        self.lastLS = self.currentLS
        # Update current LS
        if len(lslist)>0: self.currentLS = max(lslist)
        
        if self.useLSRange: # Adjust runs so we only look at those in our range
            self.slidingLS = -1 # No sliding LS window
            self.lastLS = max( [self.lastLS, self.LSRange[0]] )
            self.currentLS = min( [self.currentLS, self.LSRange[1] ] )

        # If there are lumisections to show, print info for them
        if self.currentLS > self.lastLS:
            self.printTable()
            self.isUpdating = True
        else:
            self.isUpdating = False
            print "Not enough lumisections. Last LS was %s, current LS is %s. Waiting." % (self.lastLS, self.currentLS)

    def setMode(self):
        self.sendMailAlerts_dynamic = self.sendMailAlerts_static
        try:
            self.triggerMode = self.parser.getTriggerMode(self.runNumber)[0]
        except:
            self.triggerMode = "Other"
        if self.triggerMode.find("cosmics") > -1:
            self.mode = "cosmics"
        elif self.triggerMode.find("circulate") > -1:
            self.mode = "circulate"
        elif self.triggerMode.find("collisions") > -1:
            self.mode = "collisions"
        elif self.triggerMode == "MANUAL":
            self.mode = "MANUAL"
        elif self.triggerMode.find("highrate") > -1:
            self.mode = "other"
            self.sendMailAlerts_dynamic = False
        else: self.mode = "other"
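
    # Illustrative mapping (trigger-mode strings below are hypothetical): a mode string
    # such as "l1_hlt_collisions2016/v99" contains "collisions" and maps to "collisions",
    # a "cosmics..." key maps to "cosmics", and anything that matches none of the
    # substrings above falls back to "other".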

    # Use: Remakes the trigger lists
    def redoTriggerLists(self):
        self.redoTList = False
        # Reset the trigger lists
        self.usableHLTTriggers = []
        self.otherHLTTriggers = []
        self.usableL1Triggers = []
        self.otherL1Triggers = []
        self.fullL1HLTMenu = []
        # Reset bad rate records
        self.badRates = {}           # A dictionary: [ trigger name ] { num consecutive bad, trigger bad last check, rate, expected, dev }
        self.recordAllBadRates = {}  # A dictionary: [ trigger name ] < total times the trigger was bad >

        #set trigger lists automatically based on mode
        if not self.useAll and not self.userSpecTrigList:
            if self.mode == "cosmics" or self.mode == "circulate":
                self.triggerList = self.loadTriggersFromFile(self.cosmics_triggerList)
                print "monitoring triggers in: ", self.cosmics_triggerList
            elif self.mode == "collisions":
                self.triggerList = self.loadTriggersFromFile(self.collisions_triggerList)
                print "monitoring triggers in: ", self.collisions_triggerList
            else:
                self.triggerList = ""
                print "No lists to monitor: trigger mode not recognized"

            self.TriggerListL1 = []
            self.TriggerListHLT = []
            for triggerName in self.triggerList:
                if triggerName[0:3]=="L1_":
                    self.TriggerListL1.append(triggerName)
                elif triggerName[0:4]=="HLT_":
                    self.TriggerListHLT.append(triggerName)

        # Re-make trigger lists
        for trigger in self.HLTRates.keys():
            if (not self.InputFitHLT is None and self.InputFitHLT.has_key(trigger)) and \
            (len(self.TriggerListHLT) !=0 and trigger in self.TriggerListHLT):
                self.usableHLTTriggers.append(trigger)
            elif trigger[0:4] == "HLT_" and (self.triggerList == "" or trigger in self.TriggerListHLT):
                self.otherHLTTriggers.append(trigger)
            elif (trigger[0:4] == "HLT_"): self.fullL1HLTMenu.append(trigger) 

        for trigger in self.L1Rates.keys():
            if (not self.InputFitL1 is None and self.InputFitL1.has_key(trigger)) and \
            (len(self.TriggerListL1) != 0 and trigger in self.TriggerListL1):
                self.usableL1Triggers.append(trigger)
            elif trigger[0:3] == "L1_" and (self.triggerList =="" or trigger in self.TriggerListL1):
                self.otherL1Triggers.append(trigger)
            elif (trigger[0:3] == "L1_"): self.fullL1HLTMenu.append(trigger) 
                        
        self.getHeader()
        
    # Use: Gets the rates for the lumisections we want
    def getRates(self):
        if not self.useLSRange:
            self.HLTRates = self.parser.getRawRates(self.runNumber, self.lastLS)
            self.L1Rates = self.parser.getL1RawRates(self.runNumber)
            self.streamData = self.parser.getStreamData(self.runNumber, self.lastLS)
            self.pdData = self.parser.getPrimaryDatasets(self.runNumber, self.lastLS)
        else:
            self.HLTRates = self.parser.getRawRates(self.runNumber, self.LSRange[0], self.LSRange[1])
            self.L1Rates = self.parser.getL1RawRates(self.runNumber)
            self.streamData = self.parser.getStreamData(self.runNumber, self.LSRange[0], self.LSRange[1])
            self.pdData = self.parser.getPrimaryDatasets(self.runNumber, self.LSRange[0], self.LSRange[1])
        self.totalStreams = len(self.streamData.keys())
        self.Rates = {}
        self.Rates.update(self.HLTRates)
        self.Rates.update(self.L1Rates)
        self.totalHLTTriggers = len(self.HLTRates.keys())
        self.totalL1Triggers = len(self.L1Rates.keys())
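
    # Illustrative structure of the dictionaries filled above (trigger names and numbers
    # are made up): self.Rates maps [triggerName][LS] -> [raw rate in Hz, prescale], e.g.
    #   self.Rates = { 'HLT_IsoMu27'   : { 101: [55.2, 1.0], 102: [54.8, 1.0] },
    #                  'L1_SingleMu22' : { 101: [1200.0, 1.0] } }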
                
    # Use: Retrieves information and prints it in table form
    def printTable(self):
        if self.slidingLS == -1:
            self.startLS = self.lastLS
        else: self.startLS = max( [0, self.currentLS-self.slidingLS ] )+1
        # Reset variable
        self.normal = 0
        self.bad = 0
        PScol = -1
        # Get the inst lumi
        aveLumi = 0
        try:
            self.deadTimeData = self.parser.getDeadTime(self.runNumber)
            aveDeadTime = 0
        except:
            self.deadTimeData = {}
            aveDeadTime = None
            print "Error getting deadtime data"
        
        # Get total L1 rate
        l1rate = 0
        try:
            l1rateData = self.parser.getL1rate(self.runNumber)
            aveL1rate = 0
        except:
            l1rateData = {}
            aveL1rate = None
            print "Error getting total L1 rate data"
            
        physicsActive = False # True if we have at least 1 LS with lumi and physics bit true
        if self.mode != "cosmics":
            lumiData = self.parser.getLumiInfo(self.runNumber, self.startLS, self.currentLS)
            self.numBunches = self.parser.getNumberCollidingBunches(self.runNumber)
            # Find the average lumi since we last checked
            count = 0
            # Get luminosity (only for non-cosmic runs)
            for LS, instLumi, psi, physics, all_subSys_good in lumiData:
                # If we are watching a certain range, throw out other LS
                if self.useLSRange and (LS < self.LSRange[0] or LS > self.LSRange[1]): continue
                # Average our instLumi
                if not instLumi is None and physics:
                    physicsActive = True
                    PScol = psi
                    if not aveDeadTime is None and self.deadTimeData.has_key(LS): aveDeadTime += self.deadTimeData[LS]
                    else: aveDeadTime = 0
                    if not aveL1rate is None and l1rateData.has_key(LS): aveL1rate += l1rateData[LS]
                    else: aveL1rate = 0
                    aveLumi += instLumi
                    count += 1
            if count == 0:
                aveLumi = "NONE"
                expected = "NONE"
            else:
                aveLumi /= float(count)
                aveDeadTime /= float(count)
                aveL1rate /= float(count)
        else:
            count = 0
            for LS in l1rateData.keys():
                if self.useLSRange and (LS < self.LSRange[0] or LS > self.LSRange[1]): continue
                if not aveDeadTime is None and self.deadTimeData.has_key(LS): aveDeadTime += self.deadTimeData[LS]
                else: aveDeadTime = 0
                if not aveL1rate is None and l1rateData.has_key(LS): aveL1rate += l1rateData[LS]
                else: aveL1rate = 0
                count += 1
            if not count == 0:
                aveDeadTime /= float(count)
                aveL1rate /= float(count)
            
            
        self.lumi_ave = aveLumi
        if self.numBunches[0] > 0 and not aveLumi == "NONE":
            self.pu_ave = aveLumi/self.numBunches[0]*ppInelXsec/orbitsPerSec
        else:
            self.pu_ave = "NONE"
        # We only do predictions when there were physics active LS in a collisions run
        doPred = physicsActive and self.mode=="collisions"
        # Print the header
        self.printHeader()
        # Print the triggers that we can make predictions for
        anytriggers = False
        if len(self.usableHLTTriggers)>0:
            print '*' * self.hlength
            print "Predictable HLT Triggers (ones we have a fit for)"
            print '*' * self.hlength
            anytriggers = True
        self.L1 = False
        self.printTableSection(self.usableHLTTriggers, doPred, aveLumi)
        if len(self.usableL1Triggers)>0:
            print '*' * self.hlength
            print "Predictable L1 Triggers (ones we have a fit for)"
            print '*' * self.hlength
            anytriggers = True
        self.L1 = True
        self.printTableSection(self.usableL1Triggers, doPred, aveLumi)

        #check the full menu for paths deviating past thresholds
        fullMenu_fits = False
        for trigger in self.fullL1HLTMenu: self.getTriggerData(trigger, fullMenu_fits, aveLumi)

        # Print the triggers that we can't make predictions for
        if self.useAll or self.mode != "collisions" or self.InputFitHLT is None:
            print '*' * self.hlength
            print "Unpredictable HLT Triggers (ones we have no fit for or do not try to fit)"
            print '*' * self.hlength
            self.L1 = False
            self.printTableSection(self.otherHLTTriggers, False)
            self.printTableSection(self.otherL1Triggers, False)
            anytriggers = True
        if self.useL1:
            print '*' * self.hlength
            print "Unpredictable L1 Triggers (ones we have no fit for or do not try to fit)"
            print '*' * self.hlength
            self.L1 = True
            self.printTableSection(self.otherL1Triggers, False)
            anytriggers = True

        if not anytriggers:
            print '*' * self.hlength
            print "\n --- No useable triggers --- \n"

        # Print stream data
        if self.showStreams:
            print '*' * self.hlength
            streamSpacing = [ 50, 20, 25, 25, 25, 25 ]
            head = stringSegment("* Stream name", streamSpacing[0])
            head += stringSegment("* NLumis", streamSpacing[1])
            head += stringSegment("* Events", streamSpacing[2])
            head += stringSegment("* Stream rate [Hz]", streamSpacing[3])
            head += stringSegment("* File size [GB]", streamSpacing[4])
            head += stringSegment("* Stream bandwidth [GB/s]", streamSpacing[5])
            print head
            print '*' * self.hlength
            for name in sorted(self.streamData.keys()):
                count = 0
                streamsize = 0
                aveBandwidth = 0
                aveRate = 0
                for LS, rate, size, bandwidth in self.streamData[name]:
                    streamsize += size
                    aveRate += rate
                    aveBandwidth += bandwidth
                    count += 1
                if count > 0:
                    aveRate /= count
                    streamsize /= (count*1000000000.0)
                    aveBandwidth /= (count*1000000000.0)
                    row = stringSegment("* "+name, streamSpacing[0])
                    row += stringSegment("* "+str(int(count)), streamSpacing[1])
                    row += stringSegment("* "+str(int(aveRate*23.3*count)), streamSpacing[2])
                    row += stringSegment("* "+"{0:.2f}".format(aveRate), streamSpacing[3])
                    row += stringSegment("* "+"{0:.2f}".format(streamsize), streamSpacing[4])
                    row += stringSegment("* "+"{0:.5f}".format(aveBandwidth), streamSpacing[5])
                    if not self.noColors and aveRate > self.maxStreamRate and self.mode != "other": write(bcolors.WARNING) # Write colored text
                    print row
                    if not self.noColors and aveRate > self.maxStreamRate and self.mode != "other": write(bcolors.ENDC)    # Stop writing colored text 
                else: pass

        # Print PD data
        if self.showPDs:
            print '*' * self.hlength
            pdSpacing = [ 50, 20, 25, 25]
            head = stringSegment("* Primary Dataset name", pdSpacing[0])
            head += stringSegment("* NLumis", pdSpacing[1])
            head += stringSegment("* Events", pdSpacing[2])
            head += stringSegment("* Dataset rate [Hz]", pdSpacing[3])
            print head
            print '*' * self.hlength
            for name in self.pdData.keys():
                count = 0
                aveRate = 0
                for LS, rate in self.pdData[name]:
                    aveRate += rate
                    count += 1
                if count > 0:
                    aveRate /= count
                    row = stringSegment("* "+name, pdSpacing[0])
                    row += stringSegment("* "+str(int(count)), pdSpacing[1])
                    row += stringSegment("* "+str(int(aveRate*23.3*count)), pdSpacing[2])
                    row += stringSegment("* "+"{0:.2f}".format(aveRate), pdSpacing[3])
                    if not self.noColors and aveRate > self.maxPDRate and self.mode != "other": write(bcolors.WARNING) # Write colored text
                    print row
                    if not self.noColors and aveRate > self.maxPDRate and self.mode != "other": write(bcolors.ENDC)    # Stop writing colored text 
                else: pass

        # Closing information
        print '*' * self.hlength
        print "SUMMARY:"
        if self.mode=="collisions": print "Triggers in Normal Range: %s   |   Triggers outside Normal Range: %s" % (self.normal, self.bad)
        if self.mode=="collisions":
            print "Prescale column index:", 
            if PScol == 0:
                if not self.noColors and PScol == 0 and self.mode != "other": write(bcolors.WARNING) # Write colored text
                print PScol, "\t0 - Column 0 is an emergency column in collision mode, please select the proper column"
                if not self.noColors and PScol == 0 and self.mode != "other": write(bcolors.ENDC)    # Stop writing colored text 
            else:
                print PScol
        try:
            print "Average inst. lumi: %.0f x 10^30 cm-2 s-1" % (aveLumi)
        except:
            print "Average inst. lumi: Not available"
        print "Total L1 rate: %.0f Hz" % (aveL1rate)
        print "Average dead time: %.2f %%" % (aveDeadTime)
        try: 
            print "Average PU: %.2f" % (self.pu_ave)
        except: 
            print "Average PU: %s" % (self.pu_ave)
        print '*' * self.hlength

    # Use: Prints the table header
    def printHeader(self):
        print "\n\n", '*' * self.hlength
        print "INFORMATION:"
        print "Run Number: %s" % (self.runNumber)
        print "LS Range: %s - %s" % (self.startLS, self.currentLS)
        print "Latest LHC Status: %s" % self.parser.getLHCStatus()[1]
        print "Number of colliding bunches: %s" % self.numBunches[0]
        print "Trigger Mode: %s (%s)" % (self.triggerMode, self.mode)
        print "Number of HLT Triggers: %s \nNumber of L1 Triggers: %s" % (self.totalHLTTriggers, self.totalL1Triggers)
        print "Number of streams:", self.totalStreams
        print '*' * self.hlength
        print self.header
        
    # Use: Prints a section of a table, ie all the triggers in a trigger list (like usableHLTTriggers, otherHLTTriggers, etc)
    def printTableSection(self, triggerList, doPred, aveLumi=0):
        self.tableData = [] # A list of tuples, each a row in the table: ( { trigger, rate, predicted rate, sign of % diff, abs % diff, sign of sigma, abs sigma, ave PS, comment } )

        # Get the trigger data
        for trigger in triggerList: self.getTriggerData(trigger, doPred, aveLumi)
        
        # Sort by % diff if need be
        if doPred:
            # [4] is % diff, [6] is deviation
            if self.usePerDiff: self.tableData.sort(key=lambda tup : tup[4], reverse = True)
            else: self.tableData.sort(key=lambda tup : tup[6], reverse = True)
        elif self.sortRates:
            self.tableData.sort(key=lambda tup: tup[1], reverse = True)
        for trigger, rate, pred, sign, perdiff, dsign, dev, avePS, comment in self.tableData:
            info = stringSegment("* "+trigger, self.spacing[0])
            info += stringSegment("* "+"{0:.2f}".format(rate), self.spacing[1])
            if pred!="": info += stringSegment("* "+"{0:.2f}".format(pred), self.spacing[2])
            else: info += stringSegment("", self.spacing[2])
            if perdiff=="": info += stringSegment("", self.spacing[3])
            elif perdiff=="INF": info += stringSegment("* INF", self.spacing[3])
            else: info += stringSegment("* "+"{0:.2f}".format(sign*perdiff), self.spacing[3])
            if dev=="": info += stringSegment("", self.spacing[4])
            elif dev=="INF" or dev==">1E6": info += stringSegment("* "+dev, self.spacing[4])
            else: info += stringSegment("* "+"{0:.2f}".format(dsign*dev), self.spacing[4])
            info += stringSegment("* "+"{0:.2f}".format(avePS), self.spacing[5])
            info += stringSegment("* "+comment, self.spacing[6])

            # Color the bad triggers with warning colors
            
            if avePS != 0 and self.isBadTrigger(perdiff, dev, rate, trigger[0:3]=="L1_"):
                if not self.noColors and self.mode != "other": write(bcolors.WARNING) # Write colored text 
                print info
                if not self.noColors and self.mode != "other": write(bcolors.ENDC)    # Stop writing colored text
            # Don't color normal triggers
            else:
                print info

    # Use: Returns whether a given trigger is bad
    # Returns: Whether the trigger is bad
    def isBadTrigger(self, perdiff, dev, psrate, isL1):
        if psrate == 0: return False
        if self.mode == "other": return False
        if (self.usePerDiff and perdiff != "INF" and perdiff != "" and abs(perdiff) > self.percAccept)\
        or (dev != "INF" and dev != "" and (dev == ">1E6" or abs(dev) > self.devAccept))\
        or (perdiff != "INF" and perdiff != "" and abs(perdiff) > self.percAccept and dev != "INF" and dev != "" and abs(dev) > self.devAccept)\
        or (isL1 and psrate > self.maxL1Rate)\
        or (not isL1 and psrate > self.maxHLTRate): return True
        
        return False
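
    # Illustrative use of the thresholds (values are examples): with percAccept = 50.0 and
    # devAccept = 5, a trigger measured at 90 Hz against an expected 50 Hz has a % diff of
    # +80 and is flagged bad when self.usePerDiff is True; with usePerDiff False it is only
    # flagged if |deviation| exceeds devAccept, or if its rate exceeds maxL1Rate / maxHLTRate.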

    # Use: Gets a row of the table, appended to self.tableData: ( trigger, rate, predicted rate, sign of % diff, abs % diff, sign of deviation, abs deviation, ave PS, comment )
    # Parameters:
    # -- trigger : The name of the trigger
    # -- doPred  : Whether we want to make a prediction for this trigger
    # -- aveLumi : The average luminosity during the LS in question
    # Returns: (void)
    def getTriggerData(self, trigger, doPred, aveLumi):
        # In case of critical error (this shouldn't occur)
        if not self.Rates.has_key(trigger): return
        # If cosmics, don't do predictions
        if self.mode == "cosmics": doPred = False
        # Calculate rate
        if self.mode != "cosmics" and doPred:
            if not aveLumi is None:
                expected = self.calculateRate(trigger, aveLumi)
                if expected<0: expected = 0                # Don't let expected value be negative
                avePSExpected = expected
                # Get the mean square error (standard deviation)
                mse = self.getMSE(trigger)
            else:
                expected = None
                avePSExpected = None
                mse = None
        # Find the ave rate since the last time we checked
        aveRate = 0
        properAvePSRate = 0
        avePS = 0
        aveDeadTime = 0
        count = 0
        comment = ""
        
        correct_for_deadtime = self.deadTimeCorrection
        if trigger[0:3]=="L1_": correct_for_deadtime = False
        
        for LS in self.Rates[trigger].keys():
            if self.useLSRange and (LS < self.LSRange[0] or LS > self.LSRange[1]): continue
            elif LS < self.startLS or LS > self.currentLS: continue

            prescale = self.Rates[trigger][LS][1]
            rate = self.Rates[trigger][LS][0]
            try:
                deadTime = self.deadTimeData[LS]
            except:
                print "trouble getting deadtime for LS: ", LS," setting DT to zero"
                deadTime = 0                

            if correct_for_deadtime: rate *= 1. + (deadTime/100.)
                
            if prescale > 0: properAvePSRate += rate/prescale
            else: properAvePSRate += rate
            aveRate += rate
            count += 1
            avePS += prescale
            aveDeadTime += deadTime
                
        if count > 0:
            if aveRate == 0: comment += "0 counts "
            aveRate /= count
            properAvePSRate /= count
            avePS /= count
            aveDeadTime /= count
        else:
            #comment += "PS=0"
            comment += "No rate yet "
            doPred = False
        
        if doPred and not avePSExpected is None and avePS > 1: avePSExpected /= avePS
        if not doPred and self.removeZeros and aveRate==0: return  # Returns if we are not making predictions for this trigger and we are throwing zeros

        # We want this trigger to be in the table
        row = [trigger]
        if self.displayRawRates:
            row.append(aveRate)
        else:
            row.append(properAvePSRate)
        if doPred and not expected is None: row.append(avePSExpected)
        else: row.append("") # No predicted rate
        # Find the % diff
        if doPred:
            if expected == "NONE":
                perc = "UNDEF"
                dev = "UNDEF"
                row.append(1)    # Sign of % diff
                row.append(perc) # abs % diff
                row.append(1)    # Sign of deviation
                row.append(dev)  # abs deviation
            else:
                diff = aveRate-expected
                if expected!=0: perc = 100*diff/expected
                else: perc = "INF"
                if mse!=0:
                    dev = diff/mse
                    if abs(dev)>1000000: dev = ">1E6"
                else: dev = "INF"
                if perc>0: sign=1
                else: sign=-1
                row.append(sign)       # Sign of % diff
                if perc!="INF": row.append(abs(perc))  # abs % diff
                else: row.append("INF")
                #if mse>0: sign=1
                #else: sign=-1
                row.append(sign)       # Sign of the deviation
                if dev!="INF" and dev!=">1E6":
                    row.append(abs(dev))   # abs deviation
                else: row.append(dev)
        else:
            row.append("") # No prediction, so no sign of a % diff
            row.append("") # No prediction, so no % diff
            row.append("") # No prediction, so no sign of deviation
            row.append("") # No prediction, so no deviation
        # Add the rest of the info to the row
        row.append(avePS)
        row.append(comment)

        # Add row to the table data
        if doPred:
            if expected > 0:
                self.tableData.append(row)
        else:
            self.tableData.append(row)

        #do not warn on specific triggers
        for vetoString in self.ignoreStrings:
            if trigger.find(vetoString) > -1: return
        # Check if the trigger is bad
        if doPred:
            # Check for bad rates.
            #if (self.usePerDiff and perc!="INF" and perc>self.percAccept) or \
            #(not self.usePerDiff and dev!="INF" and (dev==">1E6" or dev>self.devAccept)):
            if self.isBadTrigger(perc, dev, properAvePSRate, trigger[0:3]=="L1_"):
                self.bad += 1
                # Record if a trigger was bad
                if not self.recordAllBadRates.has_key(trigger):
                    self.recordAllBadRates[trigger] = 0
                self.recordAllBadRates[trigger] += 1
                # Record consecutive bad rates
                if not self.badRates.has_key(trigger):
                    self.badRates[trigger] = [1, True, properAvePSRate, avePSExpected, dev, avePS ]
                else:
                    last = self.badRates[trigger]
                    self.badRates[trigger] = [ last[0]+1, True, properAvePSRate, avePSExpected, dev, avePS ]
            else:
                self.normal += 1
                # Remove warning from badRates
                if self.badRates.has_key(trigger): del self.badRates[trigger]
                    
        else:
            if self.isBadTrigger("", "", properAvePSRate, trigger[0:3]=="L1_") and avePS > 0:
                self.bad += 1
                # Record if a trigger was bad
                if not self.recordAllBadRates.has_key(trigger):
                    self.recordAllBadRates[trigger] = 0
                self.recordAllBadRates[trigger] += 1
                # Record consecutive bad rates
                if not self.badRates.has_key(trigger):
                    self.badRates[trigger] = [ 1, True, properAvePSRate, -999, -999, -999 ]
                else:
                    last = self.badRates[trigger]
                    self.badRates[trigger] = [ last[0]+1, True, properAvePSRate, -999, -999, -999 ]
            else:
                self.normal += 1
                # Remove warning from badRates
                if self.badRates.has_key(trigger): del self.badRates[trigger]
                    

    # Use: Checks triggers to make sure none have been bad for too long
    def checkTriggers(self):
        if self.displayBadRates != 0:
            count = 0
            if self.displayBadRates != -1: write("First %s triggers that are bad: " % (self.displayBadRates)) 
            elif len(self.badRates) > 0 : write("All triggers deviating past thresholds from fit and/or L1 rate > %s Hz, HLT rate > %s Hz: " %(self.maxL1Rate,self.maxHLTRate))
            for trigger in self.badRates:
                if self.badRates[trigger][1]:
                    count += 1
                    write(trigger)
                    if count != self.displayBadRates-1:
                        write(", ")
                if count == self.displayBadRates:
                    write(".....")
                    break
            print ""

        # Print warnings for triggers that have been repeatedly misbehaving
        mailTriggers = [] # A list of triggers that we should mail alerts about
        for trigger in self.badRates:
            if self.badRates[trigger][1]:
                if self.badRates[trigger][0] >= 1:
                    print "Trigger %s has been out of line for more than %.1f minutes" % (trigger, float(self.badRates[trigger][0])*self.scale_sleeptime)
                # We want to mail an alert whenever a trigger exits the acceptable threshold envelope
                if self.badRates[trigger][0] == self.maxCBR:
                    mailTriggers.append( [ trigger, self.badRates[trigger][2], self.badRates[trigger][3], self.badRates[trigger][4], self.badRates[trigger][5] ] )
        # Send mail alerts
        if len(mailTriggers)>0 and self.isUpdating:
            if self.sendMailAlerts_static and self.sendMailAlerts_dynamic: self.sendMail(mailTriggers)
            if self.sendAudioAlerts: audioAlert()
            
    # Use: Sleeps and prints out waiting dots
    def sleepWait(self):
        if not self.quiet: print "Sleeping for %.1f sec before next query" % (60.0*self.scale_sleeptime)
        for iSleep in range(20):
            if not self.quiet: write(".")
            sys.stdout.flush()
            time.sleep(3.0*self.scale_sleeptime)
        sys.stdout.flush()
        print ""
            
    # Use: Loads the fit data from the fit file
    # Parameters:
    # -- fileName: The file that the fit data is stored in (a pickle file)
    # Returns: The input fit data
    def loadFit(self, fileName):
        if fileName == "":
            return None
        InputFit = {} # Initialize InputFit (as an empty dictionary)
        # Try to open the file containing the fit info
        try:
            pkl_file = open(fileName, 'rb')
            InputFit = pickle.load(pkl_file)
            pkl_file.close()
        except:
            # File failed to open
            print "Error: could not open fit file: %s" % (fileName)
        return InputFit
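
    # Illustrative layout of the pickled fit dictionary (keys and numbers are made up; the
    # element ordering is inferred from calculateRate/getMSE below):
    #   InputFit = { 'HLT_IsoMu27': ['poly', p0, p1, p2, p3, mse, ...],
    #                'L1_SingleMu22': ['exp', p0, p1, p2, p3, mse, ...] }
    # element [0] selects the functional form ("exp" -> exponential, anything else -> cubic
    # polynomial), [1]-[4] are the fit parameters and [5] is the MSE used for the deviation.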

    # Use: Calculates the expected rate for a trigger at a given ilumi based on our input fit
    def calculateRate(self, triggerName, ilum):
        # Make sure we have a fit for the trigger
        if not self.L1 and (self.InputFitHLT is None or not self.InputFitHLT.has_key(triggerName)):
            return 0
        elif self.L1 and ((self.InputFitL1 is None) or not self.InputFitL1.has_key(triggerName)):
            return 0
        # Get the param list
        if self.L1: paramlist = self.InputFitL1[triggerName]
        else: paramlist = self.InputFitHLT[triggerName]
        # Calculate the rate
        if paramlist[0]=="exp": funcStr = "%s + %s*expo(%s+%s*x)" % (paramlist[1], paramlist[2], paramlist[3], paramlist[4]) # Exponential
        else: funcStr = "%s+x*(%s+ x*(%s+x*%s))" % (paramlist[1], paramlist[2], paramlist[3], paramlist[4]) # Polynomial
        fitFunc = TF1("Fit_"+triggerName, funcStr)
        if self.pileUp:
            if self.numBunches[0] > 0:
                return self.numBunches[0]*fitFunc.Eval(ilum/self.numBunches[0]*ppInelXsec/orbitsPerSec)
            else:
                return 0
        return fitFunc.Eval(ilum)

    # Use: Gets the MSE of the fit
    def getMSE(self, triggerName):
        if not self.L1 and (self.InputFitHLT is None or not self.InputFitHLT.has_key(triggerName)):
            return 0
        elif self.L1 and ((self.InputFitL1 is None) or not self.InputFitL1.has_key(triggerName)):
            return 0
        if self.L1: paramlist = self.InputFitL1[triggerName]
        else: paramlist = self.InputFitHLT[triggerName]
        if self.pileUp:
            return self.numBunches[0]*paramlist[5]
        return paramlist[5] # The MSE

    # Use: Sends an email alert
    # Parameters:
    # -- mailTriggers: A list of triggers that we should include in the mail, ( { triggerName, aveRate, expected rate, standard dev } )
    # Returns: (void)
    def sendMail(self, mailTriggers):
        mail = "Run: %d, Lumisections: %s - %s \n" % (self.runNumber, self.lastLS, self.currentLS)
        try: mail += "Average inst. lumi: %.0f x 10^30 cm-2 s-1\n" % (self.lumi_ave)
        except: mail += "Average inst. lumi: %s x 10^30 cm-2 s-1\n" % (self.lumi_ave)
        
        try: mail += "Average PU: %.2f\n \n" % (self.pu_ave)
        except: mail += "Average PU: %s\n \n" % (self.pu_ave)
        
        mail += "Trigger rates deviating from acceptable and/or expected values: \n\n"

        for triggerName, rate, expected, dev, ps in mailTriggers:
        
            if self.numBunches[0] == 0:
                mail += "\n %s: Actual: %s Hz\n" % (stringSegment(triggerName, 35), rate)
            else:
                if expected > 0:
                    try: mail += "\n %s: Expected: %.1f Hz, Actual: %.1f Hz, Unprescaled Expected/nBunches: %.5f Hz, Unprescaled Actual/nBunches: %.5f Hz, Deviation: %.1f\n" % (stringSegment(triggerName, 35), expected, rate, expected*ps/self.numBunches[0], rate*ps/self.numBunches[0], dev)
                    except: mail += "\n %s: Expected: %s Hz, Actual: %s Hz, Unprescaled Expected/nBunches: %s Hz, Unprescaled Actual/nBunches: %s Hz, Deviation: %s\n" % (stringSegment(triggerName, 35), expected, rate, expected*ps/self.numBunches[0], rate*ps/self.numBunches[0], dev)
                    mail += "  *referenced fit: <https://raw.githubusercontent.com/cms-tsg-fog/RateMon/master/Fits/2016/plots/%s.png>\n" % (triggerName)                    
                else:
                    try: mail += "\n %s: Actual: %.1f Hz\n" % (stringSegment(triggerName, 35), rate)
                    except: mail += "\n %s: Actual: %s Hz\n" % (stringSegment(triggerName, 35), rate)

            try:
                wbm_url = self.parser.getWbmUrl(self.runNumber,triggerName,self.currentLS)
                if not wbm_url == "-": mail += "  *WBM rate: <%s>\n" % (wbm_url)
            except:
                print "WBM plot url query failed"

                
        mail += "\nWBM Run Summary: <https://cmswbm.web.cern.ch/cmswbm/cmsdb/servlet/RunSummary?RUN=%s> \n\n" % (self.runNumber)
        mail += "Email warnings triggered when: \n"
        mail += "   - L1 or HLT rates deviate by more than %s standard deviations from fit \n" % (self.devAccept)
        mail += "   - HLT rates > %s Hz \n" % (self.maxHLTRate)
        mail += "   - L1 rates > %s Hz \n" % (self.maxL1Rate)

        print "--- SENDING MAIL ---\n"+mail+"\n--------------------"
        mailAlert(mail)
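
# Hypothetical usage sketch (the real entry point, including command-line option parsing,
# lives elsewhere in the package):
#
#   monitor = ShiftMonitor()
#   monitor.run()   # polls the DB for the latest run and prints rate tables until Ctrl-C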
Example #3
class RateMonitor:
    # Default constructor for RateMonitor class
    def __init__(self):
        # Set ROOT properties
        gROOT.SetBatch(True) # Use batch mode so plots are not spit out in X11 as we make them
        gStyle.SetPadRightMargin(0.2) # Set the canvas right margin so the legend can be bigger
        ROOT.gErrorIgnoreLevel = 7000 # Suppress info message from TCanvas.Print
        # Member variables
        self.runFile = "" # The name of the file that a list of runs is contained in
        self.runList = [] # A list of runs to process
        self.jsonFilter = False
        self.jsonFile = ""
        self.jsonData = {}
        self.fitFile = "" # The name of the file that the fit info is contained in
        self.logFile = ""
        #self.colorList = [602, 856, 410, 419, 801, 798, 881, 803, 626, 920, 922] #[2,3,4,6,7,8,9,28,38,30,40,46] # List of colors that we can use for graphing
        #self.colorList = [2,3,4,6,7,8,9,28,38,30,40,46] # List of colors that we can use for graphing
        self.colorList = [4,6,8,7,9,419,46,20,28,862,874,38,32,40,41,5,3] # List of colors that we can use for graphing
        self.processAll = False  # If true, we process all the runs in the run list
        self.varX = "instLumi"   # Plot the instantaneous luminosity on the x axis
        self.varY = "rawRate"     # Plot the unprescaled rate on the y axis
        self.labelX = "instantaneous lumi"
        self.labelY = "unprescaled rate [Hz]"
        
        self.saveName = ""       # A name that we save the root file as
        self.saveDirectory = ""  # A directory that we can save all our files in if we are in batch mode
        self.updateOnlineFits = False #Update the online fits in the git directory
        self.ops = [] #options for log file
        
        self.parser = DBParser() # A database parser
        self.TriggerList = []    # The list of triggers to consider in plot-making 

        # Trigger Options
        self.L1Triggers = False  # If True, then we get the L1 trigger Data
        self.HLTTriggers = True  # If True, then we get the HLT trigger Data
        self.correctForDT = True # Correct rates for deadtime
        self.savedAFile = False  # True if we saved at least one file

        # Stream + PD Options
        self.plotStreams = False # If true, we plot the streams
        self.plotDatasets = False # If true, we plot the primary datasets

        # Error File Options
        self.makeErrFile = False # If true, we will write an error file
        self.errFileName = ""    # The name of the error file
        self.errFile = None      # A file to output errors to
        
        self.certifyMode = False # False -> Primary mode, True -> Secondary mode
        self.certifyDir = None
        self.runsToProcess = 12  # How many runs we are about to process
        self.sigmas = 3.0        # How many sigmas the error bars should be
        self.errorBands = True   # display +/- self.sigmas error bands on the rate vs inst lumi plot
        self.png = True          # If true, create png images of all plots

        # Fitting
        self.allRates = {}       # Retain a copy of rates to use for validating lumisections later on: [ runNumber ] [ triggerName ] [ LS ] { rawRate, ps }
        self.predictionRec = {}  # A dictionary used to store predictions and prediction errors: [ triggerName ] { ( LS ), ( prediction ), (error) }
        self.minStatistics = 10  # The minimum number of points that we will allow for a run and still consider it
        self.fit = False         # If true, we fit the data to fit functions
        self.InputFit = None     # The fit from the fit file that we open
        self.OutputFit = None    # The fit that we can make in primary mode
        self.outFitFile = ""     # The name of the file that we will save an output fit to
        self.fitFinder = FitFinder()  # A fit finder object
        self.pileUp = True
        self.bunches = 1         # The number of colliding bunches if divByBunches is true, 1 otherwise
        self.showEq = True       # Whether we should show the fit equation on the plot
        self.dataCol = 0         # The column of the input data that we want to use as our y values
 
        # Batch mode variables
        self.plot_steam_rates = False
        self.steamRates = {}

        #misc.
        self.minNum = 0
        self.maxNum = 0
        
        # Cuts
        self.minPointsToFit = 10 # The minimum number of points we need to make a fit
        self.maxDeadTime = 10.    # the maximum % acceptable deadtime, if deadtime is > maxDeadTime, we do not plot or fit that lumi

        # self.useFit:
        # If False, no fit will be plotted and all possible triggers will be used in graph making.
        # If True, only triggers in the fit file will be considered, and the fit will be plotted
        self.useFit = True                                 

        # self.useTrigList
        # If False, modify self.TriggerList as necessary to include as many triggers as possible
        # If True, only use triggers in self.triggerList
        self.useTrigList = False



    # Use: sets up the variables before the main loop in run()
    # Returns: (void)
    def setUp(self):
        print "" # Formatting
        length = len(self.runList)
        
        try:
            self.runList.sort()
        except:
            print "Unable to sort runs"
            return
            
        self.bunches = 1 # Reset bunches, just in case
        if self.certifyMode: self.varX = "LS" # We are in secondary mode
        self.savedAFile = False  # Reset self.savedAFile

        # Read JSON file
        if self.jsonFilter:
            with open(self.jsonFile) as jsonfile:    
                self.jsonData = json.load(jsonfile)
            if not len(self.jsonData) > 0:
                print "JSON file is empty or not valid"
                self.jsonFilter = False
            else:
                print "JSON file list of runs: ",
                for k in self.jsonData.keys():
                    print "%s" % k,
                print "\n"
        # Apply run pre-filtering
        if self.jsonFilter:
            self.runList = [x for x in self.runList if "%d" % x in self.jsonData]
                
        self.minNum = self.runList[0]
        self.maxNum = self.runList[-1]

        # If we are supposed to, get the fit, a dictionary: [ triggername ] [ ( fit parameters ) ]
        if self.useFit or self.certifyMode: # Always try to load a fit in secondary mode
            self.InputFit = self.loadFit()
            if not self.useTrigList and not self.InputFit is None: self.TriggerList = sorted(self.InputFit)

        
        if not self.certifyMode and self.saveDirectory == "": self.saveDirectory = "fits__"+str(self.minNum) + "-" + str(self.maxNum)

        if not self.certifyMode:
            if os.path.exists(self.saveDirectory):
                shutil.rmtree(self.saveDirectory)
                print "Removing existing directory %s " % (self.saveDirectory)
                
            os.mkdir(self.saveDirectory)
            if self.png:
                os.chdir(self.saveDirectory)
                os.mkdir("png")
                os.chdir("../")
            print "Created directory %s " % (self.saveDirectory)
            self.saveName = self.saveDirectory + "/" + self.saveName
            
        # File names and name templates
        RootNameTemplate = "HLT_%s_vs_%s_%s_Run%s-%s_Tot%s_cert.root"
        if self.outFitFile=="": self.outFitFile = self.saveDirectory+"/HLT_Fit_Run%s-%s_Tot%s_fit.pkl" % (self.minNum, self.maxNum, self.runsToProcess)
        if self.useFit or self.fit or (self.certifyMode and not self.InputFit is None): fitOpt = "Fitted"
        else: fitOpt = "NoFit"

        self.saveName = self.saveDirectory+"/"+RootNameTemplate % (self.varX, self.varY, fitOpt, self.minNum, self.maxNum, self.runsToProcess)
        if self.certifyMode: self.saveName = self.certifyDir+"/"+self.saveName

        # Remove any root files that already have that name
        if os.path.exists(self.saveName): os.remove(self.saveName)
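
        # Example of a generated file name (run numbers here are hypothetical): with
        # varX = "instLumi", varY = "rawRate", fitOpt = "Fitted", runs 273000-273123 and
        # 12 runs to process, saveName becomes something like
        #   <saveDirectory>/HLT_instLumi_vs_rawRate_Fitted_Run273000-273123_Tot12_cert.root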

        # Open a file for writing errors
        if self.makeErrFile:
            self.errFileName = "rateGrapher_%s_%s.err" % (self.minNum, self.maxNum) # Define the error file name
            try: self.errFile = open(self.errFileName, 'w')
            except: print "Could not open error file."

        # If we are going to save fit find debug graph, delete any old ones
        if self.fitFinder.saveDebug and os.path.exists("Debug.root"): os.remove("Debug.root")
        if not self.certifyMode:
        ###dump a log file with the exact command used to produce the results
            self.logFile = self.saveDirectory+"/command_line.txt"
            command_line_str = "Results produced with:\n"
            command_line_str+="python plotTriggerRates.py "
            for opt, arg in self.ops:
                if opt.find('--updateOnlineFits') > -1: continue #never record when we update online fits
                if len(arg) == 0: command_line_str+= "%s " % (opt)
                else: command_line_str+= "%s=%s " % (opt,arg)
            for run in self.runList: command_line_str+= "%d " % (run)
            command_line_str+="\n"
        
            command_line_log_file = open(self.logFile, "w")
            command_line_log_file.write(command_line_str)
            command_line_log_file.close()
        
        
    def runBatch(self):
        
        if self.certifyMode: 
            self.certifyDir = "Certification_%sruns_%s_%s" % (len(self.runList),str(datetime.datetime.now()).split()[0],str(datetime.datetime.now()).split()[1].split(':')[0] +"_"+ str(datetime.datetime.now()).split()[1].split(':')[1])
            if os.path.exists(self.certifyDir): shutil.rmtree(self.certifyDir)
            os.mkdir(self.certifyDir)

        plottingData = {} # A dictionary [ trigger name ] [ run number ] { ( inst lumi's || LS ), ( data ) }
        self.setUp() # Set up parameters and data structures

        for run_number in self.runList:
            self.run(run_number, plottingData)
            if self.certifyMode:
                self.makeFits(plottingData)
                plottingData = {}
            print "-----" # Newline for formatting
                
        if self.certifyMode:
            self.doChecks()
        else:
            self.makeFits(plottingData)
    
    # Use: Created graphs based on the information stored in the class (list of runs, fit file, etc)
    # Returns: (void)
    def run(self,runNumber,plottingData):
        print ""  # Print a newline (just for formatting)
        
        print "\nProcessing run: %d" % (runNumber)


        if self.certifyMode: #should probably find a better place for this...
            self.saveDirectory = "run%s" % (str(runNumber))
            if os.path.exists(self.saveDirectory):
                shutil.rmtree(self.saveDirectory)
                print "Removing existing directory %s " % (self.saveDirectory)
                
            os.chdir(self.certifyDir)
            os.mkdir(self.saveDirectory)
            if self.png:
                os.chdir(self.saveDirectory)
                os.mkdir("png")
                os.chdir("../../")
            print "Created directory %s " % (self.saveDirectory)
            self.saveName = self.saveDirectory + "/" + self.saveName
            
            # File names and name templates
            RootNameTemplate = "HLT_%s_vs_%s_%s_Run%s_CERTIFICATION.root"
            if self.useFit or self.fit or (not self.InputFit is None): fitOpt = "Fitted"
            else: fitOpt = "NoFit"

            self.saveName = self.saveDirectory+"/"+RootNameTemplate % (self.varX, self.varY, fitOpt, runNumber)
            self.saveName = self.certifyDir+"/"+self.saveName

            # Remove any root files that already have that name
            if os.path.exists(self.saveName): os.remove(self.saveName)

        
        if self.pileUp:
            self.bunches = self.parser.getNumberCollidingBunches(runNumber)[0]
            if self.bunches is None or self.bunches == 0:
                print "Cannot get number of bunches: skipping this run.\n"
                return
            print "(%s colliding bunches)" % (self.bunches)
            
        # Get run info in a dictionary: [ trigger name ] { ( inst lumi's ), ( raw rates ) }
        dataList = self.getData(runNumber)

        if dataList == {}:
            # The run does not exist (or some other critical error occurred)
            print "Fatal error for run %s, could not retrieve data. Probably Lumi was None or physics was not active. Moving on." % (runNumber) # Info message
            return
            
        # Make plots for each trigger
        if not self.plotStreams and not self.plotDatasets:
            for triggerName in self.TriggerList:
                if dataList.has_key(triggerName): # Add this run to plottingData[triggerName]
                    # Make sure there is an entry for this trigger in plottingData
                    if not plottingData.has_key(triggerName):
                        plottingData[triggerName] = {}
                    plottingData[triggerName][runNumber] = dataList[triggerName]
                elif self.makeErrFile: # The trigger data was not taken from the DB or does not exist
                    # This should not occur if useFit is false, all triggers should be processed
                    message = "For run %s Trigger %s could not be processed\n" % (runNumber, triggerName)
                    self.errFile.write(message)
        elif not self.plotDatasets: # Otherwise, make plots for each stream
            sumPhysics = "Sum_Physics_Streams"
            for streamName in dataList:
                if not plottingData.has_key(streamName): plottingData[streamName] = {}
                plottingData[streamName][runNumber] = dataList[streamName]
                
                if not plottingData.has_key(sumPhysics):
                    plottingData[sumPhysics] = {}
                if (streamName[0:7] =="Physics" or streamName[0:9] =="HIPhysics") and not plottingData[sumPhysics].has_key(runNumber):
                    plottingData[sumPhysics][runNumber] = plottingData[streamName][runNumber]
                elif (streamName[0:7] =="Physics" or streamName[0:9] =="HIPhysics"):
                    if plottingData[sumPhysics] != {}:
                        ls_number =0
                        for rate in plottingData[streamName][runNumber][1]:
                            plottingData[sumPhysics][runNumber][1][ls_number] += rate
                            ls_number +=1
                        
        else: # Otherwise, make plots for each dataset
            for pdName in dataList:
                if not plottingData.has_key(pdName):
                    plottingData[pdName] = {}
                plottingData[pdName][runNumber] = dataList[pdName]



    def makeFits(self, plottingData):
        if self.fit: self.findFit(plottingData)
        print "\n"

        # We have all our data, now plot it
        if self.useFit or (self.certifyMode and not self.InputFit is None): fitparams = self.InputFit
        elif self.fit: fitparams = self.OutputFit # Plot the fit that we made
        else: fitparams = None

        if self.plot_steam_rates: self.getSteamRates()

        for name in sorted(plottingData):
            if fitparams is None or not fitparams.has_key(name): fit = None
            else: fit = fitparams[name]
            self.graphAllData(plottingData[name], fit, name)

        # Try to close the error file
        if self.makeErrFile:
            try:
                self.errFile.close() # Close the error file
                print "Error file saved to", self.errFileName # Info message
            except: print "Could not save error file."

        if self.fitFinder.saveDebug and self.fitFinder.usePointSelection: print "Fit finder debug file saved to Debug.root.\n" # Info message

        if self.savedAFile: print "File saved as %s" % (self.saveName) # Info message
        else: print "No files were saved. Perhaps none of the triggers you requested were in use for this run."

        if self.png: self.printHtml(plottingData)
        
        if self.updateOnlineFits:
            #online files/dirs
            online_git_dir = "Fits/2016/"
            online_plots_dir = online_git_dir+"plots/"
            online_fit_file = online_git_dir+"FOG.pkl"
            online_cmd_line_file = online_git_dir+"command_line.txt"
            
            # Files to be copied: self.outFitFile (the fit) and self.logFile (the command line), plus the png directory
            png_dir = self.saveDirectory+"/png/"

            shutil.rmtree(online_plots_dir)
            shutil.copytree(png_dir,online_plots_dir)
            shutil.copy(self.outFitFile,online_fit_file) #copy fit
            shutil.copy(self.logFile,online_cmd_line_file) #copy command line


        
    # Use: Gets the data we desire in primary mode (rawrate vs inst lumi) or secondary mode (rawrate vs LS)
    # Parameters:
    # -- runNumber: The number of the run we want data from
    # Returns: A dictionary:  [ trigger name ] { ( inst lumi's || LS ), ( data ) }
    def getData(self, runNumber):
        Rates = {}
        if self.HLTTriggers:
            Rates = self.parser.getRawRates(runNumber) # Get the HLT raw rate vs LS
            if self.correctForDT: self.correctForDeadtime(Rates, runNumber) # Correct HLT Rates for deadtime

        if self.L1Triggers:
            L1Rates = self.parser.getL1RawRates(runNumber) # Get the L1 raw rate vs LS
            Rates.update(L1Rates)

        if Rates == {}:
            print "trouble fetching rates from db"
            return {} # The run (probably) doesn't exist
        
        # JSON filtering
        # Removes from the Rates dictionary the lumisections not included in the JSON file, if present.
        if self.jsonFilter:
            runNumberStr = "%d" % runNumber
            # Check for run number
            if not runNumberStr in self.jsonData:
                print "Run", runNumberStr, "is not included in the JSON file"
                return {}
            else:
                print "Run", runNumberStr, "and lumisections", self.jsonData[runNumberStr], "are included in the JSON file"
            # Remove lumisections
            for trigger, lumi in Rates.iteritems():
                lumis = lumi.keys()
                for i, ls in enumerate(lumis):
                    if not any(l <= ls <= u for [l, u] in self.jsonData[runNumberStr]): del Rates[trigger][ls]
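
        # Illustrative JSON filter format (run number and ranges are made up): the file is
        # expected to map run numbers to lists of [first LS, last LS] ranges, in the style
        # of a CMS certification ("golden") JSON, e.g.
        #   { "273000": [[1, 50], [77, 312]] }
        # Only lumisections inside one of the listed ranges survive the filtering above.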

        iLumi = self.parser.getLumiInfo(runNumber) # If we are in primary mode, we need luminosity info, otherwise, we just need the physics bit
        #iLumi = self.getBrilCalcLumi(runNumber) #for special studies using brilcalc lumis
        

        # Get the trigger list if useFit is false and we want to see all triggers (self.useTrigList is false)
        if not self.useFit and not self.useTrigList and not self.certifyMode:
            for triggerName in sorted(Rates):
                if not triggerName in self.TriggerList:
                    self.TriggerList.append(triggerName)

        # Store Rates for this run
        self.allRates[runNumber] = Rates

        # Get stream data
        if self.plotStreams:
            # Stream Data [ stream name ] { LS, rate, size, bandwidth }
            streamData = self.parser.getStreamData(runNumber)
            Data = {}
            # Format the data correctly: [ stream name ] [ LS ] = { rate, size, bandwidth }
            for name in streamData:
                Data[name] = {}
                for LS, rate, size, bandwidth in streamData[name]:
                    Data[name][LS] = [ rate, size, bandwidth ]
        # Get PD data
        elif self.plotDatasets:
            # pdData [ pd name ] { LS, rate }
            pdData = self.parser.getPrimaryDatasets(runNumber)
            Data = {}
            # Format the data correctly: [ pd name ] [ LS ] = { rate }
            for name in pdData:
                Data[name] = {}
                for LS, rate in pdData[name]:
                    Data[name][LS] = [ rate ]
        else: Data = Rates

        # Depending on the mode, we return different pairs of data
        if not self.certifyMode:
            return self.combineInfo(Data, iLumi) # Combine the rates and lumi into one dictionary, [ trigger name ] { ( inst lumi's ), ( raw rates ) } and return
        else: # self.certifyMode == True
            return self.sortData(Data, iLumi)

    # Use: Modifies the rates in Rates, correcting them for deadtime
    # Parameters:
    # -- Rates: A dictionary [ triggerName ] [ LS ] { raw rate, prescale }
    # Returns: (void), directly modifies Rates
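    # Example (illustrative): with 5% deadtime in a LS, a measured rate of 100 Hz is corrected to 100 * 1.05 = 105 Hz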
    def correctForDeadtime(self, Rates, runNumber):
        # Get the deadtime
        deadTime = self.parser.getDeadTime(runNumber)
        for LS in deadTime:
            for triggerName in Rates:
                if Rates[triggerName].has_key(LS): # Sometimes, LS's are missing
                    Rates[triggerName][LS][0] *= (1. + deadTime[LS]/100.)
                    if deadTime[LS] > self.maxDeadTime and not self.certifyMode: del Rates[triggerName][LS] # do not plot lumis where deadtime is greater than maxDeadTime


    # Use: Combines the Rate data and instant luminosity data into a form that we can make a graph from
    # Parameters:
    # -- Data: A dictionary [ triggerName ] [ LS ] { col 0, col 1, ... }
    # -- iLumi: A list ( { LS, instLumi, cms active } )
    # Returns: A dictionary: [ name ] { ( inst lumi's ), ( Data[...][col] ) }
    def combineInfo(self, Data, iLumi, col=0):
        # Create a dictionary [ trigger name ] { ( inst lumi's ), ( Data[...][col] ) }
        dataList = {}
        # For each trigger in Rates
        for name in Data:
            iLuminosity = array.array('f')
            yvals = array.array('f')
            detector_ready = array.array('f')
            for LS, ilum, psi, phys, cms_ready in iLumi:
                if Data[name].has_key(LS) and phys and not ilum is None:
                    # Normalization: if we aren't normalizing by bunches, self.bunches is set to 1
                    # Extract the required data from Data
                    data = Data[name][LS][self.dataCol]
                    # We apply our cuts here if they are called for
                    detector_ready.append(cms_ready)
                    if self.pileUp:
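                        # Average pileup: <PU> = (inst. lumi / n colliding bunches) * (pp inelastic cross section) / (orbit frequency)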
                        PU = (ilum/self.bunches*ppInelXsec/orbitsPerSec) 
                        iLuminosity.append(PU)
                        yvals.append(data/self.bunches)
                    else:
                        iLuminosity.append(ilum)     # Add the instantaneous luminosity for this LS
                        yvals.append(data) # Add the corresponding raw rate

            iLuminosity, yvals = self.fitFinder.getGoodPoints(iLuminosity, yvals) #filter out points that differ greatly from the averages on a trigger,run basis 
            
            if len(iLuminosity) > 0:
                dataList[name] = [iLuminosity, yvals, detector_ready]
        return dataList

    # Use: Combines the Data from an array of the shown format and the instant luminosity data into a form that we can make a graph from
    # Parameters:
    # -- Data: A dictionary [ name ] [ LS ] { col 0, col 1, ... }
    # Returns: A dictionary: [ name ] { ( LS ), ( Data[LS][col] ) }
    def sortData(self, Data, iLumi):
        # Create a dictionary [ trigger name ] { ( LS ), ( Data[LS][col] ) }
        dataList = {}
        for name in Data:
            lumisecs = array.array('f')
            yvals = array.array('f')
            detector_ready = array.array('f')
            for LS, ilum, psi, phys, cms_ready in iLumi:
                if phys and not ilum is None and Data[name].has_key(LS):
                    lumisecs.append(LS)
                    detector_ready.append(cms_ready)
                    yvals.append(Data[name][LS][self.dataCol])
            dataList[name] = [lumisecs, yvals, detector_ready]
        return dataList

    # Use: Graphs the data from all runs and triggers onto graphs and saves them to the root file
    # Parameters:
    # -- plottingData: A dictionary [ run number ] { ( inst lumi's ), ( raw rates ) }
    # -- paramlist: A tuple: { FitType, X0, X1, X2, X3, sigma, meanrawrate, X0err, X1err, X2err, X3err, ChiSqr }
    # -- triggerName: The name of the trigger that we are examining
    # Returns: (void)
 
    def graphAllData(self, plottingData, paramlist, triggerName):        
        # Find max and min values
        maximumRR = array.array('f')
        maximumVals = array.array('f')
        minimumVals = array.array('f')

        yVals = array.array('f')
        xVals = array.array('f')

        # Find minima and maxima so we create graphs of the right size
        for runNumber in plottingData:
            xVals, yVals = self.fitFinder.getGoodPoints(plottingData[runNumber][0], plottingData[runNumber][1]) 
            if len(xVals) > 0:
                maximumRR.append(max(yVals))
                maximumVals.append(max(xVals))
                minimumVals.append(min(xVals))


        if len(maximumRR) > 0: max_yaxis_value = max(maximumRR)
        else: return
        
        if len(maximumVals) > 0:
            max_xaxis_val = max(maximumVals)
            min_xaxis_val = min(minimumVals)
        else: return


        if self.plot_steam_rates:
            if self.steamRates.has_key(triggerName.split('_v')[0]):
                steam_xVal = array.array('f'); steam_xVal.append(15.)
                steam_xVal_err = array.array('f'); steam_xVal_err.append(0.)
                steam_yVal = array.array('f'); steam_yVal.append(self.steamRates[triggerName.split('_v')[0]][0])
                steam_yVal_err = array.array('f'); steam_yVal_err.append(self.steamRates[triggerName.split('_v')[0]][1])
            
                steamGraph = TGraphErrors(1, steam_xVal, steam_yVal, steam_xVal_err, steam_yVal_err)
                steamGraph.SetMarkerStyle(29)
                steamGraph.SetMarkerSize(2.6)
                steamGraph.SetMarkerColor(1)
                if max(steam_yVal) > max_yaxis_value: max_yaxis_value = max(steam_yVal)
            else:
                return

        if max_xaxis_val==0 or max_yaxis_value==0: return

        # Set axis names/units, create canvas
        if self.pileUp:
            nameX = "< PU >"
            xunits = ""
            self.labelY = "unprescaled rate / num colliding bx [Hz]"
        else:
            xunits = "[10^{30} Hz/cm^{2}]"
            nameX = "instantaneous luminosity"
        if self.certifyMode:
            xunits = ""
            nameX = "lumisection"
            self.labelY = "unprescaled rate [Hz]"
        canvas = TCanvas((self.varX+" "+xunits), self.varY, 1000, 600)
        canvas.SetName(triggerName+"_"+self.varX+"_vs_"+self.varY)
        plotFuncStr = ""
        funcStr = ""
        if (self.useFit or self.fit) and not paramlist is None:
            if self.certifyMode:
                # Make a prediction graph of raw rate vs LS for values between min_xaxis_val and max_xaxis_val
                runNum_cert = plottingData.keys()[0]
                predictionTGraph = self.makePredictionTGraph(paramlist, min_xaxis_val, max_xaxis_val, triggerName, runNum_cert)
                maxPred = self.predictionRec[triggerName][runNum_cert][0][1]
                if maxPred > max_yaxis_value: max_yaxis_value = maxPred

            else: #primary mode
                if paramlist[0]=="exp": 
                     plotFuncStr = "%.5f + %.5f*exp( %.5f+%.5f*x )" % (paramlist[1], paramlist[2], paramlist[3], paramlist[4]) # Exponential
                     funcStr = "%.5f + %.5f*exp( %.5f+%.5f*x )" % (paramlist[1], paramlist[2], paramlist[3], paramlist[4])
                elif paramlist[0]=="linear": 
                    plotFuncStr = "%.15f + x*%.15f" % (paramlist[1], paramlist[2])
                    funcStr = "%.5f + x*%.5f" % (paramlist[1], paramlist[2])                   
                else: 
                    plotFuncStr = "%.15f+x*(%.15f+ x*(%.15f+x*%.15f))" % (paramlist[1], paramlist[2], paramlist[3], paramlist[4])#Polynomial
                    funcStr = "%.5f+x*(%.5f+ x*(%.5f+x*%.5f))" % (paramlist[1], paramlist[2], paramlist[3], paramlist[4])
                
                #max_xaxis_val = 40 #Extend x-axis to 40
                fitFunc = TF1("Fit_"+triggerName, plotFuncStr, 0., 1.1*max_xaxis_val)                
                #if fitFunc.Eval(max_xaxis_val) > max_yaxis_value: max_yaxis_value = fitFunc.Eval(max_xaxis_val) #extend y-axis to maximum fit value

                if self.errorBands:
                    xVal = array.array('f')
                    yVal = array.array('f')
                    yeVal = array.array('f')
                    xeVal = array.array('f')
                
                    xMin = fitFunc.GetXmin()
                    xMax = fitFunc.GetXmax()
                    x_range = xMax - xMin
                    nPoints = 1000
                    stepSize = x_range/nPoints
                
                    xCoord = xMin
                    while xCoord <= xMax:
                        xVal.append(xCoord)
                        yVal.append(fitFunc.Eval(xCoord))
                        yeVal.append(self.sigmas*paramlist[5])
                        xeVal.append(0)
                        xCoord += stepSize
                    
                    fitErrorBand = TGraphErrors(len(xVal),xVal,yVal,xeVal,yeVal)
                    fitErrorBand.SetFillColor(2)
                    fitErrorBand.SetFillStyle(3003)


        counter = 0        
        # This is the only way I have found to get an arbitrary number of graphs to be plotted on the same canvas. This took a while to get to work.
        graphList = []
        # Create legend
        left = 0.81; right = 0.98; top = 0.9; scaleFactor = 0.05; minimum = 0.1
        bottom = max( [top-scaleFactor*(len(plottingData)+1), minimum]) # Height we desire for the legend, adjust for number of entries
        legend = TLegend(left,top,right,bottom)

        for runNumber in sorted(plottingData):
            numLS = len(plottingData[runNumber][0])
            bunchesForLegend = self.parser.getNumberCollidingBunches(runNumber)[0]
            if numLS == 0: continue
            graphList.append(TGraph(numLS, plottingData[runNumber][0], plottingData[runNumber][1]))

            # Set some stylistic settings for dataGraph
            graphColor = self.colorList[counter % len(self.colorList)]# + (counter // len(self.colorList)) # If we have more runs then colors, we just reuse colors (instead of crashing the program)
            graphList[-1].SetMarkerStyle(7)
            graphList[-1].SetMarkerSize(1.0)
            graphList[-1].SetLineColor(graphColor)
            graphList[-1].SetFillColor(graphColor)
            graphList[-1].SetMarkerColor(graphColor)
            graphList[-1].SetLineWidth(2)
            graphList[-1].GetXaxis().SetTitle(nameX+" "+xunits)
            graphList[-1].GetXaxis().SetLimits(0, 1.1*max_xaxis_val)
            graphList[-1].GetYaxis().SetTitle(self.labelY)
            graphList[-1].GetYaxis().SetTitleOffset(1.2)
            graphList[-1].SetMinimum(0)
            graphList[-1].SetMaximum(1.2*max_yaxis_value)
            graphList[-1].SetTitle(triggerName)
            if counter == 0: graphList[-1].Draw("AP")
            else: graphList[-1].Draw("P")
            canvas.Update()
            if bunchesForLegend > 0: legend.AddEntry(graphList[-1], "%s (%s b)" %(runNumber,bunchesForLegend), "f")
            else: legend.AddEntry(graphList[-1], "%s (- b)" %(runNumber), "f")
            counter += 1

        if (self.useFit or self.fit) and not paramlist is None:
            if self.certifyMode:
                predictionTGraph.Draw("PZ3")
                canvas.Update()
                legend.AddEntry(predictionTGraph, "Fit ( %s \sigma )" % (self.sigmas))
            else: # Primary Mode
                if self.errorBands: fitErrorBand.Draw("3")
                legend.AddEntry(fitFunc, "Fit ( %s \sigma )" % (self.sigmas))
                fitFunc.Draw("same") # Draw the fit function on the same graph

        if self.plot_steam_rates:
            if self.steamRates.has_key(triggerName.split('_v')[0]):
                steamGraph.Draw("P")
                legend.AddEntry(steamGraph,"STEAM estimate","p")
            
        if not funcStr == "" and self.showEq: # Draw function string on the plot
            funcLeg = TLegend(.146, .71, .47, .769)
            funcLeg.SetHeader("f(x) = " + funcStr)
            funcLeg.SetFillColor(0)
            funcLeg.Draw()
            canvas.Update()
        # draw text
        latex = TLatex()
        latex.SetNDC()
        latex.SetTextColor(1)
        latex.SetTextAlign(11)
        latex.SetTextFont(62)
        latex.SetTextSize(0.05)
        latex.DrawLatex(0.15, 0.84, "CMS")
        latex.SetTextSize(0.035)
        latex.SetTextFont(52)
        latex.DrawLatex(0.15, 0.80, "Rate Monitoring")
        
        canvas.SetGridx(1)
        canvas.SetGridy(1)

        canvas.Update()
        # Draw Legend
        legend.SetHeader("%s runs:" % (len(plottingData)))
        legend.SetFillColor(0)
        legend.Draw() 
        canvas.Update()
        # Update root file
        file = TFile(self.saveName, "UPDATE")
        canvas.Modified()
        if self.png:
            if not self.certifyMode: canvas.Print(self.saveDirectory+"/png/"+triggerName+".png", "png")
            else: canvas.Print(self.certifyDir+"/"+self.saveDirectory+"/png/"+triggerName+".png", "png")
        canvas.Write()
        file.Close()
        self.savedAFile = True
       

    # Use: Get a fit for all the data that we have collected
    # Parameters:
    # -- plottingData: A dictionary [triggerName] [ run number ] { ( inst lumi's ), ( raw rates ) }
    # Returns: (void)
    def findFit(self, plottingData):
        self.OutputFit = {}
        for name in sorted(plottingData):
            instLumis = array.array('f')
            yvals = array.array('f')
            detector_ready = array.array('f')
            for runNumber in sorted(plottingData[name]):
                #if runNumber == 273447 and ( name.find('DoubleMu') > -1 or name.find('QuadMu') > -1 or name.find('TripleMu') > -1 ): print "Skimming some Runs and Paths"; continue
                detector_ready = plottingData[name][runNumber][2]
                for x,y,cms_ready in zip(plottingData[name][runNumber][0],plottingData[name][runNumber][1],detector_ready):
                    if cms_ready: #only fit data points when ALL subsystems are IN.
                        instLumis.append(x)
                        yvals.append(y)
                        
            if len(instLumis) > self.minPointsToFit:
                try:
                    self.OutputFit[name] = self.fitFinder.findFit(instLumis, yvals, name)
                except:
                    continue

        self.saveFit()

    # Use: Save a fit to a file
    def saveFit(self):
        outputFile = open(self.outFitFile, "wb")
        pickle.dump(self.OutputFit, outputFile, 2)
        outputFile.close()
        self.sortFit()
        print "\nFit file saved to", self.outFitFile


    # Use: Sorts trigger fits by their chi squared value and writes it to a file
    def sortFit(self):
        outputFile = open(self.saveDirectory+"/sortedChiSqr.txt", "wb")
        chisqrDict = {}
        for trigger in self.OutputFit:
            _,_,_,_,_,_,_,_,_,_,_,chisqr = self.OutputFit[trigger]
            chisqrDict[chisqr] = trigger

        for chisqr in sorted(chisqrDict):
            outputFile.write(chisqrDict[chisqr] + ": " + str(chisqr) + "\n")
        outputFile.close()
        print "Sorted chi-square saved to: " + self.saveDirectory + "/sortedChiSqr.txt"

    def printHtml(self,plottingData):
        try:
            if not self.certifyMode: htmlFile = open(self.saveDirectory+"/index.html", "wb")
            else: htmlFile = open(self.certifyDir+"/"+self.saveDirectory+"/index.html", "wb")
            htmlFile.write("<!DOCTYPE html>\n")
            htmlFile.write("<html>\n")
            htmlFile.write("<style>.image { float:right; margin: 5px; clear:justify; font-size: 6px; font-family: Verdana, Arial, sans-serif; text-align: center;}</style>\n")
            for pathName in sorted(plottingData):
                fileName = "%s/png/%s.png" % (self.saveDirectory,pathName)
                if self.certifyMode: fileName = "%s/%s/png/%s.png" % (self.certifyDir,self.saveDirectory,pathName)
                if os.access(fileName,os.F_OK):
                    htmlFile.write("<div class=image><a href=\'png/%s.png\'><img width=398 height=229 border=0 src=\'png/%s.png\'></a><div style=\'width:398px\'>%s</div></div>\n" % (pathName,pathName,pathName))
            htmlFile.write("</html>\n")
            htmlFile.close()
        except:
            print "Unable to write index.html file"
            
    # Use: Creates a graph of predicted raw rate vs lumisection data
    # Parameters:
    # -- paramlist: A tuple of parameters { FitType, X0, X1, X2, X3, sigma, meanrawrate, X0err, X1err, X2err, X3err, ChiSqr } 
    # -- min_xaxis_val: The minimum LS
    # -- max_xaxis_val: The maximum LS
    # -- iLumi: A list: ( { LS, instLumi } )
    # -- triggerName: The name of the trigger we are making a fit for
    # Returns: A TGraph of predicted values
    def makePredictionTGraph(self, paramlist, min_xaxis_val, max_xaxis_val, triggerName, runNumber):
        # Initialize our point arrays
        lumisecs = array.array('f')
        predictions = array.array('f')
        lsError = array.array('f')
        predError = array.array('f')
        # Unpack values
        fit_type, X0, X1, X2, X3, sigma, meanraw, X0err, X1err, X2err, X3err, ChiSqr = paramlist
        # Create our point arrays
        iLumi = self.parser.getLumiInfo(runNumber) # iLumi is a list: ( { LS, instLumi } )
        for LS, ilum, psi, phys, cms_ready in iLumi:
            if not ilum is None and phys:
                lumisecs.append(LS)
                pu = (ilum * ppInelXsec) / ( self.bunches * orbitsPerSec )
                # Either we have an exponential fit, or a polynomial fit
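                # exp fit:   rate = nBunches * ( X0 + X1*exp(X2 + X3*<PU>) )
                # cubic fit: rate = nBunches * ( X0 + X1*<PU> + X2*<PU>^2 + X3*<PU>^3 )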
                if type == "exp":
                    rr = self.bunches * (X0 + X1*math.exp(X2+X3*pu))
                else:
                    rr = self.bunches * (X0 + pu*X1 + (pu**2)*X2 + (pu**3)*X3)
                if rr<0: rr=0 # Make sure prediction is non negative
                predictions.append(rr)
                lsError.append(0)
                predError.append(self.bunches*self.sigmas*sigma)
        # Record for the purpose of doing checks
        self.predictionRec.setdefault(triggerName,{})[runNumber] = zip(lumisecs, predictions, predError)
        # Set some graph options
        fitGraph = TGraphErrors(len(lumisecs), lumisecs, predictions, lsError, predError)
        fitGraph.SetTitle("Fit (%s sigma)" % (self.sigmas)) 
        fitGraph.SetMarkerStyle(8)
        fitGraph.SetMarkerSize(0.8)
        fitGraph.SetMarkerColor(2) # Red
        fitGraph.SetFillColor(4)
        fitGraph.SetFillStyle(3003)
        fitGraph.GetXaxis().SetLimits(min_xaxis_val, 1.1*max_xaxis_val)
        
        return fitGraph
        
    # Use: Loads the fit data from the fit file
    # Parameters:
    # -- fitFile: The file that the fit data is stored in (a pickle file)
    # Returns: The input fit data
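    # The pickle file is expected to map trigger names to fit parameter tuples of the form
    # ( FitType, X0, X1, X2, X3, sigma, meanrawrate, X0err, X1err, X2err, X3err, ChiSqr ), as written by saveFit()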
    def loadFit(self):
        if self.fitFile == "":
            print "No fit file specified."
            return None
        InputFit = {} # Initialize InputFit (as an empty dictionary)
        # Try to open the file containing the fit info
        try:
            pkl_file = open(self.fitFile, 'rb')
            InputFit = pickle.load(pkl_file)
            pkl_file.close()
        except:
            # File failed to open
            print "Error: could not open fit file: %s" % (self.fitFile)
        return InputFit



    def getSteamRates(self):
        import csv
        file = "steamRate_forRateMon.csv"
        with open(file) as csvfile:
            steamReader=csv.reader(csvfile)
            for line in steamReader:
                path = line[0].split("_v")[0]
                
                if path.find("HLT_")!=-1:
                    try:
                        rate = float(line[6])
                        rateErr = float(line[7])
                    except:
                        rate = -1
                        rateErr = -1
                        
                    if rate >0.: self.steamRates[path] = [rate, rateErr]



    def getBrilCalcLumi(self, run_number):
        import csv
        file = "lumi.csv"
        lumi_array = []
        with open(file) as csvfile:
            fileReader = csv.reader(csvfile)
            for line in fileReader:
                run_number_file = int(line[0].split(":")[0])
                if run_number_file != run_number: continue
                lumi_section = int(line[1].split(":")[0])
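                # brilcalc reports per-LS integrated lumi; dividing by the ~23.3 s lumisection length gives an approximate instantaneous lumi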
                delivered = float(line[5])/23.3
                recorded = float(line[6])/23.3
                lumi_array.append((lumi_section,recorded,0,1,1))
        return lumi_array

                

                    
    # Use: Check raw rates in lumisections against the prediction, take note if any are outside a certain sigma range
    # Returns: (void)
    def doChecks(self):
        eprint = ErrorPrinter()
        eprint.saveDirectory = self.certifyDir
        # Look at all lumisections for each trigger for each run. Check which ones are behaving badly
        for triggerName in self.TriggerList: # We may want to look at the triggers from somewhere else, but for now I assume this will work
            for runNumber in self.allRates:
                if self.predictionRec.has_key(triggerName):
                    if self.allRates[runNumber].has_key(triggerName): # In case some run did not contain this trigger
                        data = self.allRates[runNumber][triggerName]
                        
                        if self.predictionRec[triggerName].has_key(runNumber): predList = self.predictionRec[triggerName][runNumber]
                        else: continue

                        for LS, pred, err in predList:
                            if data.has_key(LS): # In case this LS is missing

                                if not eprint.run_allLs.has_key(runNumber): eprint.run_allLs[runNumber] = {}
                                if not eprint.run_allLs[runNumber].has_key(triggerName): eprint.run_allLs[runNumber][triggerName] = []
                                eprint.run_allLs[runNumber][triggerName].append(LS)
                                

                                
                                if(abs(data[LS][0] - pred) > err):
                                    if err != 0: errStr = str((data[LS][0]-pred)/err)
                                    else: errStr = "inf"
                                    # Add data to eprint.run_ls_trig
                                    if not eprint.run_ls_trig.has_key(runNumber):
                                        eprint.run_ls_trig[runNumber] = {}
                                    if not eprint.run_ls_trig[runNumber].has_key(int(LS)):
                                        eprint.run_ls_trig[runNumber][int(LS)] = []
                                    eprint.run_ls_trig[runNumber][int(LS)].append(triggerName)
                                    # Add data to eprint.run_trig_ls
                                    if not eprint.run_trig_ls.has_key(runNumber):
                                        eprint.run_trig_ls[runNumber] = {}
                                    if not eprint.run_trig_ls[runNumber].has_key(triggerName):
                                        eprint.run_trig_ls[runNumber][triggerName] = []
                                    eprint.run_trig_ls[runNumber][triggerName].append(int(LS))

        eprint.outputErrors()
Exemplo n.º 4
0
class DataParser:
    # This is an interface for DBParser() to select and manage the data returned by DBParser()
    def __init__(self):
        # type: () -> None
        self.parser = DBParser()

        # The lists all have the same number of elements, e.g.: len(self.lumi_data[trg][run]) == len(self.pu_data[trg][run])
        self.ls_data = {}  # {'name': { run_number: [LS] } }
        self.rate_data = {}  # {'name': { run_number: { LS: raw_rates } } }
        self.ps_data = {}  # {'name': { run_number: { LS: prescale  } } }
        self.pu_data = {}  # {'name': { run_number: { LS: PU } } }
        self.lumi_data = {}  # {'name': { run_number: { LS: iLumi } } }
        self.det_data = {}  # {'name': { run_number: { LS: detector_ready } } }
        self.phys_data = {}  # {'name': { run_number: { LS: phys_active } } }
        self.bw_data = {}  # {'name': { run_number: { LS: bandwidth } } }
        self.size_data = {}  # {'name': { run_number: { LS: size } } }
        self.lumi_info = {}  # {run_number: [ (LS,ilum,psi,phys,cms_ready) ] }
        self.bunch_map = {}  # {run_number: nBunches }

        self.hlt_triggers = [
        ]  # List of specific HLT triggers we want to get rates for, if empty --> get all HLT rates
        self.l1_triggers = [
        ]  # List of specific L1 triggers we want to get rates for, if empty --> get all L1 rates
        self.runs_used = [
        ]  # List of runs which had rate info for queried objects
        self.runs_skipped = [
        ]  # List of runs which did not have rate info for queried objects
        self.name_list = [
        ]  # List of all named objects for which we have data, e.g. triggers, datasets, streams, etc...
        self.psi_filter = []
        self.type_map = {
        }  # Maps each object name to a type: trigger, dataset, stream, or L1A
        # NOTE: Still need to handle the case where if two objects share the same name, but diff type
        # NOTE2: This approach should be fine since DataParser owns the naming, but we will need to be careful

        self.ls_veto = {}  # {run_number:[LS list]} - LS to ignore
        self.name_veto = [
        ]  # List of paths/objects to remove from consideration

        self.use_prescaled_rate = False  # If true, then rates are not un-prescaled
        self.use_cross_section = False  # If true, then divide the rate by inst. lumi (only for L1 and HLT trigger data)
        self.normalize_bunches = True  # Normalize by the number of colliding bunches
        self.correct_for_DT = True
        self.convert_output = True  # Flag to convert data from { LS: data } to [ data ], used in the data getters

        self.skip_l1_triggers = False  # Flag to skip collecting rates for L1 triggers
        self.skip_hlt_triggers = False  # Flag to skip collecting rates for HLT triggers

        self.l1_rate_cut = 25e6  # Ignore L1 rates above this threshold

        self.max_deadtime = 10.
        self.min_ls = -1
        self.max_ls = 9999999

        self.use_L1_triggers = False  # Gets L1 rates
        self.use_HLT_triggers = False  # Gets HLT rates
        self.use_streams = False  # Gets stream rates
        self.use_datasets = False  # Gets dataset rates
        self.use_L1A_rate = False  # Gets the L1A rates

        self.use_ps_mask = False  # Collects data only for LS in the specified prescale indices

        self.use_best_lumi = True  # Uses best luminosity as determined by BRIL
        self.use_PLTZ_lumi = False  # Uses luminosity reading from PLTZ
        self.use_HF_lumi = False  # Uses luminosity reading from HF

        self.verbose = True

    def parseRuns(self, run_list):
        # type: (List[int]) -> None
        if len(self.hlt_triggers) == 0 and len(self.l1_triggers) > 0:
            self.skip_hlt_triggers = True

        if len(self.hlt_triggers) > 0 and len(self.l1_triggers) == 0:
            self.skip_l1_triggers = True

        counter = 1
        for run in sorted(run_list):
            if self.verbose:
                print "Processing run: %d (%d/%d)" % (run, counter,
                                                      len(run_list))
            counter += 1
            bunches = self.parser.getNumberCollidingBunches(run)[0]

            if bunches is None or bunches == 0:
                bunches = 1

            lumi_info = self.parseLumiInfo(
                run)  # [( LS,ilum,psi,phys,cms_ready ) ]
            run_data = self.getRunData(run, bunches, lumi_info)
            if len(run_data.keys()) == 0:  # i.e. no triggers/streams/datasets had enough valid rates
                self.runs_skipped.append(run)
                continue
            else:
                self.runs_used.append(run)
                self.bunch_map[run] = bunches
                self.lumi_info[run] = lumi_info

            for name in run_data:
                if name in self.name_veto:
                    continue

                ls_array = run_data[name]["LS"]
                rate = run_data[name]["rate"]
                prescale = run_data[name]["prescale"]
                pu = run_data[name]["PU"]
                lumi = run_data[name]["ilumi"]
                det_status = run_data[name]["status"]
                phys = run_data[name]["phys"]
                bw = run_data[name]["bandwidth"]
                size = run_data[name]["size"]

                if not name in self.name_list:
                    self.name_list.append(name)

                    self.ls_data[name] = {}
                    self.rate_data[name] = {}
                    self.ps_data[name] = {}
                    self.pu_data[name] = {}
                    self.lumi_data[name] = {}
                    self.det_data[name] = {}
                    self.phys_data[name] = {}
                    self.bw_data[name] = {}
                    self.size_data[name] = {}

                self.ls_data[name][run] = ls_array
                self.rate_data[name][run] = rate
                self.ps_data[name][run] = prescale
                self.pu_data[name][run] = pu
                self.lumi_data[name][run] = lumi
                self.det_data[name][run] = det_status
                self.phys_data[name][run] = phys
                self.bw_data[name][run] = bw
                self.size_data[name][run] = size

    def parseLumiInfo(self, run):
        # [( LS,ilum,psi,phys,cms_ready ) ]
        lumi_info = []

        trigger_mode = self.parser.getTriggerMode(run)[0]

        if trigger_mode.find('cosmics') > 0:
            # This is a cosmics menu --> No luminosity info
            if self.verbose:
                print "\tDetected cosmics run..."
                print "\tGetting lumi info..."
            for LS, psi in self.parser.getLSInfo(run):
                # We hard code phys and cms_ready to both be true for all LS in the run
                lumi_info.append([LS, 0.0, psi, 1, 1])
        elif trigger_mode.find('collisions') > 0:
            # This is a collisions menu
            if self.verbose:
                print "\tDetected collisions run..."
                print "\tGetting lumi info..."
            if self.use_best_lumi:
                lumi_info = self.parser.getQuickLumiInfo(run,
                                                         minLS=self.min_ls,
                                                         maxLS=self.max_ls)
            elif self.use_PLTZ_lumi:
                lumi_info = self.parser.getLumiInfo(run,
                                                    minLS=self.min_ls,
                                                    maxLS=self.max_ls,
                                                    lumi_source=1)
            elif self.use_HF_lumi:
                lumi_info = self.parser.getLumiInfo(run,
                                                    minLS=self.min_ls,
                                                    maxLS=self.max_ls,
                                                    lumi_source=2)
        else:
            # Unknown menu --> For now we assume it's compatible with collisions-type menus
            if self.verbose:
                print "\tUnknown run type: %s" % (trigger_mode)
                print "\tGetting lumi info..."
            if self.use_best_lumi:
                lumi_info = self.parser.getQuickLumiInfo(run,
                                                         minLS=self.min_ls,
                                                         maxLS=self.max_ls)
            elif self.use_PLTZ_lumi:
                lumi_info = self.parser.getLumiInfo(run,
                                                    minLS=self.min_ls,
                                                    maxLS=self.max_ls,
                                                    lumi_source=1)
            elif self.use_HF_lumi:
                lumi_info = self.parser.getLumiInfo(run,
                                                    minLS=self.min_ls,
                                                    maxLS=self.max_ls,
                                                    lumi_source=2)

        if self.ls_veto.has_key(run):
            new_lumi_info = []
            for LS, ilum, psi, phys, cms_ready in lumi_info:
                if LS in self.ls_veto[run]:
                    continue
                new_lumi_info.append([LS, ilum, psi, phys, cms_ready])
            lumi_info = new_lumi_info

        return lumi_info

    # This might be excessive, should think about reworking this section
    # ------
    # TODO: We need to ensure that none of the object names overlap with one another
    # (i.e. dataset names that overlap with stream names) for the rate data.
    def getRunData(self, run, bunches, lumi_info):
        # type: (int,int,List[Tuple[int,float,int,bool,bool]]) -> Dict[str: object]
        run_data = {}
        if bunches is None or bunches == 0:
            if self.verbose: print "Unable to get bunches"
            return {}

        if self.use_streams:
            run_data.update(self.getStreamData(run, bunches, lumi_info))
        if self.use_datasets:
            run_data.update(self.getDatasetData(run, bunches, lumi_info))
        if self.use_L1A_rate:
            run_data.update(self.getL1AData(run, bunches, lumi_info))
        if self.use_HLT_triggers:
            run_data.update(self.getHLTTriggerData(run, bunches, lumi_info))
        if self.use_L1_triggers:
            run_data.update(self.getL1TriggerData(run, bunches, lumi_info))

        return run_data

    # Returns information related to L1 triggers
    def getL1TriggerData(self, run, bunches, lumi_info):
        # type: (int,int,List[Tuple[int,float,int,bool,bool]]) -> Dict[str: object]
        if self.skip_l1_triggers:
            return {}

        if self.verbose: print "\tGetting L1 rates..."
        L1_rates = self.parser.getL1Rates(run,
                                          minLS=self.min_ls,
                                          maxLS=self.max_ls,
                                          scaler_type=1)

        run_data = {}  # {'object': {"LS": list, "rate": {...}, ... } }

        for trigger in L1_rates:
            self.type_map[trigger] = "trigger"
            run_data[trigger] = {}
            ls_array = array.array('f')
            rate_dict = {}
            ps_dict = {}
            pu_dict = {}
            lumi_dict = {}
            det_dict = {}
            phys_dict = {}
            bw_dict = {}
            size_dict = {}
            for LS, ilum, psi, phys, cms_ready in lumi_info:
                if psi not in self.psi_filter and self.use_ps_mask:
                    continue

                if not ilum is None and L1_rates[trigger].has_key(LS):
                    pu = (ilum / bunches * ppInelXsec / orbitsPerSec)
                    rate = L1_rates[trigger][LS][0]
                    prescale = L1_rates[trigger][LS][1]

                    if rate > self.l1_rate_cut:
                        continue

                    if self.normalize_bunches:
                        rate = rate / bunches

                    if self.use_prescaled_rate:
                        if prescale != 0:
                            rate = rate / prescale
                        #else:
                        #    rate = 0

                    if self.use_cross_section:
                        rate = rate / ilum

                    ls_array.append(LS)
                    rate_dict[LS] = rate
                    ps_dict[LS] = prescale
                    pu_dict[LS] = pu
                    lumi_dict[LS] = ilum
                    det_dict[LS] = cms_ready
                    phys_dict[LS] = phys
                    bw_dict[LS] = None
                    size_dict[LS] = None

            run_data[trigger]["LS"] = ls_array
            run_data[trigger]["rate"] = rate_dict
            run_data[trigger]["prescale"] = ps_dict
            run_data[trigger]["PU"] = pu_dict
            run_data[trigger]["ilumi"] = lumi_dict
            run_data[trigger]["status"] = det_dict
            run_data[trigger]["phys"] = phys_dict
            run_data[trigger]["bandwidth"] = bw_dict
            run_data[trigger]["size"] = size_dict
        return run_data

    # Returns information related to HLT triggers
    def getHLTTriggerData(self, run, bunches, lumi_info):
        # type: (int,int,List[Tuple[int,float,int,bool,bool]]) -> Dict[str: object]
        if self.skip_hlt_triggers:
            return {}

        if self.verbose: print "\tGetting HLT rates..."

        HLT_rates = self.parser.getHLTRates(run,
                                            self.hlt_triggers,
                                            minLS=self.min_ls,
                                            maxLS=self.max_ls)

        if self.correct_for_DT:
            self.correctForDeadtime(HLT_rates, run)

        run_data = {}  # {'object': {"LS": list, "rate": {...}, ... } }

        for trigger in HLT_rates:
            self.type_map[trigger] = "trigger"
            run_data[trigger] = {}
            ls_array = array.array('f')
            rate_dict = {}
            ps_dict = {}
            pu_dict = {}
            lumi_dict = {}
            det_dict = {}
            phys_dict = {}
            bw_dict = {}
            size_dict = {}
            for LS, ilum, psi, phys, cms_ready in lumi_info:
                if psi not in self.psi_filter and self.use_ps_mask:
                    continue

                if not ilum is None and HLT_rates[trigger].has_key(LS):
                    pu = (ilum / bunches * ppInelXsec / orbitsPerSec)
                    rate = HLT_rates[trigger][LS][0]
                    prescale = HLT_rates[trigger][LS][1]

                    if self.normalize_bunches:
                        rate = rate / bunches

                    if self.use_prescaled_rate:
                        if prescale != 0:
                            rate = rate / prescale
                        #else:
                        #    rate = 0

                    if self.use_cross_section:
                        rate = rate / ilum

                    ls_array.append(LS)
                    rate_dict[LS] = rate
                    ps_dict[LS] = prescale
                    pu_dict[LS] = pu
                    lumi_dict[LS] = ilum
                    det_dict[LS] = cms_ready
                    phys_dict[LS] = phys
                    bw_dict[LS] = None
                    size_dict[LS] = None

            run_data[trigger]["LS"] = ls_array
            run_data[trigger]["rate"] = rate_dict
            run_data[trigger]["prescale"] = ps_dict
            run_data[trigger]["PU"] = pu_dict
            run_data[trigger]["ilumi"] = lumi_dict
            run_data[trigger]["status"] = det_dict
            run_data[trigger]["phys"] = phys_dict
            run_data[trigger]["bandwidth"] = bw_dict
            run_data[trigger]["size"] = size_dict
        return run_data

    def getStreamData(self, run, bunches, lumi_info):
        # type: (int,int,List[Tuple[int,float,int,bool,bool]]) -> Dict[str: object]
        if self.verbose: print "\tGetting Stream rates..."
        data = self.parser.getStreamData(
            run, minLS=self.min_ls,
            maxLS=self.max_ls)  # {'stream': [ (LS,rate,size,bandwidth) ] }

        stream_rates = {}  # {'stream': {LS: (rate,size,bandwidth) } }

        # Format the output from DBParser()
        for name in data:
            stream_rates[name] = {}
            for LS, rate, size, bandwidth in data[name]:
                stream_rates[name][LS] = [rate, size, bandwidth]

        run_data = {}  # {'object': {"LS": list, "rate": {...}, ... } }

        blacklist = [
            "PhysicsEndOfFill", "PhysicsMinimumBias0", "PhysicsMinimumBias1",
            "PhysicsMinimumBias2"
        ]
        sum_list = []

        for _object in stream_rates:
            self.type_map[_object] = "stream"
            run_data[_object] = {}

            if _object[:7] == "Physics" and not _object in blacklist:
                sum_list.append(_object)

            ls_array = array.array('f')
            rate_dict = {}
            ps_dict = {}
            pu_dict = {}
            lumi_dict = {}
            det_dict = {}
            phys_dict = {}
            bw_dict = {}
            size_dict = {}
            for LS, ilum, psi, phys, cms_ready in lumi_info:
                if psi not in self.psi_filter and self.use_ps_mask:
                    continue

                if not ilum is None and stream_rates[_object].has_key(LS):
                    pu = (ilum / bunches * ppInelXsec / orbitsPerSec)
                    rate = stream_rates[_object][LS][0]
                    size = stream_rates[_object][LS][1]
                    bandwidth = stream_rates[_object][LS][2]

                    if self.normalize_bunches:
                        rate = rate / bunches

                    ls_array.append(LS)
                    rate_dict[LS] = rate
                    ps_dict[LS] = None
                    pu_dict[LS] = pu
                    lumi_dict[LS] = ilum
                    det_dict[LS] = cms_ready
                    phys_dict[LS] = phys
                    bw_dict[LS] = bandwidth
                    size_dict[LS] = size

            run_data[_object]["LS"] = ls_array
            run_data[_object]["rate"] = rate_dict
            run_data[_object]["prescale"] = ps_dict
            run_data[_object]["PU"] = pu_dict
            run_data[_object]["ilumi"] = lumi_dict
            run_data[_object]["status"] = det_dict
            run_data[_object]["phys"] = phys_dict
            run_data[_object]["bandwidth"] = bw_dict
            run_data[_object]["size"] = size_dict

        self.sumObjects(run_data=run_data,
                        new_name="Combined_Physics_Streams",
                        sum_list=sum_list,
                        obj_type="stream")

        return run_data

    def getDatasetData(self, run, bunches, lumi_info):
        # type: (int,int,List[Tuple[int,float,int,bool,bool]]) -> Dict[str: object]
        if self.verbose: print "\tGetting Dataset rates..."
        data = self.parser.getPrimaryDatasets(
            run, minLS=self.min_ls,
            maxLS=self.max_ls)  # {'dataset': [ (LS,rate) ] }

        dataset_rates = {}  # {'dataset': {LS: (rate) } }

        # Format the output from DBParser()
        for name in data:
            dataset_rates[name] = {}
            for LS, rate in data[name]:
                dataset_rates[name][LS] = [rate]

        run_data = {}  # {'object': {"LS": list, "rate": {...}, ... } }

        for _object in dataset_rates:
            self.type_map[_object] = "dataset"
            run_data[_object] = {}
            ls_array = array.array('f')
            rate_dict = {}
            ps_dict = {}
            pu_dict = {}
            lumi_dict = {}
            det_dict = {}
            phys_dict = {}
            bw_dict = {}
            size_dict = {}
            for LS, ilum, psi, phys, cms_ready in lumi_info:
                if psi not in self.psi_filter and self.use_ps_mask:
                    continue

                if not ilum is None and dataset_rates[_object].has_key(LS):
                    pu = (ilum / bunches * ppInelXsec / orbitsPerSec)
                    rate = dataset_rates[_object][LS][0]

                    if self.normalize_bunches:
                        rate = rate / bunches

                    ls_array.append(LS)
                    rate_dict[LS] = rate
                    ps_dict[LS] = None
                    pu_dict[LS] = pu
                    lumi_dict[LS] = ilum
                    det_dict[LS] = cms_ready
                    phys_dict[LS] = phys
                    bw_dict[LS] = None
                    size_dict[LS] = None

            run_data[_object]["LS"] = ls_array
            run_data[_object]["rate"] = rate_dict
            run_data[_object]["prescale"] = ps_dict
            run_data[_object]["PU"] = pu_dict
            run_data[_object]["ilumi"] = lumi_dict
            run_data[_object]["status"] = det_dict
            run_data[_object]["phys"] = phys_dict
            run_data[_object]["bandwidth"] = bw_dict
            run_data[_object]["size"] = size_dict
        return run_data

    # NOTE: L1A_rates has a slightly different dict format, the value-pair for the LS keys is NOT a list
    def getL1AData(self, run, bunches, lumi_info):
        # type: (int,int,List[Tuple[int,float,int,bool,bool]]) -> Dict[str: object]
        L1A_rates = {}  # {'L1A': {LS: rate } }

        if self.verbose: print "\tGetting L1ATotal rates..."
        L1A_rates["L1ATotal"] = self.parser.getL1rate(run)
        if self.verbose: print "\tGetting L1APhysics rates..."
        L1A_rates["L1APhysics"] = self.parser.getL1APhysics(run)
        if self.verbose: print "\tGetting L1APhysicsLost rates..."
        L1A_rates["L1APhysicsLost"] = self.parser.getL1APhysicsLost(run)

        run_data = {}  # {'object': {"LS": list, "rate": {...}, ... } }

        for _object in L1A_rates:
            self.type_map[_object] = "L1A"
            run_data[_object] = {}
            ls_array = array.array('f')
            rate_dict = {}
            ps_dict = {}
            pu_dict = {}
            lumi_dict = {}
            det_dict = {}
            phys_dict = {}
            bw_dict = {}
            size_dict = {}
            for LS, ilum, psi, phys, cms_ready in lumi_info:
                if psi not in self.psi_filter and self.use_ps_mask:
                    continue

                if not ilum is None and L1A_rates[_object].has_key(LS):
                    pu = (ilum / bunches * ppInelXsec / orbitsPerSec)
                    rate = L1A_rates[_object][LS]

                    if self.normalize_bunches:
                        rate = rate / bunches

                    ls_array.append(LS)
                    rate_dict[LS] = rate
                    ps_dict[LS] = None
                    pu_dict[LS] = pu
                    lumi_dict[LS] = ilum
                    det_dict[LS] = cms_ready
                    phys_dict[LS] = phys
                    bw_dict[LS] = None
                    size_dict[LS] = None

            run_data[_object]["LS"] = ls_array
            run_data[_object]["rate"] = rate_dict
            run_data[_object]["prescale"] = ps_dict
            run_data[_object]["PU"] = pu_dict
            run_data[_object]["ilumi"] = lumi_dict
            run_data[_object]["status"] = det_dict
            run_data[_object]["phys"] = phys_dict
            run_data[_object]["bandwidth"] = bw_dict
            run_data[_object]["size"] = size_dict
        return run_data

    # Use: Modifies the rates in Rates, correcting them for deadtime
    # Parameters:
    # -- Rates: A dict - {'trigger': {LS: (raw_rate,prescale) } }
    def correctForDeadtime(self, Rates, run_number):
        # type: (Dict[str,object],int) -> None
        dead_time = self.parser.getDeadTime(run_number)
        for LS in dead_time:
            for trigger in Rates:
                if Rates[trigger].has_key(LS):  # Sometimes, LS's are missing
                    Rates[trigger][LS][0] *= (1. + dead_time[LS] / 100.)
                    if dead_time[LS] > self.max_deadtime:  # Do not plot lumis where the deadtime exceeds max_deadtime (default 10%)
                        del Rates[trigger][LS]

    # Creates a new dictionary key, that corresponds to the summed rates of all the specified objects
    # data: {'object': {"LS": list, "rate": {...}, ... } }
    def sumObjects(self, run_data, new_name, sum_list, obj_type):
        # type: (Dict[str,object],str,List[str],str) -> bool
        if not set(sum_list) <= set(run_data.keys()):
            if self.verbose:
                print "\tERROR: Specified objects that aren't in the run_data"
            return False
        if (len(sum_list) == 0):
            print "\tERROR: sum_list has size=0 (see sumObjects in DataParser.py). May be that there were no streams for this run."
            return False
        ref_name = sum_list[0]
        ls_array = array.array('f')
        rate_dict = {}
        ps_dict = {}
        pu_dict = {}
        lumi_dict = {}
        det_dict = {}
        phys_dict = {}
        bw_dict = {}
        size_dict = {}

        # We only use LS that are in *all* of the objects
        ls_set = set(run_data[ref_name]["LS"])
        for name in sum_list:
            ls_set = ls_set & set(run_data[name]["LS"])
        ls_array.extend(sorted(ls_set))

        for LS in ls_array:
            total_rate = 0
            total_bw = 0
            total_size = 0
            for name in sum_list:
                total_rate += run_data[name]["rate"][LS]
                try:
                    total_bw += run_data[name]["bandwidth"][LS]
                except:
                    total_bw = None

                try:
                    total_size += run_data[name]["size"][LS]
                except:
                    total_size = None
            rate_dict[LS] = total_rate
            bw_dict[LS] = total_bw
            size_dict[LS] = total_size
            ps_dict[LS] = None
            pu_dict[LS] = run_data[ref_name]["PU"][LS]
            lumi_dict[LS] = run_data[ref_name]["ilumi"][LS]
            det_dict[LS] = run_data[ref_name]["status"][LS]
            phys_dict[LS] = run_data[ref_name]["phys"][LS]

        self.type_map[new_name] = obj_type
        run_data[new_name] = {}
        run_data[new_name]["LS"] = ls_array
        run_data[new_name]["rate"] = rate_dict
        run_data[new_name]["prescale"] = ps_dict
        run_data[new_name]["PU"] = pu_dict
        run_data[new_name]["ilumi"] = lumi_dict
        run_data[new_name]["status"] = det_dict
        run_data[new_name]["phys"] = phys_dict
        run_data[new_name]["bandwidth"] = bw_dict
        run_data[new_name]["size"] = size_dict

        return True

    # Converts input: {'name': { run_number: { LS: data } } } --> {'name': {run_number: [ data ] } }
    def convertOutput(self, _input):
        # type: (Dict[str,object]) -> Dict[str,object]
        output = {}
        for name in _input:
            output[name] = {}
            for run in _input[name]:
                output[name][run] = array.array('f')
                for LS in sorted(_input[name][run].keys()):  # iterating over *sorted* LS is extremely important here
                    output[name][run].append(_input[name][run][LS])
        return output

    def resetData(self):
        # type: () -> None
        self.ls_data = {}  # {'name': { run_number: [LS] } }
        self.rate_data = {}  # {'name': { run_number: { LS: raw_rates } } }
        self.ps_data = {}  # {'name': { run_number: { LS: prescale } } }
        self.pu_data = {}  # {'name': { run_number: { LS: PU } } }
        self.lumi_data = {}  # {'name': { run_number: { LS: iLumi } } }
        self.det_data = {}  # {'name': { run_number: { LS: detector_ready } } }
        self.phys_data = {}  # {'name': { run_number: { LS: phys_active } } }
        self.bw_data = {}  # {'name': { run_number: { LS: bandwidth } } }
        self.size_data = {}  # {'name': { run_number: { LS: size } } }
        self.lumi_info = {}  # {run_number: [ (LS,ilum,psi,phys,cms_ready) ] }
        self.bunch_map = {}  # {run_number: nBunches }

        self.runs_used = []
        self.runs_skipped = []
        self.name_list = []
        self.type_map = {}

####################################################################################################

# --- All the 'getters' ---

    def getLSData(self):
        # type: () -> Dict[str,object]
        return self.ls_data

    def getRateData(self):
        # type: () -> Dict[str,object]
        if self.convert_output:
            output = self.convertOutput(self.rate_data)
        else:
            output = self.rate_data
        return output

    def getPSData(self):
        # type: () -> Dict[str,object]
        if self.convert_output:
            output = self.convertOutput(self.ps_data)
        else:
            output = self.ps_data
        return output

    def getPUData(self):
        # type: () -> Dict[str,object]
        if self.convert_output:
            output = self.convertOutput(self.pu_data)
        else:
            output = self.pu_data
        return output

    def getLumiData(self):
        # type: () -> Dict[str,object]
        if self.convert_output:
            output = self.convertOutput(self.lumi_data)
        else:
            output = self.lumi_data
        return output

    def getDetectorStatus(self):
        # type: () -> Dict[str,object]
        if self.convert_output:
            output = self.convertOutput(self.det_data)
        else:
            output = self.det_data
        return output

    def getPhysStatus(self):
        # type: () -> Dict[str,object]
        if self.convert_output:
            output = self.convertOutput(self.phys_data)
        else:
            output = self.phys_data
        return output

    def getBandwidthData(self):
        # type: () -> Dict[str,object]
        if self.convert_output:
            output = self.convertOutput(self.bw_data)
        else:
            output = self.bw_data
        return output

    def getSizeData(self):
        # type: () -> Dict[str,object]
        if self.convert_output:
            output = self.convertOutput(self.size_data)
        else:
            output = self.size_data
        return output

    def getLumiInfo(self):
        # type: () -> Dict[int,List[Tuple]]
        return self.lumi_info

    def getBunchMap(self):
        # type: () -> Dict[int,int]
        return self.bunch_map

    def getRunsUsed(self):
        # type: () -> List[int]
        return self.runs_used

    def getNameList(self):
        # type: () -> List[str]
        return self.name_list

    # Returns all the objects of type obj_type we have rate for
    def getObjectList(self, obj_type):
        # type: (str) -> List[str]
        _list = []
        for obj in self.name_list:
            if self.type_map[obj] == obj_type:
                _list.append(obj)
        return _list

    def getTypeMap(self):
        # type: () -> Dict[str,str]
        return self.type_map

    # Return the latest LS for a given run or -1
    def getLastLS(self, run):
        # type: (int) -> int
        if not run in self.runs_used:
            return -1
        index = LUMI_INFO_MAP["LS"]
        return max(self.lumi_info[run], key=lambda x: x[index])[index]

    # Return the latest run for which we have data or -1
    def getLastRun(self):
        # type: () -> int
        if len(self.runs_used) == 0:
            return -1
        return max(self.runs_used)
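
# Minimal usage sketch (illustrative; the trigger name and run number below are hypothetical,
# and a working DBParser/database connection is assumed):
#
#   parser = DataParser()
#   parser.use_HLT_triggers = True
#   parser.hlt_triggers = ["HLT_IsoMu27"]   # hypothetical trigger
#   parser.parseRuns([273158])              # hypothetical run number
#   rate_data = parser.getRateData()        # {'name': { run: [rates] } } since convert_output is True
#   pu_data = parser.getPUData()            # matching {'name': { run: [PU] } }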