Example 1
def getThreeClosestDatabasesBackups( infos ):
    """
        @summary : Returns the three database backups
                   that are the closest to the startTime
                   asked for the database recollection.

        @param infos : Object whose databasesRecollectionStartTime
                       attribute holds the requested start time.

        @return: The three database backups that are the closest
                 to the startTime asked for the database recollection.
    """
    
    closestFiles = []
    differenceFileTuples = []
    files = os.listdir( StatsPaths.STATSDB + 'databasesTimeOfUpdatesBackups/' )
    
    startTimeInEpochformat = StatsDateLib.getSecondsSinceEpoch( infos.databasesRecollectionStartTime )
    
    for file in files:

        fileDateInIsoFormat = "%s %s" %( str(file).split("_")[0], str(file).split("_")[1] )

        tupleToAdd = ( abs( StatsDateLib.getSecondsSinceEpoch(fileDateInIsoFormat) - startTimeInEpochformat ), file )

        differenceFileTuples.append( tupleToAdd )
        

    
    # Sort by time difference so that the slice below really yields the closest backups.
    differenceFileTuples.sort()

    for differenceFileTuple in differenceFileTuples[:3]:
        closestFiles.append( differenceFileTuple[1] )
        
    return closestFiles
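A note on the sort added above: the (difference, file) tuples must be ordered before slicing, since os.listdir returns files in arbitrary order. A minimal self-contained sketch of the same selection idea, using invented epoch values instead of the StatsDateLib helpers:

def threeClosest( candidates, targetEpoch ):
    # candidates : list of (epochSeconds, name) tuples -- hypothetical sample data.
    diffs = [ ( abs( epoch - targetEpoch ), name ) for epoch, name in candidates ]
    diffs.sort()  # smallest absolute difference first
    return [ name for diff, name in diffs[:3] ]

backups = [ (1000, "a.bak"), (5000, "b.bak"), (1200, "c.bak"), (900, "d.bak") ]
print( threeClosest( backups, 1100 ) )  # ['a.bak', 'c.bak', 'd.bak']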
Example 2
    def prepareQuery(self):
        """
            @summary : Builds up the query to be executed.
        
            @SIDE_EFFECT :  modifies self.query value.
            
        """
        
        global _ 
        
        if self.queryParameters.combine == 'true':
            totals = True
            mergerType = "regular"
        else:
            totals = False      
            mergerType = ""
            
            
        fixedCurrent  = False
        fixedPrevious = False
        
        if _("current")  in str(self.queryParameters.fixedSpan).lower() :
            fixedCurrent = True 
        elif _("previous") in str(self.queryParameters.fixedSpan).lower():
            fixedPrevious = True      
        else:
            fixedCurrent  = False
            fixedPrevious = False 
       

            
        hour      = self.queryParameters.endTime.split(" ")[1]
        splitDate = self.queryParameters.endTime.split(" ")[0].split( '-' )
        
        date =  splitDate[2] + '-' + splitDate[1]  + '-' + splitDate[0]  + " " + hour 
        if self.queryParameters.span == "": 
            timespan = 0 
        else:
            timespan = int(self.queryParameters.span )    
            
        StatsDateLib.setLanguage( self.querierLanguage )
        startTime, endTime = StatsDateLib.getStartEndInIsoFormat(date, timespan, self.queryParameters.specificSpan, fixedCurrent, fixedPrevious )
        
        timespan = int( StatsDateLib.getSecondsSinceEpoch( endTime ) - StatsDateLib.getSecondsSinceEpoch( startTime ) ) / 3600   
        
        combinedMachineName = ""
        for machine in self.queryParameters.machines:
            combinedMachineName = combinedMachineName + machine

        machines = [ combinedMachineName ]
       
        
        self.graphicProducer = RRDGraphicProducer( self.queryParameters.fileTypes[0], self.queryParameters.statsTypes ,\
                                                   totals,  self.queryParameters.specificSpan,\
                                                   self.queryParameters.sourLients, timespan,\
                                                   startTime, endTime, machines, False,
                                                   mergerType, True, self.querierLanguage, self.querierLanguage )
  
        StatsDateLib.setLanguage( LanguageTools.getMainApplicationLanguage() )
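The date handling in prepareQuery swaps an endTime of the form "YYYY-MM-DD HH:MM:SS" into "DD-MM-YYYY HH:MM:SS" before handing it to StatsDateLib. A standalone sketch of that reordering, with an invented sample value:

endTime = "2008-02-28 15:00:00"  # hypothetical query parameter
hour = endTime.split(" ")[1]
year, month, day = endTime.split(" ")[0].split("-")
date = "%s-%s-%s %s" % ( day, month, year, hour )
print( date )  # 28-02-2008 15:00:00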
Example 3
def getTimeSeperatorsBasedOnAvailableMemory(startTime, endTime, clients,
                                            fileType, machines):
    """    
        @summary: Returns the time separators to be used for the transfer
                  in a way that should prevent overloading memory.

        @param startTime: Start time of the transfer to be attempted.
        @param endTime:   End time of the transfer to be attempted.
        @param clients:   List of clients/sources to be transferred.
        @param fileType:  tx or rx.
        @param machines:  Machines on which the clients/sources reside.

        @return: The time separators.
        
    """

    width = 0  # Width in seconds of the transfer to be attempted
    seperators = []  # Time separators representing every hour to be transferred.
    allFiles = []  # List of all pickle files that will be involved
    hourlyFiles = []  # List of all files to be handled for a certain hour.
    hourlyFileSizes = []  # Total file size of all the files to be handled at a certain hour.

    totalSizeToloadInMemory = 0.0  # Total size of all the pickle files to load in memory
    currentlyAvailableMemory = 0.0  # Total currently available memory on the present machine.
    seperatorsBasedOnAvailableMemory = [startTime, endTime]  # Suppose we have all the memory we need.

    width = (StatsDateLib.getSecondsSinceEpoch(endTime) -
             StatsDateLib.getSecondsSinceEpoch(startTime)) / StatsDateLib.HOUR

    seperators = [startTime]
    seperators.extend(
        StatsDateLib.getSeparatorsWithStartTime(
            startTime=startTime,
            width=width * StatsDateLib.HOUR,
            interval=StatsDateLib.HOUR)[:-1])

    for seperator in seperators:
        hourlyFiles = PickleMerging.createNonMergedPicklesList(
            seperator, machines, fileType, clients)
        allFiles.extend(hourlyFiles)
        hourlyFileSizes.append(
            MemoryManagement.getTotalSizeListOfFiles(hourlyFiles))

    totalSizeToloadInMemory = MemoryManagement.getTotalSizeListOfFiles(
        allFiles)
    currentlyAvailableMemory = MemoryManagement.getCurrentFreeMemory(
        marginOfError=0.75
    )  # Never expect more than 25% of the available memory to be usable for pickle loading.

    if totalSizeToloadInMemory >= currentlyAvailableMemory:
        seperatorsBasedOnAvailableMemory = MemoryManagement.getSeperatorsForHourlyTreatments(
            startTime, endTime, currentlyAvailableMemory, hourlyFileSizes)

    return seperatorsBasedOnAvailableMemory
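The guard at the end is the heart of this function: the whole [startTime, endTime] range is kept as a single window unless the estimated pickle load exceeds what the machine can hold. A rough standalone illustration of that decision, with invented sizes:

def chooseSeparators( start, end, totalSize, freeMemory, subdivide ):
    # Keep one big window when everything fits in memory; otherwise
    # delegate to a subdividing strategy (a caller-supplied function here).
    if totalSize < freeMemory:
        return [ start, end ]
    return subdivide( start, end )

hourly = lambda s, e: [ s, "2006-07-31 16:00:00", e ]  # stand-in subdivider
print( chooseSeparators( "2006-07-31 13:00:00", "2006-07-31 19:00:00",
                         120.0, 512.0, hourly ) )  # everything fits : one window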
Example 4
 def mergePicklesFromDifferentHours( logger = None , startTime = "2006-07-31 13:00:00",\
                                     endTime = "2006-07-31 19:00:00", client = "satnet",\
                                     machine = "pdsPM", fileType = "tx" ):
     """
         @summary : This method merges entire hourly pickle files together.

         @note    : This does not support merging part of the data of pickles.
     
     """
     
     if logger != None :
         logger.debug( _("Call to mergeHourlyPickles received.") )
         logging = True
     else:
         logging = False
             
     pickles = []
     entries = {}
     width = StatsDateLib.getSecondsSinceEpoch( endTime ) - StatsDateLib.getSecondsSinceEpoch( startTime )
     startTime = StatsDateLib.getIsoWithRoundedHours( startTime )
     
     seperators = [startTime]
     seperators.extend( StatsDateLib.getSeparatorsWithStartTime( startTime = startTime , width=width, interval=60*StatsDateLib.MINUTE )[:-1])
         
     for seperator in seperators :
         pickles.append( StatsPickler.buildThisHoursFileName(  client = client, offset = 0, currentTime = seperator, machine = machine, fileType = fileType ) )        
     
     
     startingNumberOfEntries = 0
     #print "prior to loading and merging pickles : %s " %( StatsDateLib.getIsoFromEpoch( time.time() ) ) 
     for pickle in pickles : 
         
         if os.path.isfile( pickle ) :
             
                 
             tempCollection = CpickleWrapper.load( pickle )
             if tempCollection != None :
                 for i in xrange( len( tempCollection.fileEntries )  ):
                     entries[startingNumberOfEntries + i] = tempCollection.fileEntries[i]
                 startingNumberOfEntries = startingNumberOfEntries + len( tempCollection.fileEntries ) 
             else:                    
                  # Corrupted or unreadable pickle : abort the merge entirely.
                  sys.exit()
         else:
                        
             emptyEntries =  PickleMerging.fillWithEmptyEntries( nbEmptyEntries = 60, entries = {} )
             for i in xrange( 60 ):
                 entries[i + startingNumberOfEntries ] = emptyEntries [i]
             startingNumberOfEntries = startingNumberOfEntries + 60
     
     #print "after the  loading and merging og pickles : %s " %( StatsDateLib.getIsoFromEpoch( time.time() ) )        
     
     statsCollection = FileStatsCollector(  startTime = startTime , endTime = endTime, interval = StatsDateLib.MINUTE, totalWidth = width, fileEntries = entries,fileType= fileType, logger = logger, logging = logging )
        
             
     return statsCollection        
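The seperators list built here is simply the start time followed by every later hour boundary, with the final boundary dropped. With plain epoch arithmetic the same idea reads:

HOUR = 3600

def hourlySeperators( startEpoch, widthSeconds ):
    # Start time plus each following hour boundary, excluding the end itself.
    count = int( widthSeconds // HOUR )
    return [ startEpoch + i * HOUR for i in range( count ) ]

print( hourlySeperators( 0, 3 * HOUR ) )  # [0, 3600, 7200]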
Example 5
def buildCsvFileName(infos):
    """ 
    
        @summary: Builds and returns the file name to use for the csv file.
        
        @param infos: _CvsInfos instance containing the required 
                      information to build up the file name.
        
        @return: Returns the built-up file name.
                      
    """

    global _

    StatsDateLib.setLanguage(infos.outputLanguage)
    paths = StatsPaths()
    paths.setPaths(infos.outputLanguage)

    machinesStr = str(infos.machinesForLabels).replace('[', '').replace(
        ']', '').replace(',', '').replace("'",
                                          "").replace('"',
                                                      '').replace(' ', '')

    currentYear, currentMonth, currentDay = StatsDateLib.getYearMonthDayInStrfTime(
        StatsDateLib.getSecondsSinceEpoch(infos.start))
    currentWeek = time.strftime(
        "%W", time.gmtime(StatsDateLib.getSecondsSinceEpoch(infos.start)))

    fileName = paths.STATSCSVFILES

    if infos.span == "daily":
        fileName = fileName + "/" + _(
            "daily/") + infos.fileType + "/%s/%s/%s/%s.csv" % (
                machinesStr, currentYear, currentMonth, currentDay)

    elif infos.span == "weekly":
        fileName = fileName + "/" + _(
            "weekly/") + infos.fileType + "/%s/%s/%s.csv" % (
                machinesStr, currentYear, currentWeek)

    elif infos.span == "monthly":
        fileName = fileName + "/" + _(
            "monthly/") + infos.fileType + "/%s/%s/%s.csv" % (
                machinesStr, currentYear, currentMonth)

    elif infos.span == "yearly":
        fileName = fileName + "/" + _(
            "yearly/") + infos.fileType + "/%s/%s.csv" % (machinesStr,
                                                          currentYear)

    StatsDateLib.setLanguage(LanguageTools.getMainApplicationLanguage())

    return fileName
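The week component of the weekly file name comes from time.strftime's %W directive (week of the year, with Monday as the first day). For example:

import time

epoch = 1152273600  # 2006-07-07 12:00:00 UTC, an arbitrary instant
print( time.strftime( "%W", time.gmtime( epoch ) ) )  # '27'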
Example 6
 def getSeperatorsForHourlyTreatments( startTime, endTime, currentFreeMemory, fileSizesPerHour, usage= "rrd"  ):    
     """
     
          @summary : Returns a list of time separators based on a list of files and
                     the current amount of free memory. Each separator represents the time
                     associated with a certain hourly file. Each separator will represent
                     the maximum amount of files that can be treated at the same time
                     without busting the current memory.

          @attention: The list of files MUST refer to hourly files.

          @param startTime: Start time in ISO format of the interval to work with.
          @param endTime: End time in ISO format of the interval to work with.
          @param currentFreeMemory: Maximum amount of memory to use per separation.
          @param fileSizesPerHour: Size of the file(s) to be treated at every hour.

          @return: Returns the time separators.
                
     """
     
     currentTotalFileSizes = 0 
     currentTime = StatsDateLib.getSecondsSinceEpoch(startTime)
     seperators = [startTime]         
     
     if fileSizesPerHour[0] < currentFreeMemory:              
         
         for fileSizePerHour in fileSizesPerHour :
             currentTotalFileSizes = currentTotalFileSizes + fileSizePerHour
             
             if currentFreeMemory < currentTotalFileSizes:
                 seperators.append( StatsDateLib.getIsoFromEpoch(currentTime))
                 currentTotalFileSizes = 0
 
                 
             currentTime = currentTime + StatsDateLib.HOUR
     else:
         raise Exception( "Cannot build seperators. First file will not even fit within current available memory." )
         
     if seperators[len(seperators) -1 ] !=  endTime :
         seperators.append( endTime )
                 
     if len(seperators) > 2 : # If any "in between" separators were added
         i = 1
         currentLength = len(seperators) -1
         while i < currentLength: #add 1 minute 
             if usage == "rrd":
                 seperators.insert(i+1, StatsDateLib.getIsoFromEpoch( (StatsDateLib.getSecondsSinceEpoch(seperators[i]) + StatsDateLib.MINUTE)))
             else:
                 seperators.insert( i+1, StatsDateLib.getSecondsSinceEpoch(seperators[i]) )
             currentLength = currentLength + 1
             i = i + 2
                     
     return seperators    
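The core of getSeperatorsForHourlyTreatments is a greedy accumulation: hourly file sizes are summed until the memory budget would be busted, and a cut is made at that hour. A stripped-down sketch over plain epoch seconds, with invented sizes:

HOUR = 3600

def greedyCuts( startEpoch, hourlySizes, budget ):
    cuts, running, t = [ startEpoch ], 0, startEpoch
    for size in hourlySizes:
        running = running + size
        if running > budget:  # this hour would bust the budget : cut here
            cuts.append( t )
            running = 0
        t = t + HOUR
    return cuts

print( greedyCuts( 0, [10, 20, 30, 40, 10], 50 ) )  # [0, 7200]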
Example 7
 def setMonths( self ):
     """
          @summary : Sets the months value to an array containing
                     the last X months in "since epoch" numbers,
                     based on the globally set NB_MONTHS_DISPLAYED
                     value.
     
     """
     
     currentTime = time.time()
     currentTime = StatsDateLib.getIsoFromEpoch( currentTime )
     currentDate = datetime.date( int(currentTime[0:4]), int(currentTime[5:7]), 1 )  # day always  = 1 in case currentDate.day > 28 
        
     months = []   
         
      for i in range(0,NB_MONTHS_DISPLAYED):

          month = currentDate.month - ( i % 12 )
          if month < 1 :
              month = month + 12

          # Step the year back once for every 12 months crossed past January.
          year = currentDate.year + int( math.floor( ( currentDate.month - i - 1 ) / 12.0 ) )

          months.append( StatsDateLib.getSecondsSinceEpoch( "%s-%s-%s 00:00:00" %(year,month,"01") ) )
             
     months.reverse()
         
     self.months =  months
      #print months
Example 8
 def setMonths( self ):
     """
          Sets self.months to the last five months (in "since epoch" numbers), including the current month.
     
     """
     
     currentTime = time.time()
     currentTime = StatsDateLib.getIsoFromEpoch( currentTime )
     currentDate = datetime.date( int(currentTime[0:4]), int(currentTime[5:7]), 1 )     
        
     months = []
     
        
     for i in range(0,5):
         
          if currentDate.month -i < 1 :
              month = currentDate.month -i + 12
              year  = currentDate.year - 1  # at most one year wrap for i <= 4
         else :     
             month = currentDate.month -i 
             year = currentDate.year
             
        
         newdate = StatsDateLib.getSecondsSinceEpoch( "%s-%s-01 00:00:00" %( year,month ) ) 
         months.append( newdate )
         #print year,month,day
     
     months.reverse()
         
     self.months = months
Example 9
def getTimeSeperatorsBasedOnAvailableMemory( startTime, endTime, clients, fileType, machines ):
    """    
        @summary: Returns the time separators to be used for the transfer
                  in a way that should prevent overloading memory.

        @param startTime: Start time of the transfer to be attempted.
        @param endTime:   End time of the transfer to be attempted.
        @param clients:   List of clients/sources to be transferred.
        @param fileType:  tx or rx.
        @param machines:  Machines on which the clients/sources reside.

        @return: The time separators.
        
    """
    
    width = 0        # Width in seconds of the transfer to be attempted
    seperators = []  # Time separators representing every hour to be transferred.
    allFiles =[]     # List of all pickle files that will be involved
    hourlyFiles = [] # List of all files to be handled for a certain hour.
    hourlyFileSizes = [] # Total file size of all the files to be handled at a certain hour.


    totalSizeToloadInMemory = 0.0  # Total size of all the pickle files to load in memory
    currentlyAvailableMemory = 0.0 # Total currently available memory on the present machine.
    seperatorsBasedOnAvailableMemory = [startTime, endTime] # Suppose we have all the memory we need.

    width = ( StatsDateLib.getSecondsSinceEpoch( endTime ) -  StatsDateLib.getSecondsSinceEpoch( startTime ) ) / StatsDateLib.HOUR    
    
    seperators = [ startTime ]
    seperators.extend( StatsDateLib.getSeparatorsWithStartTime( startTime =  startTime , width= width*StatsDateLib.HOUR, interval=StatsDateLib.HOUR )[:-1])
    
    for seperator in seperators:      
        hourlyFiles = PickleMerging.createNonMergedPicklesList( seperator, machines, fileType, clients )
        allFiles.extend( hourlyFiles )        
        hourlyFileSizes.append( MemoryManagement.getTotalSizeListOfFiles( hourlyFiles )    )
    
    
    totalSizeToloadInMemory = MemoryManagement.getTotalSizeListOfFiles( allFiles )
    currentlyAvailableMemory = MemoryManagement.getCurrentFreeMemory( marginOfError = 0.75 ) # Never expect more than 25% of the available memory to be usable for pickle loading.
    
    if totalSizeToloadInMemory >= currentlyAvailableMemory:
        seperatorsBasedOnAvailableMemory = MemoryManagement.getSeperatorsForHourlyTreatments( startTime, endTime, currentlyAvailableMemory, hourlyFileSizes  )
          
    return seperatorsBasedOnAvailableMemory
Example 10
 def isFirstUpdateOfTheWeek( self, timeOfUpdateInIsoFormat = "" ): 
     """
         @summary : Returns whether or not an update executed at 
                    timeOfUpdateInIsoFormat would be the first update 
                    of the week.
                    
          @param timeOfUpdateInIsoFormat : Time at which the update would be executed.
         
         @return : True or False.
                     
     """
     
     isFirstUpdateOfTheWeek = False
     
     lastUpdateISO = self.getTimeOfLastUpdateInLogs()
     
     if timeOfUpdateInIsoFormat == "" :
         timeOfUpdateInIsoFormat = StatsDateLib.getCurrentTimeInIsoformat()
         
     if timeOfUpdateInIsoFormat >  lastUpdateISO :
          # The day is the third dash-separated token of "YYYY-MM-DD HH:MM:SS".
          lastUpdateDT    = datetime( int( lastUpdateISO.split("-")[0]),\
                                      int( lastUpdateISO.split("-")[1]),\
                                      int( lastUpdateISO.split("-")[2].split(" ")[0] )\
                                     )

          currentUpdateDT = datetime( int( timeOfUpdateInIsoFormat.split("-")[0]),\
                                      int( timeOfUpdateInIsoFormat.split("-")[1]),\
                                      int( timeOfUpdateInIsoFormat.split("-")[2].split(" ")[0] )\
                                     )
         
         weekNumberOfLastUpdate    = time.strftime( '%W', time.gmtime( StatsDateLib.getSecondsSinceEpoch( lastUpdateISO ) ) )
         weekNumberOfCurrentUpdate = time.strftime( '%W', time.gmtime( StatsDateLib.getSecondsSinceEpoch( timeOfUpdateInIsoFormat ) ) )
         
         timeBetweenBothDates = currentUpdateDT - lastUpdateDT
         daysBetween = timeBetweenBothDates.days
         
         if daysBetween < 7 and ( weekNumberOfLastUpdate == weekNumberOfCurrentUpdate ):  #<7 days prevents same week but from different years.
             isFirstUpdateOfTheWeek = False
         else:
             isFirstUpdateOfTheWeek = True    
             
     
     return isFirstUpdateOfTheWeek
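The week test above combines a strict day-count guard with %W week numbers, so that two dates sharing a week number but belonging to different years are not mistaken for the same week. A standalone sketch with invented dates:

from datetime import datetime

def sameWeek( iso1, iso2 ):
    # Hypothetical helper mirroring the comparison used above.
    d1 = datetime.strptime( iso1, "%Y-%m-%d %H:%M:%S" )
    d2 = datetime.strptime( iso2, "%Y-%m-%d %H:%M:%S" )
    sameNumber = d1.strftime( "%W" ) == d2.strftime( "%W" )
    return abs( ( d2 - d1 ).days ) < 7 and sameNumber

print( sameWeek( "2008-02-25 15:00:00", "2008-02-28 15:00:00" ) )  # True
print( sameWeek( "2008-02-28 15:00:00", "2008-03-10 15:00:00" ) )  # False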
Example 11
def buildCsvFileName( infos ):
    """ 
    
        @summary: Builds and returns the file name to use for the csv file.
        
        @param infos: _CvsInfos instance containing the required 
                      information to build up the file name.
        
        @return: Returns the built-up file name.
                      
    """
    
    global _ 
    
    StatsDateLib.setLanguage(infos.outputLanguage)
    paths = StatsPaths()
    paths.setPaths( infos.outputLanguage )
    
    machinesStr = str(infos.machinesForLabels).replace('[','').replace( ']','' ).replace(',', '').replace("'","").replace( '"','').replace( ' ','' )
    
    currentYear, currentMonth, currentDay = StatsDateLib.getYearMonthDayInStrfTime( StatsDateLib.getSecondsSinceEpoch (infos.start) )     
    currentWeek = time.strftime( "%W", time.gmtime( StatsDateLib.getSecondsSinceEpoch (infos.start) ) )
    
    
    fileName = paths.STATSCSVFILES
   
    if infos.span == "daily":
        fileName = fileName + "/" + _("daily/") + infos.fileType + "/%s/%s/%s/%s.csv" %( machinesStr, currentYear, currentMonth, currentDay )   
    
    elif infos.span == "weekly":
        fileName = fileName + "/" +  _("weekly/") + infos.fileType  + "/%s/%s/%s.csv" %( machinesStr, currentYear, currentWeek ) 
    
    elif infos.span == "monthly":
        fileName = fileName + "/" + _("monthly/") + infos.fileType + "/%s/%s/%s.csv" %( machinesStr, currentYear, currentMonth )
    
    elif infos.span == "yearly":
        fileName = fileName + "/" + _("yearly/") + infos.fileType  + "/%s/%s.csv" %( machinesStr, currentYear )
        
    
    StatsDateLib.setLanguage( LanguageTools.getMainApplicationLanguage() )    
        
    return fileName 
Example 12
    def getXTics( self ):
        """
           
           @summary : This method builds all the xtics used to separate data on the x axis.

                      Xtic values are used in the plot method, so they will be drawn on
                      the graphic.

           @note:     All xtics will be divided hourly. This means a new xtic every time
                      another hour has passed since the starting point.
            
            
        """
        
        _ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.workingLanguage )
        #print "get x tics"
        if self.logger != None :
            self.logger.debug( _("Call to getXtics received") )
        
        nbBuckets = ( len( self.stats[0].statsCollection.timeSeperators ) )
        xtics = ''
        startTime = StatsDateLib.getSecondsSinceEpoch( self.stats[0].statsCollection.timeSeperators[0] )
        
        if nbBuckets != 0 :
            
            for i in range(0, nbBuckets ):
                 
                   
                if ( (  StatsDateLib.getSecondsSinceEpoch(self.stats[0].statsCollection.timeSeperators[i]) - ( startTime  ) ) %(60*60)  == 0.0 ): 
                    
                    hour = StatsDateLib.getHoursFromIso( self.stats[0].statsCollection.timeSeperators[i] )
                    
                    xtics += '"%s" %i, '%(  hour , StatsDateLib.getSecondsSinceEpoch(self.stats[0].statsCollection.timeSeperators[i] ) )

        
        #print nbBuckets
        #print "len xtics %s" %len(xtics) 
        return xtics[:-2]
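The string being assembled is a gnuplot-style xtics list: one '"label" position' pair per hour, comma separated, with the trailing ", " trimmed. A tiny standalone rendering of the same format using invented label/epoch pairs:

ticks = [ ("13:00", 1152277200), ("14:00", 1152280800) ]  # invented pairs
xtics = ''
for label, epoch in ticks:
    xtics += '"%s" %i, ' % ( label, epoch )
print( xtics[:-2] )  # "13:00" 1152277200, "14:00" 1152280800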
Example 13
 def getTimeSinceLastUpdate(self, currentTimeInIsoFormat = "" ):
     """
         @summary : returns the number of seconds between the last update
                    and the currentTime  
     
         @param  currentTimeInIsoFormat: Current time specified in the ISO 
                                         format
                                         
         @return :  the number of seconds between the last update
                    and the currentTime                               
     """
     
     timeBetweenUpdates = 0 
     
     if currentTimeInIsoFormat == "":
         currentTimeInIsoFormat = StatsDateLib.getCurrentTimeInIsoformat()
    
     currentTimeInSSEFormat = StatsDateLib.getSecondsSinceEpoch( currentTimeInIsoFormat )   
     lastUpdateInSSEFormat  =  StatsDateLib.getSecondsSinceEpoch( self.getTimeOfLastUpdateInLogs() )
     
     if currentTimeInSSEFormat > lastUpdateInSSEFormat :
         timeBetweenUpdates = currentTimeInSSEFormat - lastUpdateInSSEFormat
     
     return timeBetweenUpdates
Example 14
def getThreeClosestDatabasesBackups(infos):
    """
        @summary : Returns the three database backups
                   that are the closest to the startTime
                   asked for the database recollection.

        @param infos : Object whose databasesRecollectionStartTime
                       attribute holds the requested start time.

        @return: The three database backups that are the closest
                 to the startTime asked for the database recollection.
    """

    closestFiles = []
    differenceFileTuples = []
    files = os.listdir(StatsPaths.STATSDB + 'databasesTimeOfUpdatesBackups/')

    startTimeInEpochformat = StatsDateLib.getSecondsSinceEpoch(
        infos.databasesRecollectionStartTime)

    for file in files:

        fileDateInIsoFormat = "%s %s" % (str(file).split("_")[0],
                                         str(file).split("_")[1])

        tupleToAdd = (abs(
            StatsDateLib.getSecondsSinceEpoch(fileDateInIsoFormat) -
            startTimeInEpochformat), file)

        differenceFileTuples.append(tupleToAdd)

    # Sort by time difference so that the slice below really yields the closest backups.
    differenceFileTuples.sort()

    for differenceFileTuple in differenceFileTuples[:3]:
        closestFiles.append(differenceFileTuple[1])

    return closestFiles
Example 15
    def buildThisHoursFileName(client="someclient",
                               offset=0,
                               currentTime="",
                               fileType="tx",
                               machine="someMachineName"):
        """ 
            @summary : Builds a filename using the given currentTime.

            @Note : The format will be something like this :
                    StatsPaths.STATSPICKLES/clientName/date/TXorRX/machine_hour
                    Ex : StatsPaths.STATSPICKLES/clientName/20060707/tx/machinex_12:00:00

                    offset can be used to find a file from an hour close to the current one.

                    currentTime can also be used to build a filename from another hour.

            @warning : To be used only with pickles created hourly.
                
        """

        timeFolder = ""

        if currentTime == "":
            currentTime = time.time()
        else:
            currentTime = StatsDateLib.getSecondsSinceEpoch(currentTime)

        currentTime = currentTime + (offset * StatsDateLib.HOUR)
        splitTime = time.gmtime(currentTime)

        for i in range(3):

            if int(splitTime[i]) < 10:
                timeFolder = timeFolder + "0" + str(splitTime[i])
            else:
                timeFolder = timeFolder + str(splitTime[i])

        hour = StatsDateLib.getHoursFromIso(
            StatsDateLib.getIsoFromEpoch(currentTime))

        maxLt = (os.statvfs(STATSPATHS.STATSPICKLES)[statvfs.F_NAMEMAX])

        fileName = ("%s" + "%." + str(maxLt) + "s/%s/%s/%." + str(maxLt) +
                    "s_%s") % (STATSPATHS.STATSPICKLES, client, timeFolder,
                               fileType, str(machine), str(hour))

        return fileName
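The zero-padding loop over the first three fields of time.gmtime reproduces what time.strftime can produce directly; a small equivalence sketch with an arbitrary epoch value:

import time

epoch = 1152273600  # 2006-07-07 12:00:00 UTC
timeFolder = time.strftime( "%Y%m%d", time.gmtime( epoch ) )
print( timeFolder )  # 20060707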
Example 16
    def filterClientsNamesUsingWilcardFilters(currentTime, timespan,
                                              clientNames, machines,
                                              fileTypes):
        """
        
            @param currentTime: currentTime specified in the parameters.
            @param timespan: Time span specified within the parameters.
            @param clientNames: List of client names found in the parameters.
            @param machines: Machines on which the clients reside.
            @param fileTypes: File type (tx or rx) for each client name.
        
        """

        newClientNames = []

        end = currentTime
        start = StatsDateLib.getIsoFromEpoch(
            StatsDateLib.getSecondsSinceEpoch(currentTime) -
            60 * 60 * timespan)

        if len(clientNames) >= len(fileTypes) or len(fileTypes) == 1:

            if len(fileTypes) == 1:
                for i in range(1, len(clientNames)):
                    fileTypes.append(fileTypes[0])

            # Python 2 idiom : map(None, a, b) pairs items like itertools.izip_longest.
            for clientName, fileType in map(None, clientNames, fileTypes):

                if '?' in clientName or '*' in clientName:

                    pattern = clientName

                    rxHavingRun, txHavingRun = GeneralStatsLibraryMethods.getRxTxNamesHavingRunDuringPeriod(
                        start, end, machines, pattern)

                    if fileType == "rx":
                        namesHavingrun = rxHavingRun
                    else:
                        namesHavingrun = txHavingRun

                    newClientNames.extend(namesHavingrun)

                else:
                    newClientNames.append(clientName)

        return newClientNames
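For the wildcard test itself ('?' or '*' in a client name), the standard library's fnmatch module performs the same style of matching that getRxTxNamesHavingRunDuringPeriod is asked to do against its pattern. A hedged sketch, with invented client names:

import fnmatch

knownClients = [ "satnet", "satnet2", "metman", "wxo-b1" ]  # invented sample names
pattern = "sat*"
print( fnmatch.filter( knownClients, pattern ) )  # ['satnet', 'satnet2']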
Example 17
 def buildThisHoursFileName(  client = "someclient", offset = 0, currentTime = "", fileType = "tx", machine = "someMachineName" ):
     """ 
          @summary : Builds a filename using the given currentTime.

          @Note : The format will be something like this :
                  StatsPaths.STATSPICKLES/clientName/date/TXorRX/machine_hour
                  Ex : StatsPaths.STATSPICKLES/clientName/20060707/tx/machinex_12:00:00

                  offset can be used to find a file from an hour close to the current one.

                  currentTime can also be used to build a filename from another hour.

          @warning : To be used only with pickles created hourly.
             
     """    
     
     timeFolder = ""
            
     if currentTime == "":
         currentTime = time.time()
     else:
         currentTime = StatsDateLib.getSecondsSinceEpoch( currentTime )    
     
     currentTime = currentTime + ( offset * StatsDateLib.HOUR )
     splitTime = time.gmtime( currentTime )    
             
     for i in range( 3 ):
         
         if int( splitTime[i] ) < 10 :
             timeFolder = timeFolder + "0" + str( splitTime[i] )
         else:
             timeFolder = timeFolder + str( splitTime[i] )          
     
             
     hour = StatsDateLib.getHoursFromIso( StatsDateLib.getIsoFromEpoch( currentTime ) )
     
     maxLt = ( os.statvfs( STATSPATHS.STATSPICKLES )[statvfs.F_NAMEMAX])
     
     fileName = ( "%s" + "%." +  str( maxLt ) + "s/%s/%s/%." + str( maxLt ) + "s_%s" )   %( STATSPATHS.STATSPICKLES, client, timeFolder,  fileType, str(machine),  str(hour) )  
             
     return fileName 
Example 18
 def setYears( self ):
     """
          Sets self.years to the last three years (in "since epoch" numbers), including the current year.
     
     """
     
     currentTime = time.time()
     currentTime = StatsDateLib.getIsoFromEpoch( currentTime )
     currentDate = datetime.date( int(currentTime[0:4]), int(currentTime[5:7]), 1 )     
     
     years = []    
 
     for i in range(0,3):
         year = currentDate.year - i
         newDate = StatsDateLib.getSecondsSinceEpoch( "%s-%s-%s 00:00:00" %(year, currentDate.month, currentDate.day) )
         years.append(  newDate )
         
     years.reverse()
        
     self.years =  years   
Example 19
 def filterClientsNamesUsingWilcardFilters( currentTime, timespan, clientNames, machines, fileTypes ):
     """
     
          @param currentTime: currentTime specified in the parameters.
          @param timespan: Time span specified within the parameters.
          @param clientNames: List of client names found in the parameters.
          @param machines: Machines on which the clients reside.
          @param fileTypes: File type (tx or rx) for each client name.
     
     """
     
     newClientNames = []
     
     end   = currentTime
     start = StatsDateLib.getIsoFromEpoch( StatsDateLib.getSecondsSinceEpoch(currentTime)- 60*60*timespan )
     
     if len(clientNames) >=  len( fileTypes ) or len( fileTypes ) ==1:
         
         if len( fileTypes ) == 1 :
             for i in range(1, len( clientNames ) ):
                 fileTypes.append( fileTypes[0])
         
          # Python 2 idiom : map(None, a, b) pairs items like itertools.izip_longest.
          for clientName,fileType in map( None, clientNames, fileTypes ):
                             
             if  '?' in clientName or '*' in clientName :           
                 
                 pattern =clientName
                
                 rxHavingRun,txHavingRun = GeneralStatsLibraryMethods.getRxTxNamesHavingRunDuringPeriod(start, end, machines, pattern)
                 
                 if fileType == "rx":
                     namesHavingrun = rxHavingRun
                 else:    
                     namesHavingrun = txHavingRun
                 
                 newClientNames.extend( namesHavingrun )   
                     
                     
             else:
                 newClientNames.append( clientName )   
         
         
     return newClientNames
Example 20
 def getStartTimeAndEndTime( self, collectUptoNow = False ):
     """
         @summary : Returns the startTime and endTime of the graphics.
         
         @warning : collectUptoNow not yet supported in program !
         
         @return : the startTime and endTime of the graphics.
         
     """
     
     
     #Now not yet implemented.
     if collectUptoNow == True :
         endTime = self.currentTime
         
     else :
         endTime = StatsDateLib.getIsoWithRoundedHours( self.currentTime )
         
     startTime = StatsDateLib.getIsoFromEpoch( StatsDateLib.getSecondsSinceEpoch( endTime ) - (self.timespan * StatsDateLib.HOUR) )  
      
     return startTime, endTime
Example 21
 def getMissingWeeksBetweenUpdates(self, update1InIsoFormat, update2InIsoFormat ):
     """
          @summary : Returns the list of weeks between update date 1 and update date 2.

          @Note : The last week boundary is excluded. If update1InIsoFormat
                  equals update2InIsoFormat, an empty list is returned.

          @return : Returns the list of weeks between update date 1 and update date 2.
                   
     """
     
     missingWeeks = []
     
     if update2InIsoFormat > update1InIsoFormat:
             weekInIsoFormat = update1InIsoFormat
             while weekInIsoFormat <= update2InIsoFormat :
                 missingWeeks.append( weekInIsoFormat )
                 weekInIsoFormat = StatsDateLib.getIsoFromEpoch(  StatsDateLib.getSecondsSinceEpoch( weekInIsoFormat ) + ( StatsDateLib.DAY*7 )  )
                
     return missingWeeks[:-1] 
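A self-contained version of the same weekly stepping, over plain epoch seconds with invented bounds:

WEEK = 7 * 24 * 3600

def weeklySteps( startEpoch, endEpoch ):
    steps, t = [], startEpoch
    while t <= endEpoch:
        steps.append( t )
        t = t + WEEK
    return steps[:-1]  # mirrors the [:-1] in getMissingWeeksBetweenUpdates

print( weeklySteps( 0, 3 * WEEK ) )  # [0, 604800, 1209600]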
Example 22
    def getStartTimeAndEndTime(self, collectUptoNow=False):
        """
            @summary : Returns the startTime and endTime of the graphics.
            
            @warning : collectUptoNow not yet supported in program !
            
            @return : the startTime and endTime of the graphics.
            
        """

        #Now not yet implemented.
        if collectUptoNow == True:
            endTime = self.currentTime

        else:
            endTime = StatsDateLib.getIsoWithRoundedHours(self.currentTime)

        startTime = StatsDateLib.getIsoFromEpoch(
            StatsDateLib.getSecondsSinceEpoch(endTime) -
            (self.timespan * StatsDateLib.HOUR))

        return startTime, endTime
Example 23
def getDataFromDatabases(sourlients, dataTypes, infos):
    """
        @summary: Gathers up all the required data from all the concerned databases.

        @param sourlients: List of source clients for which we need to gather up data.

        @param dataTypes: Data types for which we need to collect data.

        @param infos: Infos object carrying, among others, the machines to search,
                      the span, the file type and the output language.

        @return : Returns the data dictionary filled with all the collected data.
        
    """

    data = {}

    for sourlient in sourlients.keys():
        data[sourlient] = {}

        sourlientsMachines = sourlients[sourlient]

        for machine in infos.machinesToSearch:

            if infos.machinesAreClusters == True:
                machineConfig = MachineConfigParameters()
                machineConfig.getParametersFromMachineConfigurationFile()
                machines = machineConfig.getMachinesAssociatedWith(machine)
                oldMachine = machine
                # Collapse the list representation into a single concatenated name.
                machine = str(machines).replace('[', '').replace(']', '').replace(
                    ',', '').replace("'", '').replace('"', '').replace(' ', '')

                if machine == '':
                    # No machines were associated with this cluster; keep the original name.
                    machine = oldMachine

            if machine in sourlientsMachines:
                data[sourlient][machine] = {}

                for dataType in dataTypes:

                    if infos.outputLanguage != 'en':
                        translatedDataType = LanguageTools.translateDataType(
                            dataType, "en", infos.outputLanguage)
                    else:
                        translatedDataType = dataType

                    databaseName = RrdUtilities.buildRRDFileName(
                        dataType=translatedDataType,
                        clients=[sourlient],
                        machines=[machine],
                        fileType=infos.fileType)

                    if not os.path.isfile(databaseName):
                        if infos.includegroups == True:
                            databaseName = RrdUtilities.buildRRDFileName(
                                dataType=translatedDataType,
                                groupName=sourlient,
                                machines=[machine],
                                fileType=infos.fileType,
                                usage="group")

                    lastUpdate = RrdUtilities.getDatabaseTimeOfUpdate(
                        databaseName, infos.fileType)

                    fetchedInterval = getInterval(int(
                        StatsDateLib.getSecondsSinceEpoch(infos.start)),
                                                  lastUpdate,
                                                  dataType,
                                                  goal="fetchData")
                    desiredInterval = getInterval(int(
                        StatsDateLib.getSecondsSinceEpoch(infos.start)),
                                                  lastUpdate,
                                                  dataType,
                                                  goal="plotGraphic")
                    interval = desiredInterval
                    minimum, maximum, mean, total = getGraphicsMinMaxMeanTotal( databaseName, int(StatsDateLib.getSecondsSinceEpoch(infos.start)),\
                                                                                int(StatsDateLib.getSecondsSinceEpoch(infos.end)), infos.span,\
                                                                                fetchedInterval,desiredInterval, type = "average" )
                    data[sourlient][machine][dataType] = {}
                    data[sourlient][machine][dataType]["min"] = minimum
                    data[sourlient][machine][dataType]["max"] = maximum
                    data[sourlient][machine][dataType]["mean"] = mean
                    data[sourlient][machine][dataType]["total"] = total

    return data
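The result is a three-level dictionary keyed by sourlient, machine, and data type, each leaf holding the four statistics. A shape sketch with invented values:

data = {}
data["satnet"] = {}
data["satnet"]["pdsPM"] = {}
data["satnet"]["pdsPM"]["errors"] = { "min": 0, "max": 3, "mean": 1.2, "total": 36 }
print( data["satnet"]["pdsPM"]["errors"]["max"] )  # 3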
Example 24
    def prepareQuery(self):
        """
            @summary : Builds up the query to be executed.
        
            @SIDE_EFFECT :  modifies self.query value.
            
        """

        global _

        if self.queryParameters.combine == 'true':
            totals = True
            mergerType = "regular"
        else:
            totals = False
            mergerType = ""

        fixedCurrent = False
        fixedPrevious = False

        if _("current") in str(self.queryParameters.fixedSpan).lower():
            fixedCurrent = True
        elif _("previous") in str(self.queryParameters.fixedSpan).lower():
            fixedPrevious = True
        else:
            fixedCurrent = False
            fixedPrevious = False

        hour = self.queryParameters.endTime.split(" ")[1]
        splitDate = self.queryParameters.endTime.split(" ")[0].split('-')

        date = splitDate[2] + '-' + splitDate[1] + '-' + splitDate[
            0] + " " + hour
        if self.queryParameters.span == "":
            timespan = 0
        else:
            timespan = int(self.queryParameters.span)

        StatsDateLib.setLanguage(self.querierLanguage)
        startTime, endTime = StatsDateLib.getStartEndInIsoFormat(
            date, timespan, self.queryParameters.specificSpan, fixedCurrent,
            fixedPrevious)

        timespan = int(
            StatsDateLib.getSecondsSinceEpoch(endTime) -
            StatsDateLib.getSecondsSinceEpoch(startTime)) / 3600

        combinedMachineName = ""
        for machine in self.queryParameters.machines:
            combinedMachineName = combinedMachineName + machine

        machines = [combinedMachineName]


        self.graphicProducer = RRDGraphicProducer( self.queryParameters.fileTypes[0], self.queryParameters.statsTypes ,\
                                                   totals,  self.queryParameters.specificSpan,\
                                                   self.queryParameters.sourLients, timespan,\
                                                   startTime, endTime, machines, False,
                                                   mergerType, True, self.querierLanguage, self.querierLanguage )

        StatsDateLib.setLanguage(LanguageTools.getMainApplicationLanguage())
Example 25
 def mergePicklesFromSameHour( logger = None , pickleNames = None, mergedPickleName = "",\
                               clientName = "" , combinedMachineName = "", currentTime = "",\
                               fileType = "tx" ):
     """
          @summary: This method receives a list of filenames referring to pickled FileStatsEntries.

                    After the merger, pickles get saved since they might be reused somewhere else.

          @precondition: Pickles should be of the same timespan and bucket width.
                         If not, no merging will occur.
         
     """
     
     
     if logger != None : 
         logger.debug( _("Call to mergePickles received.") )
         logging = True
     else:
         logging = False
             
     entryList = []
     
     
      for pickle in pickleNames: # for every pickle we need to merge
         
         if os.path.isfile( pickle ):
             
             entryList.append( CpickleWrapper.load( pickle ) )
                         
         else:#Use empty entry if there is no existing pickle of that name
             
             endTime = StatsDateLib.getIsoFromEpoch( StatsDateLib.getSecondsSinceEpoch( currentTime ) + StatsDateLib.HOUR ) 
             entryList.append( FileStatsCollector( startTime = currentTime, endTime = endTime,logger =logger, logging =logging   ) )         
             
             if logger != None :
                 logger.warning( _("Pickle named %s did not exist. Empty entry was used instead.") %pickle )    
     
     
     #start off with a carbon copy of first pickle in list.
     newFSC = FileStatsCollector( files = entryList[0].files , statsTypes =  entryList[0].statsTypes, startTime = entryList[0].startTime,\
                                  endTime = entryList[0].endTime, interval=entryList[0].interval, totalWidth = entryList[0].totalWidth,\
                                  firstFilledEntry = entryList[0].firstFilledEntry, lastFilledEntry = entryList[0].lastFilledEntry,\
                                  maxLatency = entryList[0].maxLatency, fileEntries = entryList[0].fileEntries,logger = logger,\
                                  logging = logging )
              
     if PickleMerging.entryListIsValid( entryList ) == True :
         
         for i in range ( 1 , len( entryList ) ): #add other entries 
             
             for file in entryList[i].files :
                 if file not in newFSC.files :
                     newFSC.files.append( file ) 
             
             for j in range( len( newFSC.fileEntries ) ) : # add all entries                        
                 
                 newFSC.fileEntries[j].values.productTypes.extend( entryList[i].fileEntries[j].values.productTypes )
                 newFSC.fileEntries[j].files.extend( entryList[i].fileEntries[j].files )
                 newFSC.fileEntries[j].times.extend( entryList[i].fileEntries[j].times )  
                  newFSC.fileEntries[j].nbFiles = newFSC.fileEntries[j].nbFiles + entryList[i].fileEntries[j].nbFiles
                 
                 for type in newFSC.statsTypes :
                     newFSC.fileEntries[j].values.dictionary[type].extend( entryList[i].fileEntries[j].values.dictionary[type] ) 
                                            
                 newFSC.fileEntries[j].values.rows = newFSC.fileEntries[j].values.rows + entryList[i].fileEntries[j].values.rows
             
         newFSC = newFSC.setMinMaxMeanMedians( startingBucket = 0 , finishingBucket = newFSC.nbEntries -1 )
              
            
      else: # Did not merge pickles. Pickle list was not valid.
         
         if logger != None :
             logger.warning( _("Did not merge pickles named : %s. Pickle list was not valid.") %pickleNames )
              logger.warning( _("Filled with empty entries instead.") )
             
         newFSC.fileEntries = PickleMerging.fillWithEmptyEntries( nbEmptyEntries = 60 , entries = {} )    
     
     
      # Prevents us from having to remerge the file later on.
     temp = newFSC.logger
     del newFSC.logger
     CpickleWrapper.save( newFSC, mergedPickleName )
     try:
         os.chmod( mergedPickleName, 0777 )
     except:
         pass    
     
     #print "saved :%s" %mergedPickleName
     newFSC.logger = temp
     
     return newFSC
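The final save step goes through the project's CpickleWrapper, but the underlying mechanism is an ordinary pickle round trip. A hedged sketch of that idea (plain pickle shown; cPickle behaves the same under Python 2, and the stand-in object is invented):

import pickle

merged = { "fileEntries": { 0: "entry0", 1: "entry1" } }  # invented stand-in
with open( "/tmp/merged.pickle", "wb" ) as f:
    pickle.dump( merged, f )
with open( "/tmp/merged.pickle", "rb" ) as f:
    print( pickle.load( f )["fileEntries"][1] )  # entry1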
Example 26
def getGraphicProducerFromParserOptions( parser ):
    """
        
        This method parses the argv received when the program was called.
        It takes the parameters which have been passed by the user and sets them
        in the corresponding fields of the infos variable.

        If errors are encountered in the parameters used, it will immediately
        terminate the application.
    
    """ 
    
    graphicType = _("other")
    mergerType = ""
    
    ( options, args )= parser.parse_args()        
    timespan         = options.timespan
    machines         = options.machines.replace( ' ','').split(',')
    clientNames      = options.clients.replace( ' ','' ).split(',')
    types            = options.types.replace( ' ', '').split(',')
    date             = options.date.replace('"','').replace("'",'')
    fileType         = options.fileType.replace("'",'')
    havingRun        = options.havingRun
    individual       = options.individual
    totals           = options.totals
    daily            = options.daily
    weekly           = options.weekly
    monthly          = options.monthly
    yearly           = options.yearly    
    fixedCurrent     = options.fixedCurrent
    fixedPrevious    = options.fixedPrevious
    copy             = options.copy
    turnOffLogging   = options.turnOffLogging
    outputLanguage   = options.outputLanguage
    
    
    if outputLanguage == "":
        outputLanguage = LanguageTools.getMainApplicationLanguage()
    else :
        if outputLanguage not in LanguageTools.getSupportedLanguages():
            print _("Error. The specified language is not currently supported by this application.")
            print _("Please specify one of the following languages : %s or use the default value (%s).") %( str( LanguageTools.getSupportedLanguages() ).replace("[","").replace("]",""), LanguageTools.getMainApplicationLanguage() )
            print _("Program terminated.")
            sys.exit()
            
    counter = 0  
    specialParameters = [daily, monthly, weekly, yearly]
    for specialParameter in specialParameters:
        if specialParameter:
            counter = counter + 1 
            
    if counter > 1 :
        print _("Error. Only one of the daily, weekly, monthly and yearly options can be used at a time.")
        print _("Use -h for help.")
        print _("Program terminated.")
        sys.exit()
    
    elif counter == 1 and timespan != None :
        print _("Error. When using the daily, weekly, monthly or yearly options, timespan cannot be specified.")
        print _("Use -h for help.")
        print _("Program terminated.")
        sys.exit()
        
    elif counter == 0:    
        if fixedPrevious or fixedCurrent:
            print _("Error. When using one of the fixed options, please use either the -d -m -w or -y options. " )
            print _("Use -h for help.")
            print _("Program terminated.")
            sys.exit()
        
        if copy :
            if daily or not( weekly or monthly or yearly ):
                print _("Error. Copying can only be used with the -m -w or -y options. ")
                print _("Use -h for help.")
                print _("Program terminated.")
                sys.exit()
                
    if counter == 0 and timespan == None :
        timespan = 12
        
    if fixedPrevious and fixedCurrent:
        print _("Error. Please use only one of the fixed options,either fixedPrevious or fixedCurrent. ") 
        print _("Use -h for help.")
        print _("Program terminated.")
        sys.exit()  
    
    if individual and totals:
        print _("Error. Please use only one of the group options,either individual or totals. ")
        print _("Use -h for help.")
        print _("Program terminated.")
        sys.exit()  
    
    try: # Makes sure date is of valid format. 
         # Makes sure only one space is kept between date and hour.
        t =  time.strptime( date, '%Y-%m-%d %H:%M:%S' )
        split = date.split()
        date = "%s %s" %( split[0], split[1] )

    except:    
        print _("Error. The date format must be YYYY-MM-DD HH:MM:SS")
        print _("Use -h for help.")
        print _("Program terminated.")
        sys.exit()         
        
    
    #Set graphic type based on parameters. Only one type is allowed at a time based on previous validation.
    if daily :
        graphicType = _("daily")
        if fixedPrevious == False and fixedCurrent == False :
            timespan = 24
    elif weekly:
        graphicType = _("weekly")
        if fixedPrevious == False and fixedCurrent == False :
            timespan = 24 * 7
    elif monthly:
        graphicType = _("monthly")
        if fixedPrevious == False and fixedCurrent == False :
            timespan = 24 * 30
    elif yearly:
        graphicType = _("yearly")      
        if fixedPrevious == False and fixedCurrent == False :
            timespan = 24 * 365
    
    
    start, end = StatsDateLib.getStartEndInIsoFormat(date, timespan, graphicType, fixedCurrent, fixedPrevious )
    
    
    timespan = int( StatsDateLib.getSecondsSinceEpoch( end ) - StatsDateLib.getSecondsSinceEpoch( start ) ) / 3600    
                     
            
    #print "timespan %s" %timespan                           
    try:    
        if int( timespan ) < 1 :
            raise 
                
    except:
        
        print _("Error. The timespan value needs to be an integer one above 0.") 
        print _("Use -h for help.")
        print _("Program terminated.")
        sys.exit()        
         
    if fileType != "tx" and fileType != "rx":        
        print _("Error. File type must be either tx or rx.")
        print  "Specified file type was : ", fileType
        print _("Multiple types are not accepted.") 
        print _("Use -h for additional help.")
        print _("Program terminated.")
        sys.exit()            
        
                
    if havingRun == True and clientNames[0] != _("ALL"):
        print _("Error. Cannot use the havingRun option while specifying client/source names.")
        print _("To use havingRun, do not use -c|--client option.")
        print _("Use -h for additional help.")
        print _("Program terminated.")
        sys.exit()
    
    if clientNames[0] == _("ALL"):
        # Get all of the client/sources that have run between graph's start and end. 
        if totals == True or havingRun == True :          
            #print start, end, machines       
            rxNames, txNames = GeneralStatsLibraryMethods.getRxTxNamesHavingRunDuringPeriod( start, end, machines,None, havingrunOnAllMachines = True )
            mergerType = _("totalForMachine")
        else: # Build graphs only for currently running client/sources.
            rxNames, txNames = GeneralStatsLibraryMethods.getRxTxNames( LOCAL_MACHINE, machines[0] )
            mergerType = _("group")
                     
        if fileType == _("tx"):    
            clientNames = txNames  
            #print clientNames
        else:
            clientNames = rxNames    
            
    else:
        if totals == True :  
            mergerType = _("regular")
            
    if fileType == _("tx"):       
    
        validTypes = [ _("latency"), _("bytecount"), _("errors"), _("filesOverMaxLatency"), _("filecount") ]
        
        if types[0] == _("All") :
            types = validTypes
        else :
            for t in types :
                if t not in validTypes:
                    raise Exception("%s is not a valid type" %t)
                    
    else:      
        
        validTypes = [ _("bytecount"), _("errors"), _("filecount") ]
        
        if types[0] == _("All"):
            types = validTypes
        
        else :
            for t in types :
                if t not in validTypes:
                        raise Exception("")

  
            
    if individual != True :        
        combinedMachineName = ""
        for machine in machines:
            combinedMachineName = combinedMachineName + machine
                    
        machines = [ combinedMachineName ]              
         
                
    if len(clientNames) <1:
        print _("Error. No client/sources were found that matched the specified parameters")
        print _("Verify parameters used, especially the machines parameter.")
        print _("Use -h for additional help.")
        print _("Program terminated.")
        sys.exit()  
    
    elif len(clientNames) == 1 and totals == True:   
        print _("Error. Cannot use totals option with only one client/source name.")
        print _("Either remove the --total option or use more than one client/source.")
        print _("Use -h for additional help.")
        print _("Program terminated.")
        sys.exit()          
    
    end = StatsDateLib.getIsoWithRoundedHours( end )
    
    graphicsProducer = RRDGraphicProducer( startTime = start, endTime = end, graphicType = graphicType, clientNames = clientNames, types = types, timespan = timespan, machines = machines, fileType = fileType,\
                                           totals = totals, copy = copy, mergerType = mergerType,turnOffLogging = turnOffLogging, inputLanguage = LanguageTools.getMainApplicationLanguage() ,  outputLanguage = outputLanguage )   
            
    return graphicsProducer                       
Example 27
def updateRoundRobinDatabases(client,
                              machines,
                              fileType,
                              endTime,
                              logger=None):
    """
        @summary : This method updates every database linked to a certain client.
        
        @note : Database types are linked to the filetype associated with the client.
        
    """

    # Concatenate all machine names into one combined name.
    combinedMachineName = "".join(machines)

    tempRRDFileName = RrdUtilities.buildRRDFileName(dataType=_("errors"),
                                                    clients=[client],
                                                    machines=machines,
                                                    fileType=fileType)
    startTime = RrdUtilities.getDatabaseTimeOfUpdate(tempRRDFileName, fileType)

    if startTime == 0:
        startTime = StatsDateLib.getSecondsSinceEpoch(
            StatsDateLib.getIsoTodaysMidnight(endTime))

    endTime = StatsDateLib.getSecondsSinceEpoch(endTime)

    timeSeperators = getTimeSeperatorsBasedOnAvailableMemory(
        StatsDateLib.getIsoFromEpoch(startTime),
        StatsDateLib.getIsoFromEpoch(endTime), [client], fileType, machines)

    for i in xrange(len(timeSeperators) - 1):

        dataPairs = getPairs([client],
                             machines,
                             fileType,
                             timeSeperators[i],
                             timeSeperators[i + 1],
                             groupName="",
                             logger=logger)

        for dataType in dataPairs:

            translatedDataType = LanguageTools.translateTerm(
                dataType, 'en', LanguageTools.getMainApplicationLanguage(),
                CURRENT_MODULE_ABS_PATH)

            rrdFileName = RrdUtilities.buildRRDFileName(
                dataType=translatedDataType,
                clients=[client],
                machines=machines,
                fileType=fileType)

            if not os.path.isfile(rrdFileName):
                createRoundRobinDatabase(databaseName=rrdFileName,
                                         startTime=startTime,
                                         dataType=dataType)

            if endTime > startTime:
                j = 0
                while j < len(dataPairs[dataType]) and \
                        dataPairs[dataType][j][0] < startTime:
                    j = j + 1

                for k in range(j, len(dataPairs[dataType])):
                    try:
                        rrdtool.update(
                            rrdFileName,
                            '%s:%s' % (int(dataPairs[dataType][k][0]),
                                       dataPairs[dataType][k][1]))
                    except:
                        if logger != None:
                            try:
                                logger.warning(
                                    "Could not update %s. Last update was more recent than %s "
                                    % (rrdFileName,
                                       int(dataPairs[dataType][k][0])))
                            except:
                                pass
                        pass

                if logger != None:
                    try:
                        logger.info(
                            _("Updated  %s db for %s in db named : %s") %
                            (dataType, client, rrdFileName))
                    except:
                        pass
            else:
                if logger != None:
                    try:
                        logger.warning(
                            _("This database was not updated since its last update was more recent than the specified date : %s"
                              ) % rrdFileName)
                    except:
                        pass

            RrdUtilities.setDatabaseTimeOfUpdate(rrdFileName, fileType,
                                                 endTime)
Example n. 28
0
def convertTimeSeperator( seperator ) : return int( StatsDateLib.getSecondsSinceEpoch(seperator)  + 60 )

timeSeperators = map( convertTimeSeperator, mergedData.statsCollection.timeSeperators )
Example n. 29
0
    def mergePicklesFromSameHour( logger = None , pickleNames = None, mergedPickleName = "",\
                                  clientName = "" , combinedMachineName = "", currentTime = "",\
                                  fileType = "tx" ):
        """
            @summary: This methods receives a list of filenames referring to pickled FileStatsEntries.
            
                      After the merger pickles get saved since they might be reused somewhere else.
            
            @precondition:  Pickle should be of the same timespan and bucket width.
                            If not no merging will occur.  
            
        """

        if logger != None:
            logger.debug(_("Call to mergePickles received."))
            logging = True
        else:
            logging = False

        entryList = []

        for pickle in pickleNames:  #for every pickle we need to merge

            if os.path.isfile(pickle):

                entryList.append(CpickleWrapper.load(pickle))

            else:  #Use empty entry if there is no existing pickle of that name

                endTime = StatsDateLib.getIsoFromEpoch(
                    StatsDateLib.getSecondsSinceEpoch(currentTime) +
                    StatsDateLib.HOUR)
                entryList.append(
                    FileStatsCollector(startTime=currentTime,
                                       endTime=endTime,
                                       logger=logger,
                                       logging=logging))

                if logger != None:
                    logger.warning(
                        _("Pickle named %s did not exist. Empty entry was used instead."
                          ) % pickle)

        #start off with a carbon copy of first pickle in list.
        newFSC = FileStatsCollector( files = entryList[0].files , statsTypes =  entryList[0].statsTypes, startTime = entryList[0].startTime,\
                                     endTime = entryList[0].endTime, interval=entryList[0].interval, totalWidth = entryList[0].totalWidth,\
                                     firstFilledEntry = entryList[0].firstFilledEntry, lastFilledEntry = entryList[0].lastFilledEntry,\
                                     maxLatency = entryList[0].maxLatency, fileEntries = entryList[0].fileEntries,logger = logger,\
                                     logging = logging )

        if PickleMerging.entryListIsValid(entryList) == True:

            for i in range(1, len(entryList)):  #add other entries

                for file in entryList[i].files:
                    if file not in newFSC.files:
                        newFSC.files.append(file)

                for j in range(len(newFSC.fileEntries)):  # add all entries

                    newFSC.fileEntries[j].values.productTypes.extend(
                        entryList[i].fileEntries[j].values.productTypes)
                    newFSC.fileEntries[j].files.extend(
                        entryList[i].fileEntries[j].files)
                    newFSC.fileEntries[j].times.extend(
                        entryList[i].fileEntries[j].times)
                    newFSC.fileEntries[j].nbFiles = newFSC.fileEntries[
                        j].nbFiles + entryList[i].fileEntries[j].nbFiles

                    for type in newFSC.statsTypes:
                        newFSC.fileEntries[j].values.dictionary[type].extend(
                            entryList[i].fileEntries[j].values.dictionary[type]
                        )

                    newFSC.fileEntries[j].values.rows = newFSC.fileEntries[
                        j].values.rows + entryList[i].fileEntries[j].values.rows

            newFSC = newFSC.setMinMaxMeanMedians(
                startingBucket=0, finishingBucket=newFSC.nbEntries - 1)

        else:  #Did not merge pickles. Pickle list was not valid.

            if logger != None:
                logger.warning(
                    _("Did not merge pickles named : %s. Pickle list was not valid."
                      ) % pickleNames)
                logger.warning(
                    _("Filled with empty entries instead."))

            newFSC.fileEntries = PickleMerging.fillWithEmptyEntries(
                nbEmptyEntries=60, entries={})

        #prevents us from having to remerge the file later on.
        temp = newFSC.logger
        del newFSC.logger
        CpickleWrapper.save(newFSC, mergedPickleName)
        try:
            os.chmod(mergedPickleName, 0777)
        except:
            pass

        #print "saved :%s" %mergedPickleName
        newFSC.logger = temp

        return newFSC
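
The bucket-by-bucket merge above boils down to extending parallel lists, one bucket at a time. A toy sketch of the same pattern, independent of FileStatsCollector (mergeBuckets is hypothetical):

def mergeBuckets( base, other, statsTypes ):
    # base and other : lists of per-minute buckets, each bucket mapping
    # a stats type to the list of values gathered during that minute.
    for j in range( len( base ) ):
        for type in statsTypes:
            base[j][type].extend( other[j][type] )
    return base

merged = mergeBuckets( [ { "errors" : [1] } ], [ { "errors" : [2, 3] } ], [ "errors" ] )
# merged == [ { "errors" : [1, 2, 3] } ]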
Example n. 30
0
def updateHourlyPickles( infos, paths, logger = None ):
    """
        @summary : This method is to be used when hourly pickling is done. -1 pickle per hour per client. 
        
        This method needs will update the pickles by collecting data from the time of the last 
        pickle up to the current date.(System time or the one specified by the user.)
        
        If for some reason data wasnt collected for one or more hour since last pickle,pickles
        for the missing hours will be created and filled with data. 
        
        If no entries are found for this client in the pickled-times file, we take for granted that
        this is a new client. In that case data will be collected from the top of the hour up to the 
        time of the call.
        
        If new client has been producing data before the day of the first call, user can specify a 
        different time than system time to specify the first day to pickle. He can then call this 
        method with the current system time, and data between first day and current time will be 
        collected so that pickling can continue like the other clients can.
        
        
    """  
    
    sp = StatsPickler( logger = logger )
    
    pathToLogFiles = GeneralStatsLibraryMethods.getPathToLogFiles( LOCAL_MACHINE, infos.machine )
    
    for i in range( len (infos.clients) ) :
        
        sp.client = infos.clients[i]
        
        width = StatsDateLib.getSecondsSinceEpoch(infos.endTime) - StatsDateLib.getSecondsSinceEpoch( StatsDateLib.getIsoWithRoundedHours(infos.startTimes[i] ) ) 
        
        
        if width > StatsDateLib.HOUR :#In case pickling didn't happen for a few hours for some reason...   
            
            hours = [infos.startTimes[i]]
            hours.extend( StatsDateLib.getSeparatorsWithStartTime( infos.startTimes[i], interval = StatsDateLib.HOUR, width = width ))
            
            for j in range( len(hours)-1 ): #Covers hours where no pickling was done.                               
                
                startOfTheHour = StatsDateLib.getIsoWithRoundedHours( hours[j] )
                startTime = startOfTheHour        
                                                   
                endTime = StatsDateLib.getIsoFromEpoch( StatsDateLib.getSecondsSinceEpoch( StatsDateLib.getIsoWithRoundedHours(hours[j+1] ) ))
                #print " client : %s startTime : %s endTime : %s" %(infos.clients[i], startTime, endTime )
                
                if startTime >= endTime and logger != None :                                
                    try:
                        logger.warning( _("Startime used in updateHourlyPickles was greater or equal to end time.") )    
                    except:
                        pass    
                
                sp.pickleName =  StatsPickler.buildThisHoursFileName( client = infos.clients[i], currentTime =  startOfTheHour, machine = infos.machine, fileType = infos.fileType )
                 
                sp.collectStats( types = infos.types, startTime = startTime , endTime = endTime, interval = infos.interval * StatsDateLib.MINUTE,\
                                 directory = pathToLogFiles, fileType = infos.fileType )                     
                           
                    
        else:      
           
            startTime = infos.startTimes[i]
            endTime   = infos.endTime             
            startOfTheHour = StatsDateLib.getIsoWithRoundedHours( infos.startTimes[i] )
            #print " client : %s startTime : %s endTime : %s" %(infos.clients[i], startTime, endTime )               
            if startTime >= endTime and logger != None :#to be removed                
                try:
                    logger.warning( _("Startime used in updateHourlyPickles was greater or equal to end time.") )    
                except:
                    pass    
                
            sp.pickleName = StatsPickler.buildThisHoursFileName( client = infos.clients[i], currentTime = startOfTheHour, machine = infos.machine, fileType = infos.fileType )            
              
            sp.collectStats( infos.types, startTime = startTime, endTime = endTime, interval = infos.interval * StatsDateLib.MINUTE, directory = pathToLogFiles, fileType = infos.fileType )        
       
                         
        setLastUpdate( machine = infos.machine, client = infos.clients[i], fileType = infos.fileType, currentDate = infos.currentDate, paths = paths, collectUpToNow = infos.collectUpToNow )
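
The catch-up branch above splits the gap since the last pickle into whole hours. A minimal sketch of that bookkeeping, using raw epoch seconds instead of StatsDateLib (getHourlyStarts is hypothetical):

HOUR = 60 * 60

def getHourlyStarts( lastPickleTime, now ):
    # One entry per hour still needing a pickle, rounded down to the hour.
    starts = []
    t = lastPickleTime - ( lastPickleTime % HOUR )
    while t < now:
        starts.append( t )
        t = t + HOUR
    return starts

# getHourlyStarts( 3700, 11000 ) == [ 3600, 7200, 10800 ]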
Example n. 31
0
    def getSeperatorsForHourlyTreatments(startTime,
                                         endTime,
                                         currentFreeMemory,
                                         fileSizesPerHour,
                                         usage="rrd"):
        """
        
            @summary : returns a list of time seperators based on a list of file and 
                       the current amount of free memory. Each seperator represents the time 
                       associated with a certain hourly file. Each seperator will represent
                       the maximum amount of files that can be treated at the same time 
                       without busting the current memory. 
            
            @attention: List fo files MUST refer to hourly files. 
            
            @param startTime: Startime in iso format of the interval to work with.
            @param endTime: End time in iso format of the interval to work with.
            @param currentFreeMemory: Maximum amout of memory to use per seperation.
            @param fileSizesPerHour: size of the file(s) to be treated at every hour.
            
            @return: Returns the time seperators. 
                   
        """

        currentTotalFileSizes = 0
        currentTime = StatsDateLib.getSecondsSinceEpoch(startTime)
        seperators = [startTime]

        if fileSizesPerHour[0] < currentFreeMemory:

            for fileSizePerHour in fileSizesPerHour:
                currentTotalFileSizes = currentTotalFileSizes + fileSizePerHour

                if currentFreeMemory < currentTotalFileSizes:
                    seperators.append(
                        StatsDateLib.getIsoFromEpoch(currentTime))
                    currentTotalFileSizes = 0

                currentTime = currentTime + StatsDateLib.HOUR
        else:
            raise Exception(
                "Cannot build seperators. First file will not even fit within current available memory."
            )

        if seperators[len(seperators) - 1] != endTime:
            seperators.append(endTime)

        if len(seperators) > 2:  #If any "in between seperators were added"
            i = 1
            currentLength = len(seperators) - 1
            while i < currentLength:  #add 1 minute
                if usage == "rrd":
                    seperators.insert(
                        i + 1,
                        StatsDateLib.getIsoFromEpoch(
                            (StatsDateLib.getSecondsSinceEpoch(seperators[i]) +
                             StatsDateLib.MINUTE)))
                else:
                    seperators.insert(
                        i + 1,
                        StatsDateLib.getSecondsSinceEpoch(seperators[i]))
                currentLength = currentLength + 1
                i = i + 2

        return seperators
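
Worked through with small numbers (a sketch using integer hours in place of ISO dates) : with 100 units of free memory and hourly file sizes [40, 40, 40], the running total busts the limit on the third file, so a separator is inserted at hour 2 and the result is [start, hour 2, end].

freeMemory = 100
fileSizesPerHour = [ 40, 40, 40 ]
seperators = [ 0 ]                       # startTime
total = 0
currentTime = 0
for size in fileSizesPerHour:
    total = total + size
    if freeMemory < total:
        seperators.append( currentTime ) # hour of the file that busted the limit
        total = 0
    currentTime = currentTime + 1        # next hour
seperators.append( 3 )                   # endTime
# seperators == [ 0, 2, 3 ]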
Example n. 32
0
def getLastUpdate( machine, client, fileType, currentDate, paths, collectUpToNow = False ):
    """
        @summary : Reads and returns the client's or source's last update.        
       
        @return : The client's or sources last update.   
    """ 
    
    times = {}
    lastUpdate = {}
    fileName = "%s%s_%s_%s" %( paths.STATSPICKLESTIMEOFUPDATES, fileType, client, machine )   
    
    if os.path.isfile( fileName ):
        try :
            fileHandle  = open( fileName, "r" )
            lastUpdate  = pickle.load( fileHandle )      
            fileHandle.close()
            
        except:
            print _("problematic file in loading : %s") %fileName
            lastUpdate = StatsDateLib.getIsoWithRoundedHours( StatsDateLib.getIsoFromEpoch( StatsDateLib.getSecondsSinceEpoch(currentDate ) - StatsDateLib.HOUR) )
            pass
            
            
    
    else: #Create a new pickle file. Set the start of the pickle as the last update.   
        if not os.path.isdir( os.path.dirname( fileName ) ) :
            os.makedirs( os.path.dirname( fileName ) ) 
            
        fileHandle  = open( fileName, "w" )        
    
        lastUpdate = StatsDateLib.getIsoWithRoundedHours( StatsDateLib.getIsoFromEpoch( StatsDateLib.getSecondsSinceEpoch(currentDate ) - StatsDateLib.HOUR) )
         
        pickle.dump( lastUpdate, fileHandle )
        
        fileHandle.close()
       

    return lastUpdate
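
The function above is an instance of the usual load-or-initialize pattern around a pickle file. A stripped-down sketch of that pattern (loadOrInitTimestamp is hypothetical):

import os, pickle

def loadOrInitTimestamp( fileName, default ):
    # Return the pickled value if the file loads cleanly;
    # otherwise create the file, seeded with the default value.
    if os.path.isfile( fileName ):
        try:
            fileHandle = open( fileName, "r" )
            value = pickle.load( fileHandle )
            fileHandle.close()
            return value
        except:
            return default
    if os.path.dirname( fileName ) != "" and not os.path.isdir( os.path.dirname( fileName ) ):
        os.makedirs( os.path.dirname( fileName ) )
    fileHandle = open( fileName, "w" )
    pickle.dump( default, fileHandle )
    fileHandle.close()
    return default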
Example n. 33
0
def updateRoundRobinDatabases(  client, machines, fileType, endTime, logger = None ):
    """
        @summary : This method updates every database linked to a certain client.
        
        @note : Database types are linked to the filetype associated with the client.
        
    """
            
    combinedMachineName = ""
    combinedMachineName = combinedMachineName.join( [machine for machine in machines ] )
    
    tempRRDFileName = RrdUtilities.buildRRDFileName( dataType = _("errors"), clients = [client], machines = machines, fileType = fileType)
    startTime   = RrdUtilities.getDatabaseTimeOfUpdate(  tempRRDFileName, fileType ) 
    
    if  startTime == 0 :
        startTime = StatsDateLib.getSecondsSinceEpoch( StatsDateLib.getIsoTodaysMidnight( endTime ) )
        
    
    endTime     = StatsDateLib.getSecondsSinceEpoch( endTime )           
        
    timeSeperators = getTimeSeperatorsBasedOnAvailableMemory(StatsDateLib.getIsoFromEpoch( startTime ), StatsDateLib.getIsoFromEpoch( endTime ), [client], fileType, machines ) 
    
    
    for i in xrange( len(timeSeperators) -1 ) :
        
        dataPairs   = getPairs( [client], machines, fileType, timeSeperators[i], timeSeperators[i+1] , groupName = "", logger = logger )

        for dataType in dataPairs:
            
            translatedDataType = LanguageTools.translateTerm(dataType, 'en', LanguageTools.getMainApplicationLanguage(), CURRENT_MODULE_ABS_PATH)
            
            rrdFileName = RrdUtilities.buildRRDFileName( dataType = translatedDataType, clients = [client], machines = machines, fileType = fileType )

            if not os.path.isfile( rrdFileName ):
                createRoundRobinDatabase(  databaseName = rrdFileName , startTime= startTime, dataType = dataType )


            if endTime > startTime :
                j = 0 
                while j < len( dataPairs[ dataType ] ) and dataPairs[ dataType ][j][0] < startTime:
                    j = j +1
                    
                for k in range ( j, len( dataPairs[ dataType ] )  ):
                    try:
                        rrdtool.update( rrdFileName, '%s:%s' %( int( dataPairs[ dataType ][k][0] ),  dataPairs[ dataType ][k][1] ) )
                    except:
                        if logger != None:
                            try:
                                logger.warning( "Could not update %s. Last update was more recent than %s " %( rrdFileName,int( dataPairs[ dataType ][k][0] ) ) )
                            except:
                                pass    
                        pass
                    
                    
                if logger != None :
                    try:        
                        logger.info( _( "Updated  %s db for %s in db named : %s" ) %( dataType, client, rrdFileName ) )
                    except:
                        pass        
            else:
                if logger != None :
                    try:
                        logger.warning( _( "This database was not updated since its last update was more recent than the specified date : %s" ) %rrdFileName )
                    except:
                        pass    
                
            RrdUtilities.setDatabaseTimeOfUpdate(  rrdFileName, fileType, endTime )  
Example n. 34
0
def updateGroupedRoundRobinDatabases( infos, logger = None ):    
    """
        @summary : This method is to be used to update the database 
                   used to stored the merged data of a group.
         
    """
    
    endTime     = StatsDateLib.getSecondsSinceEpoch( infos.endTime )     
    
    tempRRDFileName = RrdUtilities.buildRRDFileName( _("errors"), clients = infos.group, machines = infos.machines, fileType = infos.fileTypes[0]  )  
    startTime       = RrdUtilities.getDatabaseTimeOfUpdate(  tempRRDFileName, infos.fileTypes[0] )
    
   
    if startTime == 0 :        
        startTime = StatsDateLib.getSecondsSinceEpoch( StatsDateLib.getIsoTodaysMidnight( infos.endTime ) )
        
        
    timeSeperators = getTimeSeperatorsBasedOnAvailableMemory( StatsDateLib.getIsoFromEpoch( startTime ), StatsDateLib.getIsoFromEpoch( endTime ), infos.clients, infos.fileTypes[0], infos.machines )
    
    
    #print timeSeperators
    
    for i in xrange(0, len( timeSeperators ),2 ):#timeseperators should always be coming in pairs
        
        startTime = StatsDateLib.getSecondsSinceEpoch( timeSeperators[i] )
        dataPairs = getPairs( infos.clients, infos.machines, infos.fileTypes[0], timeSeperators[i], timeSeperators[i+1], infos.group, logger )
    
        for dataType in dataPairs:
            
            translatedDataType = LanguageTools.translateTerm(dataType, 'en', LanguageTools.getMainApplicationLanguage(), CURRENT_MODULE_ABS_PATH)
            rrdFileName = RrdUtilities.buildRRDFileName( dataType = translatedDataType, clients = infos.group, groupName = infos.group, machines =  infos.machines,fileType = infos.fileTypes[0], usage = "group" )
            
            if not os.path.isfile( rrdFileName ):
                createRoundRobinDatabase( rrdFileName,  startTime, dataType )
            
            if endTime >  startTime  :
                j = 0 
                while j < len( dataPairs[ dataType ] ) and dataPairs[ dataType ][j][0] < startTime:
                    #print "going over : %s startime was :%s" %(dataPairs[ dataType ][j][0], startTime)
                    j = j +1
                    
                for k in range ( j, len( dataPairs[ dataType ] )  ):
                    #print "updating %s at %s" %(rrdFileName, int( dataPairs[ dataType ][k][0] ))
                    try:
                        rrdtool.update( rrdFileName, '%s:%s' %( int( dataPairs[ dataType ][k][0] ),  dataPairs[ dataType ][k][1] ) )
                    except:
                        if logger != None:
                            try:
                                logger.warning( "Could not update %s. Last update was more recent than %s " %( rrdFileName,int( dataPairs[ dataType ][k][0] ) ) )
                            except:
                                pass    
                        pass    
            
            else:
                #print "endTime %s was not bigger than start time %s" %( endTime, startTime ) 
                if logger != None :
                    try:
                        logger.warning( _( "This database was not updated since it's last update was more recent than specified date : %s" ) %rrdFileName )
                    except:
                        pass
                        
    RrdUtilities.setDatabaseTimeOfUpdate( tempRRDFileName, infos.fileTypes[0], endTime )         
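
Note that the separators are consumed two at a time here, as (start, end) pairs, unlike the overlapping slices used elsewhere. A minimal illustration of that indexing, with made-up values:

seperators = [ "09:00", "10:00", "11:00", "12:00" ]
for i in xrange( 0, len( seperators ), 2 ):
    start, end = seperators[i], seperators[i + 1]
    # first pass : 09:00 -> 10:00, second pass : 11:00 -> 12:00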
Example n. 35
0
    def getRxTxNamesHavingRunDuringPeriod(start,
                                          end,
                                          machines,
                                          pattern=None,
                                          havingrunOnAllMachines=False):
        """
            Browses all the rrd database directories to find 
            the time of the last update of each databases.
            
            If database was updated between start and end 
            and the client or source is from the specified 
            machine, the name of the client or source is 
            added to rxNames or txNames.
            
            
        """

        rxNames = []
        txNames = []
        txOnlyDatabases = []
        rxTxDatabases = []

        combinedMachineName = ""
        start = StatsDateLib.getSecondsSinceEpoch(start)
        end = StatsDateLib.getSecondsSinceEpoch(end)

        if havingrunOnAllMachines == False:
            for machine in machines:

                rxTxDatabasesLongNames = glob.glob(
                    _("%sbytecount/*_*%s*") %
                    (STATSPATHS.STATSCURRENTDB, machine))
                txOnlyDatabasesLongNames = glob.glob(
                    _("%slatency/*_*%s*") %
                    (STATSPATHS.STATSCURRENTDB, machine))

                #Keep only client/source names.
                for rxtxLongName in rxTxDatabasesLongNames:
                    if pattern == None:
                        if rxtxLongName not in rxTxDatabases:
                            rxTxDatabases.append(rxtxLongName)
                    else:

                        if fnmatch.fnmatch(os.path.basename(rxtxLongName),
                                           pattern):
                            if rxtxLongName not in rxTxDatabases:
                                rxTxDatabases.append(rxtxLongName)

                for txLongName in txOnlyDatabasesLongNames:
                    if pattern == None:
                        if txLongName not in txOnlyDatabases:
                            txOnlyDatabases.append(txLongName)
                    else:
                        if fnmatch.fnmatch(os.path.basename(txLongName),
                                           pattern):
                            if txLongName not in txOnlyDatabases:
                                txOnlyDatabases.append(txLongName)

        else:
            for machine in machines:
                combinedMachineName = combinedMachineName + machine

            rxTxDatabasesLongNames = glob.glob(
                _("%sbytecount/*_%s*") %
                (STATSPATHS.STATSCURRENTDB, combinedMachineName))
            txOnlyDatabasesLongNames = glob.glob(
                _("%slatency/*_%s*") %
                (STATSPATHS.STATSCURRENTDB, combinedMachineName))

            #Keep only client/source names.
            for rxtxLongName in rxTxDatabasesLongNames:
                if pattern == None:
                    rxTxDatabases.append(rxtxLongName)
                else:
                    if fnmatch.fnmatch(os.path.basename(rxtxLongName),
                                       pattern):
                        rxTxDatabases.append(rxtxLongName)

            for txLongName in txOnlyDatabasesLongNames:
                if pattern == None:
                    txOnlyDatabases.append(txLongName)
                else:
                    if fnmatch.fnmatch(os.path.basename(txLongName), pattern):
                        txOnlyDatabases.append(txLongName)

        rxOnlyDatabases = filter(lambda x: x not in txOnlyDatabases,
                                 rxTxDatabases)

        for rxDatabase in rxOnlyDatabases:
            lastUpdate = RrdUtilities.getDatabaseTimeOfUpdate(rxDatabase, "rx")
            if lastUpdate >= start:
                #fileName format is ../path/rxName_machineName
                rxDatabase = os.path.basename(rxDatabase)
                rxDatabase = rxDatabase.split(
                    "_%s" % (rxDatabase.split('_')[-1:][0]))[0]
                rxNames.append(rxDatabase)

        for txDatabase in txOnlyDatabases:
            lastUpdate = RrdUtilities.getDatabaseTimeOfUpdate(txDatabase, "tx")

            if lastUpdate >= start:

                txDatabase = os.path.basename(txDatabase)
                txDatabase = txDatabase.split(
                    "_%s" % (txDatabase.split('_')[-1:][0]))[0]
                txNames.append(txDatabase)

        rxNames = filter(GeneralStatsLibraryMethods.filterGroupNames, rxNames)
        txNames = filter(GeneralStatsLibraryMethods.filterGroupNames, txNames)

        try:
            rxNames.remove('rx')
        except:
            pass
        try:
            txNames.remove('tx')
        except:
            pass

        rxNames.sort()
        txNames.sort()

        return rxNames, txNames
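
The name extraction above relies on database paths ending in "_machineName". A small sketch of that string manipulation, with a made-up path:

import os

longName = "/apps/px/stats/db/bytecount/satnet_pds5"   # hypothetical path
baseName = os.path.basename( longName )                # "satnet_pds5"
machine  = baseName.split( '_' )[-1:][0]               # "pds5"
client   = baseName.split( "_%s" % machine )[0]        # "satnet"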
Example n. 36
0
 def getPairs( self, clientCount , statType, typeCount  ):
     """
        
        @summary : This method is used to create the data couples used to draw the graphic.
                   Couples are a combination of the data previously gathered and the time
                   at wich data was produced.  
        
        @note:    One point per pair will generally be drawn on the graphic but
                  certain graph types might combine a few pairs before drawing only 
                  one point for the entire combination.
               
        @warning: If illegal statype is found program will be terminated here.       
        
        @todo: Using dictionaries instead of arrays might speed thinga up a bit.
         
     """
     
     if self.logger != None: 
         _ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.outputLanguage )
         self.logger.debug( _("Call to getPairs received.") )
     
     k = 0 
     pairs = []
     total = 0
     self.nbFiles[clientCount]  = 0
     self.nbErrors[clientCount] = 0
     self.nbFilesOverMaxLatency[clientCount] = 0
     nbEntries = len( self.stats[clientCount].statsCollection.timeSeperators )-1 
     
     translatedStatType = LanguageTools.translateTerm(statType, self.workingLanguage, "en", CURRENT_MODULE_ABS_PATH)
     
     if nbEntries !=0:
         
         total = 0
                         
         self.minimums[clientCount][typeCount] = 100000000000000000000 #huge integer
         self.maximums[clientCount][typeCount] = None
         self.filesWhereMaxOccured[clientCount][typeCount] =  "" 
         self.timeOfMax[clientCount][typeCount] = ""
         
         for k in range( 0, nbEntries ):
             
             try :
                 
                 if len( self.stats[clientCount].statsCollection.fileEntries[k].means ) >=1 :
                         
                     #special manipulation for each type                    
                     if translatedStatType == "latency":
                         self.nbFilesOverMaxLatency[clientCount] = self.nbFilesOverMaxLatency[ clientCount ] + self.stats[clientCount].statsCollection.fileEntries[k].filesOverMaxLatency    
                 
                     elif translatedStatType == "bytecount":
                         self.totalNumberOfBytes[clientCount] =  self.totalNumberOfBytes[clientCount] +    self.stats[clientCount].statsCollection.fileEntries[k].totals[translatedStatType]
                     
                     
                     elif translatedStatType == "errors":
                                                 #calculate total number of errors
                         self.nbErrors[clientCount] = self.nbErrors[clientCount] + self.stats[clientCount].statsCollection.fileEntries[k].totals[translatedStatType] 
                     
                       
                     #add to pairs    
                     if translatedStatType == "errors" or translatedStatType == "bytecount": #both use totals     
                         pairs.append( [StatsDateLib.getSecondsSinceEpoch(self.stats[clientCount].statsCollection.timeSeperators[k]), self.stats[clientCount].statsCollection.fileEntries[k].totals[translatedStatType]] )
                                            
                         #print    StatsDateLib.getSecondsSinceEpoch(self.stats[clientCount].statsCollection.timeSeperators[k]), self.stats[clientCount].statsCollection.fileEntries[k].totals[translatedStatType]                        
                     
                     elif translatedStatType == "filecount":
                         pairs.append( [StatsDateLib.getSecondsSinceEpoch(self.stats[clientCount].statsCollection.timeSeperators[k]), self.stats[clientCount].statsCollection.fileEntries[k].nbFiles ]  )
                     
                     else:#latency uses means
                         
                         pairs.append( [ StatsDateLib.getSecondsSinceEpoch(self.stats[clientCount].statsCollection.timeSeperators[k]), self.stats[clientCount].statsCollection.fileEntries[k].means[translatedStatType]] )
                         
                         #print self.stats[clientCount].statsCollection.timeSeperators[k], self.stats[clientCount].statsCollection.fileEntries[k].means[translatedStatType]
                     
                     if translatedStatType == "filecount":
                         
                         if self.stats[clientCount].statsCollection.fileEntries[k].nbFiles > self.maximums[clientCount][typeCount] :
                             self.maximums[clientCount][typeCount] =  self.stats[clientCount].statsCollection.fileEntries[k].nbFiles
                             self.timeOfMax[clientCount][typeCount] = self.stats[clientCount].statsCollection.fileEntries[k].startTime
                         
                         elif self.stats[clientCount].statsCollection.fileEntries[k].nbFiles < self.minimums[clientCount][typeCount] :                           
                             self.minimums[clientCount][typeCount] = self.stats[clientCount].statsCollection.fileEntries[k].nbFiles
                     
                     
                     elif( self.stats[clientCount].statsCollection.fileEntries[k].maximums[translatedStatType]  > self.maximums[clientCount][typeCount] ) :
                         
                         self.maximums[clientCount][typeCount] =  self.stats[clientCount].statsCollection.fileEntries[k].maximums[translatedStatType]
                         
                         self.timeOfMax[clientCount][typeCount] = self.stats[clientCount].statsCollection.fileEntries[k].timesWhereMaxOccured[translatedStatType]
                         
                         self.filesWhereMaxOccured[clientCount][typeCount] = self.stats[clientCount].statsCollection.fileEntries[k].filesWhereMaxOccured[translatedStatType]
                     
                         
                     elif self.stats[clientCount].statsCollection.fileEntries[k].minimums[translatedStatType] < self.minimums[clientCount][typeCount] :      
                         
                         if not ( translatedStatType == "bytecount" and  self.stats[clientCount].statsCollection.fileEntries[k].minimums[translatedStatType] == 0 ):
                             self.minimums[clientCount][typeCount] = self.stats[clientCount].statsCollection.fileEntries[k].minimums[translatedStatType]
                                                 
                     self.nbFiles[clientCount]  = self.nbFiles[clientCount]  + self.stats[clientCount].statsCollection.fileEntries[k].nbFiles   
                
                           
                 else:
                
                     pairs.append( [ StatsDateLib.getSecondsSinceEpoch(self.stats[clientCount].statsCollection.timeSeperators[k]), 0.0 ] )
             
             
             except KeyError, instance:
                 #print instance
                 _ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.workingLanguage )
                 self.logger.error( _("Error in getPairs.") )
                 self.logger.error( _("The %s stat type was not found in previously collected data.") %statType )    
                 pairs.append( [ StatsDateLib.getSecondsSinceEpoch(self.stats[clientCount].statsCollection.timeSeperators[k]), 0.0 ] )
                 pass    
             
             
             total = total + pairs[k][1]            
         
         self.means[clientCount][typeCount] = (total / (k+1) ) 
         
         
         if self.nbFiles[clientCount] != 0 :
             self.ratioOverLatency[clientCount]  = float( float(self.nbFilesOverMaxLatency[clientCount]) / float(self.nbFiles[clientCount]) ) *100.0
         
         if self.minimums[clientCount][typeCount] == 100000000000000000000 :
             self.minimums[clientCount][typeCount] = None
         
         #print pairs 
                    
         return pairs    
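
The min/max/mean bookkeeping above follows the usual accumulator pattern. A reduced sketch over plain (time, value) pairs, assuming nothing about the stats classes:

pairs = [ ( 0, 12.0 ), ( 60, 5.0 ), ( 120, 20.0 ) ]
total, minimum, maximum, timeOfMax = 0, None, None, None
for time, value in pairs:
    total = total + value
    if maximum is None or value > maximum:
        maximum, timeOfMax = value, time
    if minimum is None or value < minimum:
        minimum = value
mean = total / len( pairs )   # (12.0 + 5.0 + 20.0) / 3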
Example n. 37
0
    def mergePicklesFromDifferentHours( logger = None , startTime = "2006-07-31 13:00:00",\
                                        endTime = "2006-07-31 19:00:00", client = "satnet",\
                                        machine = "pdsPM", fileType = "tx" ):
        """
            @summary : This method merges entire hourly pickles files together. 
            
            @None    : This does not support merging part of the data of pickles.   
        
        """

        if logger != None:
            logger.debug(_("Call to mergeHourlyPickles received."))
            logging = True
        else:
            logging = False

        pickles = []
        entries = {}
        width = StatsDateLib.getSecondsSinceEpoch(
            endTime) - StatsDateLib.getSecondsSinceEpoch(startTime)
        startTime = StatsDateLib.getIsoWithRoundedHours(startTime)

        seperators = [startTime]
        seperators.extend(
            StatsDateLib.getSeparatorsWithStartTime(startTime=startTime,
                                                    width=width,
                                                    interval=60 *
                                                    StatsDateLib.MINUTE)[:-1])

        for seperator in seperators:
            pickles.append(
                StatsPickler.buildThisHoursFileName(client=client,
                                                    offset=0,
                                                    currentTime=seperator,
                                                    machine=machine,
                                                    fileType=fileType))

        startingNumberOfEntries = 0
        #print "prior to loading and merging pickles : %s " %( StatsDateLib.getIsoFromEpoch( time.time() ) )
        for pickle in pickles:

            if os.path.isfile(pickle):

                tempCollection = CpickleWrapper.load(pickle)
                if tempCollection != None:
                    for i in xrange(len(tempCollection.fileEntries)):
                        entries[startingNumberOfEntries +
                                i] = tempCollection.fileEntries[i]
                    startingNumberOfEntries = startingNumberOfEntries + len(
                        tempCollection.fileEntries)
                else:
                    sys.exit()
            else:

                emptyEntries = PickleMerging.fillWithEmptyEntries(
                    nbEmptyEntries=60, entries={})
                for i in xrange(60):
                    entries[i + startingNumberOfEntries] = emptyEntries[i]
                startingNumberOfEntries = startingNumberOfEntries + 60

        #print "after the  loading and merging og pickles : %s " %( StatsDateLib.getIsoFromEpoch( time.time() ) )

        statsCollection = FileStatsCollector(startTime=startTime,
                                             endTime=endTime,
                                             interval=StatsDateLib.MINUTE,
                                             totalWidth=width,
                                             fileEntries=entries,
                                             fileType=fileType,
                                             logger=logger,
                                             logging=logging)

        return statsCollection
Example n. 38
0
 def createCopy( self, copyToArchive = True , copyToColumbo = True ):
     """
         @summary : Creates a copy of the created image file so that it
                    easily be used in px's columbo or other daily image program. 
         
         @summary : If copies are to be needed for graphcis other than daily graphs, 
                    please modify this method accordingly!
         
     """
     
     statsPaths = StatsPaths()
     statsPaths.setPaths(self.outputLanguage)
     
     _ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.outputLanguage )
     
     src = self.imageName
     
     if self.groupName != "":            
         
         clientName = self.groupName 
             
     else:   
             
         clientName = ""
                
         if len( self.clientNames ) == 1:
             clientName = self.clientNames[0]
         else:
             for name in self.clientNames :
                 clientName = clientName + name  
                 if name != self.clientNames[ len(self.clientNames) -1 ] :
                     clientName = clientName + "-" 
     
     
     StatsDateLib.setLanguage( self.outputLanguage ) # Makes sure month is in the right language.
     year, month, day = StatsDateLib.getYearMonthDayInStrfTime( StatsDateLib.getSecondsSinceEpoch( self.currentTime ) - 60 ) # -60 means that a graphic ending at midnight
      StatsDateLib.setLanguage( self.workingLanguage )# Sets language back to working language.                               # would be named after the previous day.
         
                                     
     if copyToArchive == True : 
         destination = statsPaths.STATSGRAPHSARCHIVES + _("daily/%s/%s/") %( self.fileType, clientName ) + str(year) + "/" + str(month) + "/" + str(day) + ".png"
                     
         if not os.path.isdir( os.path.dirname( destination ) ):
             os.makedirs(  os.path.dirname( destination ), 0777 )   
             dirname = os.path.dirname( destination )                                                  
             
             while( dirname != statsPaths.STATSGRAPHSARCHIVES[:-1] ):#[:-1] removes the last / character 
                 
                 try:
                     os.chmod( dirname, 0777 )
                 except:
                     pass
                 
                 dirname = os.path.dirname(dirname)
                 
                         
         shutil.copy( src, destination ) 
         
         try:
             os.chmod( destination, 0777 )
         except:
             pass
         #print "cp %s %s  "  %( src, destination )
     
     
     
     if copyToColumbo == True : 
         destination = statsPaths.STATSGRAPHS + _("webGraphics/columbo/%s_%s.png") %(clientName,self.outputLanguage)
         if not os.path.isdir( os.path.dirname( destination ) ):
             os.makedirs(  os.path.dirname( destination ), 0777 )                                                      
             os.chmod( os.path.dirname( destination ), 0777 )
         
         shutil.copy( src, destination ) 
         try:
             os.chmod( destination, 0777 )
         except:
             pass
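
The archive copy lands in a year/month/day tree under the graphs archive. A sketch of the destination naming, with hypothetical paths and values:

archiveRoot = "/apps/px/stats/graphs/archives/"   # hypothetical root
fileType, clientName = "tx", "satnet"
year, month, day = "2006", "July", "31"
destination = archiveRoot + "daily/%s/%s/" % ( fileType, clientName ) \
              + year + "/" + month + "/" + day + ".png"
# /apps/px/stats/graphs/archives/daily/tx/satnet/2006/July/31.png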
Example n. 39
0
    def mergePicklesFromDifferentSources( logger = None , startTime = "2006-07-31 13:00:00",\
                                          endTime = "2006-07-31 19:00:00", clients = ["someclient"],\
                                          fileType = "tx", machines = [], groupName = "" ):
        """
            @summary : This method allows user to merge pickles coming from numerous machines
                       covering as many hours as wanted, into a single FileStatsCollector entry.
            
                       Very usefull when creating graphics on a central server with pickle files coming from 
                       remote locations.
            
        """

        combinedMachineName = ""
        combinedClientName = ""

        combinedMachineName = combinedMachineName.join(
            [machine for machine in machines])
        combinedClientName = combinedClientName.join(
            [client for client in clients])

        if groupName != "":
            clientsForVersionManagement = groupName
        else:
            clientsForVersionManagement = clients

        vc = PickleVersionChecker()

        vc.getClientsCurrentFileList(clients)

        vc.getSavedList(user=combinedMachineName,
                        clients=clientsForVersionManagement)

        width = StatsDateLib.getSecondsSinceEpoch(
            endTime) - StatsDateLib.getSecondsSinceEpoch(startTime)
        startTime = StatsDateLib.getIsoWithRoundedHours(startTime)

        seperators = [startTime]
        seperators.extend(
            StatsDateLib.getSeparatorsWithStartTime(startTime=startTime,
                                                    width=width,
                                                    interval=60 *
                                                    StatsDateLib.MINUTE)[:-1])

        mergedPickleNames =  PickleMerging.createMergedPicklesList(  startTime = startTime, endTime = endTime, machines = machines,\
                                                                     fileType = fileType, clients = clients, groupName = groupName,\
                                                                     seperators = seperators ) #Resulting list of the merger.

        for i in xrange(len(mergedPickleNames)):  #for every merger needed

            needToMergeSameHoursPickle = False
            pickleNames = PickleMerging.createNonMergedPicklesList(
                currentTime=seperators[i],
                machines=machines,
                fileType=fileType,
                clients=clients)

            if not os.path.isfile(mergedPickleNames[i]):
                needToMergeSameHoursPickle = True
            else:

                for pickle in pickleNames:  #Verify every pickle implicated in merger.
                    # if for some reason pickle has changed since last time
                    if vc.isDifferentFile(
                            file=pickle,
                            user=combinedMachineName,
                            clients=clientsForVersionManagement) == True:

                        needToMergeSameHoursPickle = True
                        break

            if needToMergeSameHoursPickle == True:  #First time or one element has changed

                PickleMerging.mergePicklesFromSameHour( logger = logger , pickleNames = pickleNames , clientName = combinedClientName,\
                                                        combinedMachineName = combinedMachineName, currentTime = seperators[i],\
                                                        mergedPickleName = mergedPickleNames[i], fileType = fileType  )

                for pickle in pickleNames:
                    vc.updateFileInList(file=pickle)

                vc.saveList(user=combinedMachineName,
                            clients=clientsForVersionManagement)

        # Once all machines have merges the necessary pickles we merge all pickles
        # into a single file stats entry.
        if groupName != "":
            nameToUseForMerger = groupName
        else:
            nameToUseForMerger = ""
            nameToUseForMerger = nameToUseForMerger.join(
                [client for client in clients])

        newFSC =  PickleMerging.mergePicklesFromDifferentHours( logger = logger , startTime = startTime, endTime = endTime, client = nameToUseForMerger,\
                                                                machine = combinedMachineName,fileType = fileType  )

        return newFSC
Example n. 40
0
def getPairsFromMergedData( statType, mergedData, logger = None  ):
    """
        This method is used to create the data couples used to feed an rrd database.
        
    """
    
    pairs = []        
    nbEntries = len( mergedData.statsCollection.timeSeperators ) - 1     
    
    def convertTimeSeperator( seperator ) : return int( StatsDateLib.getSecondsSinceEpoch(seperator)  + 60 )
    
    timeSeperators = map( convertTimeSeperator, mergedData.statsCollection.timeSeperators )
    fileEntries = mergedData.statsCollection.fileEntries
    
    if nbEntries !=0:        
       
         
        for i in xrange( 0, nbEntries ):
            
            try :
                    
                if len( mergedData.statsCollection.fileEntries[i].means ) >=1 :
                    
                    if statType == "filesOverMaxLatency" :
                        pairs.append( [ timeSeperators[i], fileEntries[i].filesOverMaxLatency ] )                      
                    
                    elif statType == "errors":
                        
                        pairs.append( [ timeSeperators[i], fileEntries[i].totals[statType]] )
                    
                    elif statType == "bytecount":
                    
                        pairs.append( [ timeSeperators[i], fileEntries[i].totals[statType]] )
                        
                    elif statType == "latency":
                    
                        pairs.append( [ timeSeperators[i], fileEntries[i].means[statType]] )                          
                    
                    elif statType == "filecount":
                        pairs.append( [ timeSeperators[i], len( fileEntries[i].values.productTypes ) ] )
                    
                    else:

                        pairs.append( [ timeSeperators[i], 0.0 ])                    
                
                else:      
                                                      
                    pairs.append( [ timeSeperators[i], 0.0 ] )
            
            
            except KeyError:
                if logger != None :  
                    try:                  
                        logger.error( _("Error in getPairs.") )
                        logger.error( _("The %s stat type was not found in previously collected data.") %statType )    
                    except:
                        pass    
                pairs.append( [ int(StatsDateLib.getSecondsSinceEpoch(mergedData.statsCollection.timeSeperators[i])) +60, 0.0 ] )
                sys.exit()    
            
               
        return pairs 
Example n. 41
0
def updateGroupedRoundRobinDatabases(infos, logger=None):
    """
        @summary : This method is to be used to update the database 
                   used to stored the merged data of a group.
         
    """

    endTime = StatsDateLib.getSecondsSinceEpoch(infos.endTime)

    tempRRDFileName = RrdUtilities.buildRRDFileName(
        _("errors"),
        clients=infos.group,
        machines=infos.machines,
        fileType=infos.fileTypes[0])
    startTime = RrdUtilities.getDatabaseTimeOfUpdate(tempRRDFileName,
                                                     infos.fileTypes[0])

    if startTime == 0:
        startTime = StatsDateLib.getSecondsSinceEpoch(
            StatsDateLib.getIsoTodaysMidnight(infos.endTime))

    timeSeperators = getTimeSeperatorsBasedOnAvailableMemory(
        StatsDateLib.getIsoFromEpoch(startTime),
        StatsDateLib.getIsoFromEpoch(endTime), infos.clients,
        infos.fileTypes[0], infos.machines)

    #print timeSeperators

    for i in xrange(0, len(timeSeperators),
                    2):  #timeseperators should always be coming in pairs

        startTime = StatsDateLib.getSecondsSinceEpoch(timeSeperators[i])
        dataPairs = getPairs(infos.clients, infos.machines, infos.fileTypes[0],
                             timeSeperators[i], timeSeperators[i + 1],
                             infos.group, logger)

        for dataType in dataPairs:

            translatedDataType = LanguageTools.translateTerm(
                dataType, 'en', LanguageTools.getMainApplicationLanguage(),
                CURRENT_MODULE_ABS_PATH)
            rrdFileName = RrdUtilities.buildRRDFileName(
                dataType=translatedDataType,
                clients=infos.group,
                groupName=infos.group,
                machines=infos.machines,
                fileType=infos.fileTypes[0],
                usage="group")

            if not os.path.isfile(rrdFileName):
                createRoundRobinDatabase(rrdFileName, startTime, dataType)

            if endTime > startTime:
                j = 0
                while j < len(dataPairs[dataType]) and \
                        dataPairs[dataType][j][0] < startTime:
                    #print "going over : %s startime was :%s" %(dataPairs[ dataType ][j][0], startTime)
                    j = j + 1

                for k in range(j, len(dataPairs[dataType])):
                    #print "updating %s at %s" %(rrdFileName, int( dataPairs[ dataType ][k][0] ))
                    try:
                        rrdtool.update(
                            rrdFileName,
                            '%s:%s' % (int(dataPairs[dataType][k][0]),
                                       dataPairs[dataType][k][1]))
                    except:
                        if logger != None:
                            try:
                                logger.warning(
                                    "Could not update %s. Last update was more recent than %s "
                                    % (rrdFileName,
                                       int(dataPairs[dataType][k][0])))
                            except:
                                pass
                        pass

            else:
                #print "endTime %s was not bigger than start time %s" %( endTime, startTime )
                if logger != None:
                    try:
                        logger.warning(
                            _("This database was not updated since its last update was more recent than the specified date : %s"
                              ) % rrdFileName)
                    except:
                        pass

    RrdUtilities.setDatabaseTimeOfUpdate(tempRRDFileName, infos.fileTypes[0],
                                         endTime)
Example n. 42
0
def getOptionsFromParser(parser):
    """
        @summary: Parses and validates the options found in the parser. 
        
        @return: If information was found to be valid, return options
    
    """

    infos = None
    date = []

    (options, args) = parser.parse_args()
    machines = options.machines.replace(' ', '').split(',')
    date = options.date.replace('"', '').replace("'", '')
    fileType = options.fileType.replace("'", '')
    daily = options.daily
    weekly = options.weekly
    monthly = options.monthly
    yearly = options.yearly
    fixedCurrent = options.fixedCurrent
    fixedPrevious = options.fixedPrevious
    turnOffLogging = options.turnOffLogging
    includeGroups = options.includeGroups
    machinesAreClusters = options.machinesAreClusters
    outputLanguage = options.outputLanguage

    if fixedPrevious and fixedCurrent:
        print _(
            "Error. Please use only one of the fixed options, either fixedPrevious or fixedCurrent."
        )
        print _("Use -h for help.")
        print _("Program terminated.")
        sys.exit()

    counter = 0
    specialParameters = [daily, monthly, weekly, yearly]
    for specialParameter in specialParameters:
        if specialParameter:
            counter = counter + 1

    if counter > 1:
        print _(
            "Error. Only one of the daily, weekly, monthly and yearly options can be used at a time."
        )
        print _("Use -h for help.")
        print _("Program terminated.")
        sys.exit()

    elif counter == 0:
        print _("Error. Please use either the -d -m -w or -y options. ")
        print _("Use -h for help.")
        print _("Program terminated.")
        sys.exit()

    try:  # Makes sure date is of valid format.
        # Makes sure only one space is kept between date and hour.
        t = time.strptime(date, '%Y-%m-%d %H:%M:%S')
        split = date.split()
        date = "%s %s" % (split[0], split[1])

    except:
        print _("Error. The date format must be YYYY-MM-DD HH:MM:SS")
        print _("Use -h for help.")
        print _("Program terminated.")
        sys.exit()

    #TODO :fixStartEnd method???
    if fixedPrevious:
        if daily:
            span = "daily"
            graphicType = "daily"
            start, end = StatsDateLib.getStartEndFromPreviousDay(date)
        elif weekly:
            span = "weekly"
            graphicType = "weekly"
            start, end = StatsDateLib.getStartEndFromPreviousWeek(date)
        elif monthly:
            span = "monthly"
            graphicType = "monthly"
            start, end = StatsDateLib.getStartEndFromPreviousMonth(date)
        elif yearly:
            span = "yearly"
            graphicType = "yearly"
            start, end = StatsDateLib.getStartEndFromPreviousYear(date)
        timeSpan = int(
            StatsDateLib.getSecondsSinceEpoch(end) -
            StatsDateLib.getSecondsSinceEpoch(start)) / 3600

    elif fixedCurrent:
        if daily:
            span = "daily"
            graphicType = "daily"
            start, end = StatsDateLib.getStartEndFromCurrentDay(date)
        elif weekly:
            span = "weekly"
            graphicType = "weekly"
            start, end = StatsDateLib.getStartEndFromCurrentWeek(date)
        elif monthly:
            span = "monthly"
            graphicType = "monthly"
            start, end = StatsDateLib.getStartEndFromCurrentMonth(date)
        elif yearly:
            span = "yearly"
            graphicType = "yearly"
            start, end = StatsDateLib.getStartEndFromCurrentYear(date)
        timeSpan = int(
            StatsDateLib.getSecondsSinceEpoch(end) -
            StatsDateLib.getSecondsSinceEpoch(start)) / 3600

    else:
        #TODO fix span method???
        if daily:
            timeSpan = 24
            graphicType = "daily"
            span = "daily"
        elif weekly:
            timeSpan = 24 * 7
            graphicType = "weekly"
            span = "weekly"
        elif monthly:
            timeSpan = 24 * 30
            graphicType = "monthly"
            span = "monthly"
        elif yearly:
            timeSpan = 24 * 365
            graphicType = "yearly"
            span = "yearly"

        start = StatsDateLib.getIsoFromEpoch(
            StatsDateLib.getSecondsSinceEpoch(date) - timeSpan * 60 * 60)
        end = date

    if fileType != "tx" and fileType != "rx":
        print _("Error. File type must be either tx or rx.")
        print _('Multiple types are not accepted.')
        print _("Use -h for additional help.")
        print _("Program terminated.")
        sys.exit()

    if includeGroups == True:
        configParameters = StatsConfigParameters()
        configParameters.getAllParameters()
        groups = configParameters.groupParameters.groups
        machinesToSearch = machines[:]  # Forces a copy, not a reference.
        for machine in machines:
            if machinesAreClusters == True:
                machineConfig = MachineConfigParameters()
                machineConfig.getParametersFromMachineConfigurationFile()
                machinesAssociatedWith = machineConfig.getMachinesAssociatedWith(
                    machine)
                machinesToTest = str(machinesAssociatedWith).replace('[','').replace(']', '').replace(',','').replace( "'",'' ).replace('"','' ).replace(" ",'')

            for group in groups:
                groupsMachine =  str( configParameters.groupParameters.groupsMachines[group] ).replace('[','').replace(']', '').\
                                 replace(',','').replace( "'",'' ).replace('"','' ).replace(" ",'')
                #print   "machinesToTest %s groupsMachine %s" %(machinesToTest,groupsMachine )
                if machinesToTest in groupsMachine:
                    if groupsMachine not in machinesToSearch:
                        machinesToSearch.append(groupsMachine)

    else:
        machinesToSearch = machines[:]  # No groups requested; search the given machines directly.

    #print machines
    infos = _CsvInfos( start = start , end = end  , span = span, timeSpan = timeSpan, fileType = fileType, machinesForLabels = machines,\
                       machinesToSearch = machinesToSearch, machinesAreClusters = machinesAreClusters, dataSource = "databases", outputLanguage = outputLanguage )

    return infos
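For reference, the attribute lookups above imply an optparse parser roughly like the following. This is a minimal sketch, not the original parser: only -d, -m, -w and -y are confirmed by the error messages, so every long-option spelling here is an assumption.

from optparse import OptionParser

def createParser():
    # Hypothetical parser: option names are inferred from the attributes
    # that getOptionsFromParser reads off the options object.
    parser = OptionParser()
    parser.add_option('--machines', dest='machines', default='')
    parser.add_option('--date', dest='date', default='')
    parser.add_option('--fileType', dest='fileType', default='tx')
    parser.add_option('-d', '--daily', action='store_true', dest='daily', default=False)
    parser.add_option('-w', '--weekly', action='store_true', dest='weekly', default=False)
    parser.add_option('-m', '--monthly', action='store_true', dest='monthly', default=False)
    parser.add_option('-y', '--yearly', action='store_true', dest='yearly', default=False)
    parser.add_option('--fixedCurrent', action='store_true', dest='fixedCurrent', default=False)
    parser.add_option('--fixedPrevious', action='store_true', dest='fixedPrevious', default=False)
    parser.add_option('--turnOffLogging', action='store_true', dest='turnOffLogging', default=False)
    parser.add_option('--includeGroups', action='store_true', dest='includeGroups', default=False)
    parser.add_option('--machinesAreClusters', action='store_true', dest='machinesAreClusters', default=False)
    parser.add_option('--outputLanguage', dest='outputLanguage', default='en')
    return parser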
Example no. 43
0
def convertTimeSeperator(seperator):
    return int(StatsDateLib.getSecondsSinceEpoch(seperator) + 60)
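A quick usage sketch, assuming StatsDateLib accepts the "YYYY-MM-DD HH:MM:SS" strings used throughout these examples:

# Returns the epoch seconds for the separator, shifted one minute forward.
epochPlusOneMinute = convertTimeSeperator("2006-07-31 13:00:00")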
Example no. 44
0
def getDataFromDatabases( sourlients, dataTypes, infos ):
    """
        @summary: Gathers up all the required data from all the concerned databases 
    
        @param sourlients: List of source clients for which we need to gather up data.
        
        @param infos: Infos object describing the machines to search, the file type and the time span.
        
        @param dataTypes: Datatypes for which we need to collect data.
        
        @return : Return the data dictionary filled with all the collected data.
        
    """

    
    data = {}
    
    
    for sourlient in sourlients.keys() :
        data[sourlient] = {}
        
        sourlientsMachines = sourlients[sourlient]
            
        for machine in infos.machinesToSearch :
            
            oldMachine = machine

            if infos.machinesAreClusters == True:
                machineConfig = MachineConfigParameters()
                machineConfig.getParametersFromMachineConfigurationFile()
                machines = machineConfig.getMachinesAssociatedWith( machine )
                machine = str(machines).replace('[','').replace(']', '').replace(',','').replace( "'",'' ).replace('"','' ).replace(" ",'')

            if machine == '': # Cluster resolved to no machines; fall back to the original name.
                machine = oldMachine
                
            if machine in sourlientsMachines:
                data[sourlient][machine] = {}
                
                for dataType in dataTypes :
                    
                    if infos.outputLanguage != 'en' :
                        translatedDataType = LanguageTools.translateDataType( dataType, "en", infos.outputLanguage )
                    else  :
                        translatedDataType = dataType   
                    
                    databaseName = RrdUtilities.buildRRDFileName( dataType = translatedDataType, clients=  [sourlient], machines = [machine], fileType = infos.fileType )
 
                    if not os.path.isfile( databaseName ):
                        if infos.includegroups == True:
                            databaseName = RrdUtilities.buildRRDFileName(dataType = translatedDataType, groupName = sourlient, machines = [machine], fileType = infos.fileType, usage = "group" )

                    lastUpdate = RrdUtilities.getDatabaseTimeOfUpdate( databaseName, infos.fileType )        
                                        
                    fetchedInterval = getInterval( int(StatsDateLib.getSecondsSinceEpoch(infos.start)), lastUpdate, dataType, goal = "fetchData"  )  
                    desiredInterval = getInterval( int(StatsDateLib.getSecondsSinceEpoch(infos.start)), lastUpdate, dataType, goal = "plotGraphic"  )
                    interval        = desiredInterval     
                    minimum, maximum, mean, total = getGraphicsMinMaxMeanTotal( databaseName, int(StatsDateLib.getSecondsSinceEpoch(infos.start)),\
                                                                                int(StatsDateLib.getSecondsSinceEpoch(infos.end)), infos.span,\
                                                                                fetchedInterval,desiredInterval, type = "average" )
                    data[sourlient][machine][dataType] = {}
                    data[sourlient][machine][dataType]["min"]   = minimum
                    data[sourlient][machine][dataType]["max"]   = maximum
                    data[sourlient][machine][dataType]["mean"]  = mean
                    data[sourlient][machine][dataType]["total"] = total
                     

    return data
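A minimal invocation sketch, assuming infos was produced by a getOptionsFromParser-style routine and that sourlients maps each client or source name to the machines it runs on (the names below are placeholders):

# Hypothetical inputs; keys are client/source names, values are machine lists.
sourlients = { "someclient": ["machine1"] }
data = getDataFromDatabases(sourlients, ["bytecount", "latency"], infos)
print data["someclient"]["machine1"]["bytecount"]["total"]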
Example no. 45
0
def getPairsFromMergedData(statType, mergedData, logger=None):
    """
        This method is used to create the time/value pairs used to feed an rrd database.
        
    """

    pairs = []
    nbEntries = len(mergedData.statsCollection.timeSeperators) - 1

    def convertTimeSeperator(seperator):
        return int(StatsDateLib.getSecondsSinceEpoch(seperator) + 60)

    timeSeperators = map(convertTimeSeperator,
                         mergedData.statsCollection.timeSeperators)
    fileEntries = mergedData.statsCollection.fileEntries

    if nbEntries != 0:

        for i in xrange(0, nbEntries):

            try:

                if len(mergedData.statsCollection.fileEntries[i].means) >= 1:

                    if statType == "filesOverMaxLatency":
                        pairs.append([
                            timeSeperators[i],
                            fileEntries[i].filesOverMaxLatency
                        ])

                    elif statType == "errors":

                        pairs.append([
                            timeSeperators[i], fileEntries[i].totals[statType]
                        ])

                    elif statType == "bytecount":

                        pairs.append([
                            timeSeperators[i], fileEntries[i].totals[statType]
                        ])

                    elif statType == "latency":

                        pairs.append([
                            timeSeperators[i], fileEntries[i].means[statType]
                        ])

                    elif statType == "filecount":
                        pairs.append([
                            timeSeperators[i],
                            len(fileEntries[i].values.productTypes)
                        ])

                    else:

                        pairs.append([timeSeperators[i], 0.0])

                else:

                    pairs.append([timeSeperators[i], 0.0])

            except KeyError:
                if logger != None:
                    try:
                        logger.error(_("Error in getPairs."))
                        logger.error(
                            _("The %s stat type was not found in previously collected data."
                              ) % statType)
                    except:
                        pass
                pairs.append([
                    int(
                        StatsDateLib.getSecondsSinceEpoch(
                            mergedData.statsCollection.timeSeperators[i])) +
                    60, 0.0
                ])
                sys.exit()

        return pairs
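The returned pairs are shaped for RRD updates, each being [epochSeconds, value]. As a sketch of what consuming them could look like, assuming the common python rrdtool binding rather than the RrdUtilities wrapper these examples actually use:

import rrdtool  # assumed binding; not imported by the original examples

def feedPairsToDatabase(rrdFileName, pairs):
    # rrdtool updates take "time:value" strings, one per data point.
    for timestamp, value in pairs:
        rrdtool.update(rrdFileName, "%s:%s" % (timestamp, value))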
Example no. 46
0
def getOptionsFromParser( parser ):
    """
        @summary: Parses and validates the options found in the parser. 
        
        @return: The parsed options, if the information was found to be valid.
    
    """
    
    infos = None 
    date   = []
    
    ( options, args )= parser.parse_args()        
    machines         = options.machines.replace( ' ','').split(',')
    date             = options.date.replace('"','').replace("'",'')
    fileType         = options.fileType.replace("'",'')
    daily            = options.daily
    weekly           = options.weekly
    monthly          = options.monthly
    yearly           = options.yearly    
    fixedCurrent     = options.fixedCurrent
    fixedPrevious    = options.fixedPrevious
    turnOffLogging   = options.turnOffLogging
    includeGroups    = options.includeGroups
    machinesAreClusters = options.machinesAreClusters
    outputLanguage      = options.outputLanguage
    
    
    
    if fixedPrevious and fixedCurrent:
        print _("Error. Please use only one of the fixed options,either fixedPrevious or fixedCurrent. " )
        print _("Use -h for help.")
        print _("Program terminated.")
        sys.exit()  
    
    counter = 0  
    specialParameters = [daily, monthly, weekly, yearly]
    for specialParameter in specialParameters:
        if specialParameter:
            counter = counter + 1 
            
    if counter > 1 :
        print _( "Error. Only one of the daily, weekly and yearly options can be use at a time " )
        print _( "Use -h for help." )
        print _( "Program terminated." )
        sys.exit()
        
    elif counter == 0:    
        print _( "Error. Please use either the -d -m -w or -y options. " )
        print _( "Use -h for help." )
        print _( "Program terminated." )
        sys.exit()
         



    try: # Makes sure date is of valid format. 
         # Makes sure only one space is kept between date and hour.
        t =  time.strptime( date, '%Y-%m-%d %H:%M:%S' )
        split = date.split()
        date = "%s %s" %( split[0], split[1] )

    except:    
        print _( "Error. The date format must be YYYY-MM-DD HH:MM:SS" )
        print _( "Use -h for help." )
        print _( "Program terminated." )
        sys.exit()         
        
         
    #TODO :fixStartEnd method???    
    if fixedPrevious :
        if daily :
            span = "daily"
            graphicType = "daily"
            start, end = StatsDateLib.getStartEndFromPreviousDay( date )             
        elif weekly:
            span = "weekly"
            graphicType = "weekly"
            start, end = StatsDateLib.getStartEndFromPreviousWeek( date )
        elif monthly:
            span = "monthly"
            graphicType = "monthly"
            start, end = StatsDateLib.getStartEndFromPreviousMonth( date )
        elif yearly:
            span = "yearly" 
            graphicType = "yearly" 
            start, end = StatsDateLib.getStartEndFromPreviousYear( date )
        timeSpan = int( StatsDateLib.getSecondsSinceEpoch( end ) - StatsDateLib.getSecondsSinceEpoch( start ) ) / 3600
             
    elif fixedCurrent:
        if daily :
            span = "daily"
            graphicType = "daily"
            start, end = StatsDateLib.getStartEndFromCurrentDay( date )   
        elif weekly:
            span = "weekly"
            graphicType = "weekly"
            start, end = StatsDateLib.getStartEndFromCurrentWeek( date )
        elif monthly:
            span = "monthly"
            graphicType = "monthly"
            start, end = StatsDateLib.getStartEndFromCurrentMonth( date )    
        elif yearly:
            span = "yearly" 
            graphicType = "yearly" 
            start, end = StatsDateLib.getStartEndFromCurrentYear( date ) 
        timeSpan = int( StatsDateLib.getSecondsSinceEpoch( end ) - StatsDateLib.getSecondsSinceEpoch( start ) ) / 3600
        
    else:       
        #TODO fix span method???   
        if daily :
            timeSpan = 24  
            graphicType = "daily"  
            span = "daily"    
        elif weekly:
            timeSpan = 24 * 7  
            graphicType = "weekly" 
            span = "weekly" 
        elif monthly:
            timeSpan = 24 * 30 
            graphicType = "monthly"
            span = "monthly"       
        elif yearly:            
            timeSpan = 24 * 365
            graphicType = "yearly"  
            span = "yearly"
            
        start = StatsDateLib.getIsoFromEpoch( StatsDateLib.getSecondsSinceEpoch( date ) - timeSpan*60*60 ) 
        end   = date                       
            
     
         
    if fileType != "tx" and fileType != "rx":
        print _("Error. File type must be either tx or rx.")
        print _('Multiple types are not accepted.' )
        print _("Use -h for additional help.")
        print _("Program terminated.")
        sys.exit()    

    if includeGroups == True:
        configParameters = StatsConfigParameters()
        configParameters.getAllParameters()
        groups = configParameters.groupParameters.groups
        machinesToSearch = machines[:] # Forces a copy, not a reference.
        for machine in machines:
            if machinesAreClusters == True :
                machineConfig = MachineConfigParameters()
                machineConfig.getParametersFromMachineConfigurationFile()
                machinesAssociatedWith = machineConfig.getMachinesAssociatedWith( machine ) 
                machinesToTest = str(machinesAssociatedWith).replace('[','').replace(']', '').replace(',','').replace( "'",'' ).replace('"','' ).replace(" ",'')
            
             
            for group in groups:
                groupsMachine =  str( configParameters.groupParameters.groupsMachines[group] ).replace('[','').replace(']', '').\
                                 replace(',','').replace( "'",'' ).replace('"','' ).replace(" ",'')
                #print   "machinesToTest %s groupsMachine %s" %(machinesToTest,groupsMachine ) 
                if machinesToTest in groupsMachine :
                    if groupsMachine not in machinesToSearch:
                        machinesToSearch.append(groupsMachine)
    
    
    else:
        machinesToSearch = machines[:] # No groups requested; search the given machines directly.

    #print machines
    infos = _CsvInfos( start = start , end = end  , span = span, timeSpan = timeSpan, fileType = fileType, machinesForLabels = machines,\
                       machinesToSearch = machinesToSearch, machinesAreClusters = machinesAreClusters, dataSource = "databases", outputLanguage = outputLanguage )
    
    return infos
Example no. 47
0
def mergePicklesFromDifferentSources( logger = None , startTime = "2006-07-31 13:00:00",\
                                      endTime = "2006-07-31 19:00:00", clients = ["someclient"],\
                                      fileType = "tx", machines = [], groupName = "" ):
    """
        @summary : This method allows the user to merge pickles coming from numerous machines
                   covering as many hours as wanted, into a single FileStatsCollector entry.

                   Very useful when creating graphics on a central server with pickle files
                   coming from remote locations.

    """

    combinedMachineName = ""
    combinedClientName  = ""

    combinedMachineName = combinedMachineName.join( [machine for machine in machines ] )
    combinedClientName  = combinedClientName.join( [client for client in clients] )

    if groupName != "":
        clientsForVersionManagement = groupName
    else:
        clientsForVersionManagement = clients

    vc = PickleVersionChecker()

    vc.getClientsCurrentFileList( clients )

    vc.getSavedList( user = combinedMachineName, clients = clientsForVersionManagement )

    width = StatsDateLib.getSecondsSinceEpoch( endTime ) - StatsDateLib.getSecondsSinceEpoch( startTime )
    startTime = StatsDateLib.getIsoWithRoundedHours( startTime )

    seperators = [startTime]
    seperators.extend( StatsDateLib.getSeparatorsWithStartTime( startTime = startTime , width=width, interval=60*StatsDateLib.MINUTE )[:-1])

    mergedPickleNames = PickleMerging.createMergedPicklesList( startTime = startTime, endTime = endTime, machines = machines,\
                                                               fileType = fileType, clients = clients, groupName = groupName,\
                                                               seperators = seperators ) #Resulting list of the merger.

    for i in xrange( len( mergedPickleNames ) ) : #for every merger needed

        needToMergeSameHoursPickle = False
        pickleNames = PickleMerging.createNonMergedPicklesList( currentTime = seperators[i], machines = machines, fileType = fileType, clients = clients )

        if not os.path.isfile( mergedPickleNames[i] ):
            needToMergeSameHoursPickle = True
        else:

            for pickle in pickleNames : #Verify every pickle implicated in merger.
                # if for some reason pickle has changed since last time
                if vc.isDifferentFile( file = pickle, user = combinedMachineName, clients = clientsForVersionManagement ) == True :

                    needToMergeSameHoursPickle = True
                    break

        if needToMergeSameHoursPickle == True :#First time or one element has changed

            PickleMerging.mergePicklesFromSameHour( logger = logger , pickleNames = pickleNames , clientName = combinedClientName,\
                                                    combinedMachineName = combinedMachineName, currentTime = seperators[i],\
                                                    mergedPickleName = mergedPickleNames[i], fileType = fileType  )

            for pickle in pickleNames :
                vc.updateFileInList( file = pickle )

            vc.saveList( user = combinedMachineName, clients = clientsForVersionManagement )

    # Once all machines have merged the necessary pickles, we merge all pickles
    # into a single file stats entry.
    if groupName != "":
        nameToUseForMerger = groupName
    else:
        nameToUseForMerger = ""
        nameToUseForMerger = nameToUseForMerger.join( [ client for client in clients] )

    newFSC = PickleMerging.mergePicklesFromDifferentHours( logger = logger , startTime = startTime, endTime = endTime, client = nameToUseForMerger,\
                                                           machine = combinedMachineName,fileType = fileType  )

    return newFSC
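A call sketch mirroring the defaults in the signature above; the machine and client names here are placeholders:

newFSC = mergePicklesFromDifferentSources( logger = None, startTime = "2006-07-31 13:00:00",\
                                           endTime = "2006-07-31 19:00:00", clients = ["someclient"],\
                                           fileType = "tx", machines = ["machine1"] )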
def getRxTxNamesHavingRunDuringPeriod( start, end, machines, pattern = None, havingrunOnAllMachines = False ):
    """
        Browses all the rrd database directories to find
        the time of the last update of each database.

        If a database was updated between start and end
        and the client or source is from the specified
        machine, the name of the client or source is
        added to rxNames or txNames.

    """

    rxNames = []
    txNames = []
    txOnlyDatabases = []
    rxTxDatabases = []

    combinedMachineName = ""
    start = StatsDateLib.getSecondsSinceEpoch(start)
    end = StatsDateLib.getSecondsSinceEpoch(end)

    if havingrunOnAllMachines == False:
        for machine in machines:

            rxTxDatabasesLongNames = glob.glob( _("%sbytecount/*_*%s*") %( STATSPATHS.STATSCURRENTDB, machine ) )
            txOnlyDatabasesLongNames = glob.glob( _("%slatency/*_*%s*") %( STATSPATHS.STATSCURRENTDB, machine ) )

            #Keep only client/source names.
            for rxtxLongName in rxTxDatabasesLongNames:
                if pattern == None:
                    if rxtxLongName not in rxTxDatabases:
                        rxTxDatabases.append( rxtxLongName )
                else:
                    if fnmatch.fnmatch(os.path.basename(rxtxLongName), pattern ):
                        if rxtxLongName not in rxTxDatabases:
                            rxTxDatabases.append( rxtxLongName )

            for txLongName in txOnlyDatabasesLongNames:
                if pattern == None:
                    if txLongName not in txOnlyDatabases:
                        txOnlyDatabases.append( txLongName )
                else:
                    if fnmatch.fnmatch(os.path.basename(txLongName), pattern):
                        if txLongName not in txOnlyDatabases:
                            txOnlyDatabases.append( txLongName )

    else:
        for machine in machines:
            combinedMachineName = combinedMachineName + machine

        rxTxDatabasesLongNames = glob.glob( _("%sbytecount/*_%s*") %( STATSPATHS.STATSCURRENTDB, combinedMachineName ) )
        txOnlyDatabasesLongNames = glob.glob( _("%slatency/*_%s*") %( STATSPATHS.STATSCURRENTDB, combinedMachineName ) )

        #Keep only client/source names.
        for rxtxLongName in rxTxDatabasesLongNames:
            if pattern == None:
                rxTxDatabases.append( rxtxLongName )
            else:
                if fnmatch.fnmatch(os.path.basename(rxtxLongName), pattern ):
                    rxTxDatabases.append( rxtxLongName )

        for txLongName in txOnlyDatabasesLongNames:
            if pattern == None:
                txOnlyDatabases.append( txLongName )
            else:
                if fnmatch.fnmatch(os.path.basename(txLongName), pattern):
                    txOnlyDatabases.append( txLongName )

    rxOnlyDatabases = filter( lambda x: x not in txOnlyDatabases, rxTxDatabases )

    for rxDatabase in rxOnlyDatabases:
        lastUpdate = RrdUtilities.getDatabaseTimeOfUpdate( rxDatabase, "rx" )
        if lastUpdate >= start:
            #fileName format is ../path/rxName_machineName
            rxDatabase = os.path.basename( rxDatabase )
            rxDatabase = rxDatabase.split( "_%s" %( rxDatabase.split('_')[-1:][0] ) )[0]
            rxNames.append( rxDatabase )

    for txDatabase in txOnlyDatabases:
        lastUpdate = RrdUtilities.getDatabaseTimeOfUpdate( txDatabase, "tx" )

        if lastUpdate >= start:

            txDatabase = os.path.basename( txDatabase )
            txDatabase = txDatabase.split("_%s" %( txDatabase.split('_')[-1:][0] ) )[0]
            txNames.append( txDatabase )

    rxNames = filter( GeneralStatsLibraryMethods.filterGroupNames, rxNames )
    txNames = filter( GeneralStatsLibraryMethods.filterGroupNames, txNames )

    try:
        rxNames.remove('rx')
    except:
        pass
    try:
        txNames.remove('tx')
    except:
        pass

    rxNames.sort()
    txNames.sort()

    return rxNames, txNames
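Finally, a usage sketch for the lookup above, assuming the ISO date strings used elsewhere in these examples; the machine name is a placeholder:

# Hypothetical: list the sources/clients active during a six-hour window.
rxNames, txNames = getRxTxNamesHavingRunDuringPeriod( "2006-07-31 13:00:00",\
                                                      "2006-07-31 19:00:00", ["machine1"] )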