def __init__(self):
    """Initialise data-operation monitoring backends.

    Reads `MonitoringBackends/DataOperation` from the CS (falling back to
    `MonitoringBackends/Default`) and instantiates a MonitoringReporter
    and/or a DataOperation accounting object accordingly.
    """
    monitoringType = "DataOperation"
    self.monitoringOptions = Operations().getMonitoringBackends(monitoringType)
    if "Monitoring" in self.monitoringOptions:
        self.dataOperationReporter = MonitoringReporter(monitoringType)
    if "Accounting" in self.monitoringOptions:
        self.dataOp = DataOperation()
Example #2
0
  def __sendAccounting(ftsJob, ownerDN):
    """Build a DataOperation accounting record for *ftsJob* and commit it.

    :param ftsJob: finished FTS job (iterable of file objects)
    :param ownerDN: DN of the job owner, resolved to a username when possible
    """
    dataOp = DataOperation()
    dataOp.setStartTime(fromString(ftsJob.SubmitTime))
    dataOp.setEndTime(fromString(ftsJob.LastUpdate))

    # Fall back to the raw DN when the username lookup fails.
    lookup = getUsernameForDN(ownerDN)
    username = lookup["Value"] if lookup["OK"] else ownerDN

    accountingDict = dict()
    accountingDict["OperationType"] = "ReplicateAndRegister"
    accountingDict["User"] = username
    accountingDict["Protocol"] = "FTS"
    accountingDict["TransferOK"] = len([f for f in ftsJob if f.Status == "Finished"])
    accountingDict["TransferTotal"] = len(ftsJob)
    accountingDict["TransferSize"] = ftsJob.Size
    accountingDict["FinalStatus"] = ftsJob.Status
    accountingDict["Source"] = ftsJob.SourceSE
    accountingDict["Destination"] = ftsJob.TargetSE

    # Wall-clock duration of the job, folded to whole seconds.
    delta = ftsJob.LastUpdate - ftsJob.SubmitTime
    accountingDict["TransferTime"] = delta.days * 86400 + delta.seconds

    dataOp.setValuesFromDict(accountingDict)
    dataOp.commit()
Example #3
0
  def __sendAccounting(ftsJob):
    """Send a DataOperation accounting record built from an FTS3 job.

        :param ftsJob: the FTS3Job whose accounting info is sent
    """
    record = DataOperation()
    record.setStartTime(fromString(ftsJob.submitTime))
    record.setEndTime(fromString(ftsJob.lastUpdate))
    # The job already carries a prepared accounting dictionary.
    record.setValuesFromDict(ftsJob.accountingDict)
    # Delayed commit: the record is batched by the DataStore client.
    record.delayedCommit()
    def __initialiseAccountingObject(self, destSE, successful):
        """Create a DataOperation accounting record for one xroot file access.

        :param destSE: destination storage element name
        :param successful: whether the access succeeded
        :returns: populated DataOperation object
        """
        accountingDict = {
            'OperationType': 'fileAccess',
            'User': '******',
            'Protocol': 'xroot',
            'RegistrationTime': 0.0,
            'RegistrationOK': 0,
            'RegistrationTotal': 0,
            'Destination': destSE,
            'TransferTotal': 1,
            'TransferOK': 1 if successful else 0,
            'TransferSize': 0,
            'TransferTime': 0.0,
            'FinalStatus': 'Successful' if successful else 'Failed',
            'Source': self.siteName,
        }
        oDataOperation = DataOperation()
        oDataOperation.setValuesFromDict(accountingDict)

        # Use the step start time for both ends when it is available.
        if 'StartTime' in self.step_commons:
            stepStart = self.step_commons['StartTime']
            oDataOperation.setStartTime(stepStart)
            oDataOperation.setEndTime(stepStart)

        return oDataOperation
Example #5
0
  def __sendAccounting(ftsJob):
    """Prepare a DataOperation record from *ftsJob* and queue it for commit.

        :param ftsJob: the FTS3Job from which the accounting info is taken
    """
    dataOp = DataOperation()
    # Start/end times come from the job's own timestamps.
    dataOp.setStartTime(fromString(ftsJob.submitTime))
    dataOp.setEndTime(fromString(ftsJob.lastUpdate))
    dataOp.setValuesFromDict(ftsJob.accountingDict)
    dataOp.delayedCommit()
def createAccountingRecord():
  """Return a canned 'putAndRegister' DataOperation record (test fixture)."""
  accountingDict = {
      'OperationType': 'putAndRegister',
      'User': '******',
      'Protocol': 'DataManager',
      'RegistrationTime': 0.0,
      'RegistrationOK': 0,
      'RegistrationTotal': 0,
      'Destination': 'se',
      'TransferTotal': 1,
      'TransferOK': 1,
      'TransferSize': 1,
      'TransferTime': 0.0,
      'FinalStatus': 'Successful',
      'Source': 'testSite',
  }
  record = DataOperation()
  record.setValuesFromDict(accountingDict)
  return record
def createAccountingRecord():
    """Build a static 'putAndRegister' DataOperation record used in tests."""
    oDataOperation = DataOperation()
    oDataOperation.setValuesFromDict({
        'OperationType': 'putAndRegister',
        'User': '******',
        'Protocol': 'DataManager',
        'RegistrationTime': 0.0,
        'RegistrationOK': 0,
        'RegistrationTotal': 0,
        'Destination': 'se',
        'TransferTotal': 1,
        'TransferOK': 1,
        'TransferSize': 1,
        'TransferTime': 0.0,
        'FinalStatus': 'Successful',
        'Source': 'testSite',
    })
    return oDataOperation
Example #8
0
def createDataOperationAccountingRecord():
    """Create a canned "putAndRegister" DataOperation record for testing."""
    values = {
        "OperationType": "putAndRegister",
        "User": "******",
        "Protocol": "DataManager",
        "RegistrationTime": 0.0,
        "RegistrationOK": 0,
        "RegistrationTotal": 0,
        "Destination": "se",
        "TransferTotal": 1,
        "TransferOK": 1,
        "TransferSize": 1,
        "TransferTime": 0.0,
        "FinalStatus": "Successful",
        "Source": "testSite",
    }
    record = DataOperation()
    record.setValuesFromDict(values)
    return record
Example #9
0
 def __sendAccounting(self, regSuc, regTotal, regTime, transEndTime):
     """Build and commit a 'replicateAndRegister' DataOperation record.

     :param regSuc: number of files successfully registered
     :param regTotal: number of files attempted to register
     :param regTime: registration time to report
     :param transEndTime: datetime marking the end of the transfer
     :returns: S_OK()
     """
     transSuc = 0
     transSize = 0
     missingSize = []
     for lfn in self.fileDict.keys():
         if self.fileDict[lfn].get('Status') == 'Finished':
             transSuc += 1
             # dict.has_key() was removed in Python 3 -- use `in` instead
             if lfn not in self.catalogMetadata:
                 missingSize.append(lfn)
     # Fetch catalog metadata for finished files whose size we don't have yet.
     if missingSize:
         self.__updateMetadataCache(missingSize)
     for lfn in self.fileDict.keys():
         if self.fileDict[lfn].get('Status') == 'Finished':
             transSize += self.catalogMetadata[lfn]['Size']
     transTotal = 0
     for state in self.statusSummary.keys():
         transTotal += self.statusSummary[state]
     submitTime = fromString(self.submitTime)
     oAccounting = DataOperation()
     oAccounting.setEndTime(transEndTime)
     oAccounting.setStartTime(submitTime)
     accountingDict = {}
     accountingDict['OperationType'] = 'replicateAndRegister'
     accountingDict['User'] = '******'
     accountingDict['Protocol'] = 'FTS'
     accountingDict['RegistrationTime'] = regTime
     accountingDict['RegistrationOK'] = regSuc
     accountingDict['RegistrationTotal'] = regTotal
     accountingDict['TransferOK'] = transSuc
     accountingDict['TransferTotal'] = transTotal
     accountingDict['TransferSize'] = transSize
     accountingDict['FinalStatus'] = self.requestStatus
     accountingDict['Source'] = self.sourceSE
     accountingDict['Destination'] = self.targetSE
     # Transfer duration folded to whole seconds.
     c = transEndTime - submitTime
     accountingDict['TransferTime'] = c.days * 86400 + c.seconds
     oAccounting.setValuesFromDict(accountingDict)
     gLogger.verbose("Attempting to commit accounting message...")
     oAccounting.commit()
     gLogger.verbose("...committed.")
     return S_OK()
Example #10
0
  def __sendAccounting(self, regSuc, regTotal, regTime, transEndTime, transDict):
    """Commit a 'replicateAndRegister' accounting record for an FTS transfer.

    :param regSuc: number of files successfully registered
    :param regTotal: number of files attempted to register
    :param regTime: time stamp at the end of registration
    :param transEndTime: time stamp at the end of the FTS job
    :param dict transDict: counters for transferred files, their sizes and successes
    """
    submitTime = fromString(self.submitTime)
    oAccounting = DataOperation()
    delta = transEndTime - submitTime
    transferTime = delta.days * 86400 + delta.seconds
    if 'fts3' in self.ftsServer and transferTime < 0:
      import datetime
      # Shift by one hour until transfer time is positive (ugly fix for FTS3 bug)
      while transferTime < 0:
        transferTime += 3600
        submitTime -= datetime.timedelta(0, 3600)
      self.log.verbose('Fixed UTC submit time... Submit: %s, end: %s' % (submitTime, transEndTime))
    oAccounting.setEndTime(transEndTime)
    oAccounting.setStartTime(submitTime)

    # Report the proxy user when available, a placeholder otherwise.
    result = getProxyInfo()
    userName = result['Value'].get('username', 'unknown') if result['OK'] else '******'

    accountingDict = {
        'OperationType': 'replicateAndRegister',
        'User': userName,
        'Protocol': 'FTS' if 'fts3' not in self.ftsServer else 'FTS3',
        'RegistrationTime': regTime,
        'RegistrationOK': regSuc,
        'RegistrationTotal': regTotal,
        'TransferOK': transDict['transOK'],
        'TransferTotal': transDict['transTotal'],
        'TransferSize': transDict['transSize'],
        'FinalStatus': self.requestStatus,
        'Source': self.sourceSE,
        'Destination': self.targetSE,
        'TransferTime': transferTime,
    }
    oAccounting.setValuesFromDict(accountingDict)
    self.log.verbose("Attempting to commit accounting message...")
    oAccounting.commit()
    self.log.verbose("...committed.")
    return S_OK()
Example #11
0
    def __sendAccounting(self, regSuc, regTotal, regTime, transEndTime,
                         transDict):
        """Commit a 'replicateAndRegister' DataOperation accounting record.

        :param regSuc: number of files successfully registered
        :param regTotal: number of files attempted to register
        :param regTime: time stamp at the end of registration
        :param transEndTime: time stamp at the end of the FTS job
        :param dict transDict: counters for transferred files, their sizes and successes
        """
        record = DataOperation()
        record.setEndTime(transEndTime)
        record.setStartTime(self.submitTime)

        # Report the proxy user when available, a placeholder otherwise.
        proxyInfo = getProxyInfo()
        if proxyInfo['OK']:
            userName = proxyInfo['Value'].get('username', 'unknown')
        else:
            userName = '******'

        record.setValuesFromDict({
            'OperationType': 'replicateAndRegister',
            'User': userName,
            'Protocol': 'FTS' if 'fts3' not in self.ftsServer else 'FTS3',
            'RegistrationTime': regTime,
            'RegistrationOK': regSuc,
            'RegistrationTotal': regTotal,
            'TransferOK': transDict['transOK'],
            'TransferTotal': transDict['transTotal'],
            'TransferSize': transDict['transSize'],
            'FinalStatus': self.requestStatus,
            'Source': self.sourceSE,
            'Destination': self.targetSE,
            'TransferTime': self.transferTime,
        })
        self.log.verbose("Attempting to commit accounting message...")
        record.commit()
        self.log.verbose("...committed.")
        return S_OK()
Example #12
0
    def __sendAccounting(ftsJob, ownerDN):
        """Prepare and commit a DataOperation accounting record for an FTS job.

        :param ftsJob: finished FTS job (iterable of FTSFile objects)
        :param ownerDN: DN of the job owner
        """
        dataOp = DataOperation()
        dataOp.setStartTime(fromString(ftsJob.SubmitTime))
        dataOp.setEndTime(fromString(ftsJob.LastUpdate))

        # Resolve the DN to a username, falling back to the DN itself.
        result = getUsernameForDN(ownerDN)
        username = result["Value"] if result["OK"] else ownerDN

        accountingDict = dict()
        accountingDict["OperationType"] = "ReplicateAndRegister"
        accountingDict["User"] = username
        accountingDict["Protocol"] = "FTS3" if 'fts3' in ftsJob.FTSServer.lower() else 'FTS'
        accountingDict['ExecutionSite'] = ftsJob.FTSServer
        accountingDict['RegistrationTime'] = ftsJob._regTime
        accountingDict['RegistrationOK'] = ftsJob._regSuccess
        accountingDict['RegistrationTotal'] = ftsJob._regTotal
        accountingDict["TransferOK"] = len([f for f in ftsJob if f.Status in FTSFile.SUCCESS_STATES])
        accountingDict["TransferTotal"] = len(ftsJob)
        accountingDict["TransferSize"] = ftsJob.Size - ftsJob.FailedSize
        accountingDict["FinalStatus"] = ftsJob.Status
        accountingDict["Source"] = ftsJob.SourceSE
        accountingDict["Destination"] = ftsJob.TargetSE
        # Job duration folded to whole seconds.
        delta = ftsJob.LastUpdate - ftsJob.SubmitTime
        accountingDict["TransferTime"] = delta.days * 86400 + delta.seconds
        dataOp.setValuesFromDict(accountingDict)
        dataOp.commit()
Example #13
0
 def __sendAccounting(self, regSuc, regTotal, regTime, transEndTime):
   """Build and commit a 'replicateAndRegister' DataOperation record.

   :param regSuc: number of files successfully registered
   :param regTotal: number of files attempted to register
   :param regTime: registration time to report
   :param transEndTime: datetime marking the end of the transfer
   :returns: S_OK()
   """
   transSuc = 0
   transSize = 0
   missingSize = []
   for lfn in self.fileDict.keys():
     if self.fileDict[lfn].get('Status') == 'Finished':
       transSuc += 1
       # dict.has_key() was removed in Python 3 -- use `in` instead
       if lfn not in self.catalogMetadata:
         missingSize.append(lfn)
   # Fetch catalog metadata for finished files whose size we don't have yet.
   if missingSize:
     self.__updateMetadataCache(missingSize)
   for lfn in self.fileDict.keys():
     if self.fileDict[lfn].get('Status') == 'Finished':
       transSize += self.catalogMetadata[lfn]['Size']
   transTotal = 0
   for state in self.statusSummary.keys():
     transTotal += self.statusSummary[state]
   submitTime = fromString(self.submitTime)
   oAccounting = DataOperation()
   oAccounting.setEndTime(transEndTime)
   oAccounting.setStartTime(submitTime)
   accountingDict = {}
   accountingDict['OperationType'] = 'replicateAndRegister'
   accountingDict['User'] = '******'
   accountingDict['Protocol'] = 'FTS'
   accountingDict['RegistrationTime'] = regTime
   accountingDict['RegistrationOK'] = regSuc
   accountingDict['RegistrationTotal'] = regTotal
   accountingDict['TransferOK'] = transSuc
   accountingDict['TransferTotal'] = transTotal
   accountingDict['TransferSize'] = transSize
   accountingDict['FinalStatus'] = self.requestStatus
   accountingDict['Source'] = self.sourceSE
   accountingDict['Destination'] = self.targetSE
   # Transfer duration folded to whole seconds.
   c = transEndTime - submitTime
   accountingDict['TransferTime'] = c.days * 86400 + c.seconds
   oAccounting.setValuesFromDict(accountingDict)
   gLogger.verbose("Attempting to commit accounting message...")
   oAccounting.commit()
   gLogger.verbose("...committed.")
   return S_OK()
Example #14
0
    def __sendAccounting(self, regSuc, regTotal, regTime, transEndTime,
                         transDict):
        """Send a 'replicateAndRegister' DataOperation accounting record.

        :param regSuc: number of files successfully registered
        :param regTotal: number of files attempted to register
        :param regTime: time stamp at the end of registration
        :param transEndTime: time stamp at the end of the FTS job
        :param dict transDict: counters for transferred files, their sizes and successes
        """
        submitTime = fromString(self.submitTime)
        oAccounting = DataOperation()
        oAccounting.setEndTime(transEndTime)
        oAccounting.setStartTime(submitTime)

        # Whole-second duration between submission and transfer end.
        elapsed = transEndTime - submitTime
        transferTime = elapsed.days * 86400 + elapsed.seconds

        oAccounting.setValuesFromDict({
            'OperationType': 'replicateAndRegister',
            'User': '******',
            'Protocol': 'FTS',
            'RegistrationTime': regTime,
            'RegistrationOK': regSuc,
            'RegistrationTotal': regTotal,
            'TransferOK': transDict['transOK'],
            'TransferTotal': transDict['transTotal'],
            'TransferSize': transDict['transSize'],
            'FinalStatus': self.requestStatus,
            'Source': self.sourceSE,
            'Destination': self.targetSE,
            'TransferTime': transferTime,
        })
        self.log.verbose("Attempting to commit accounting message...")
        oAccounting.commit()
        self.log.verbose("...committed.")
        return S_OK()
Example #15
0
    def __sendAccounting(ftsJob, ownerDN):
        """Build and commit a DataOperation accounting record for *ftsJob*.

        :param ftsJob: finished FTS job (iterable of file objects)
        :param ownerDN: DN of the job owner
        """
        dataOp = DataOperation()
        dataOp.setStartTime(fromString(ftsJob.SubmitTime))
        dataOp.setEndTime(fromString(ftsJob.LastUpdate))

        # Resolve the DN to a username where possible.
        lookup = getUsernameForDN(ownerDN)
        username = lookup["Value"] if lookup["OK"] else ownerDN

        accountingDict = dict()
        accountingDict["OperationType"] = "ReplicateAndRegister"
        accountingDict["User"] = username
        accountingDict["Protocol"] = "FTS"
        accountingDict["TransferOK"] = len([f for f in ftsJob if f.Status == "Finished"])
        accountingDict["TransferTotal"] = len(ftsJob)
        accountingDict["TransferSize"] = ftsJob.Size
        accountingDict["FinalStatus"] = ftsJob.Status
        accountingDict["Source"] = ftsJob.SourceSE
        accountingDict["Destination"] = ftsJob.TargetSE
        # Whole-second job duration.
        delta = ftsJob.LastUpdate - ftsJob.SubmitTime
        accountingDict["TransferTime"] = delta.days * 86400 + delta.seconds
        dataOp.setValuesFromDict(accountingDict)
        dataOp.commit()
Example #16
0
  def __sendAccounting(self, regSuc, regTotal, regTime, transEndTime, transDict):
    """Commit a 'replicateAndRegister' DataOperation accounting record.

    :param regSuc: number of files successfully registered
    :param regTotal: number of files attempted to register
    :param regTime: time stamp at the end of registration
    :param transEndTime: time stamp at the end of the FTS job
    :param dict transDict: counters for transferred files, their sizes and successes
    """
    oAccounting = DataOperation()
    oAccounting.setEndTime(transEndTime)
    oAccounting.setStartTime(self.submitTime)

    # Report the proxy user when available, a placeholder otherwise.
    proxyInfo = getProxyInfo()
    userName = proxyInfo['Value'].get('username', 'unknown') if proxyInfo['OK'] else '******'

    accountingDict = {
        'OperationType': 'replicateAndRegister',
        'User': userName,
        'Protocol': 'FTS' if 'fts3' not in self.ftsServer else 'FTS3',
        'RegistrationTime': regTime,
        'RegistrationOK': regSuc,
        'RegistrationTotal': regTotal,
        'TransferOK': transDict['transOK'],
        'TransferTotal': transDict['transTotal'],
        'TransferSize': transDict['transSize'],
        'FinalStatus': self.requestStatus,
        'Source': self.sourceSE,
        'Destination': self.targetSE,
        'TransferTime': self.transferTime,
    }
    oAccounting.setValuesFromDict(accountingDict)
    self.log.verbose("Attempting to commit accounting message...")
    oAccounting.commit()
    self.log.verbose("...committed.")
    return S_OK()
Example #17
0
  def __sendAccounting(ftsJob, ownerDN):
    """Prepare and commit a DataOperation record for a finished FTS job.

    :param ftsJob: the FTS job (iterable of FTSFile objects)
    :param ownerDN: DN of the job owner
    """
    dataOp = DataOperation()
    dataOp.setStartTime(fromString(ftsJob.SubmitTime))
    dataOp.setEndTime(fromString(ftsJob.LastUpdate))

    # Resolve the DN to a username, falling back to the DN itself.
    lookup = getUsernameForDN(ownerDN)
    username = lookup["Value"] if lookup["OK"] else ownerDN

    accountingDict = dict()
    accountingDict["OperationType"] = "ReplicateAndRegister"
    accountingDict["User"] = username
    accountingDict["Protocol"] = "FTS3" if 'fts3' in ftsJob.FTSServer.lower() else 'FTS'
    accountingDict['ExecutionSite'] = ftsJob.FTSServer
    accountingDict['RegistrationTime'] = ftsJob._regTime
    accountingDict['RegistrationOK'] = ftsJob._regSuccess
    accountingDict['RegistrationTotal'] = ftsJob._regTotal
    accountingDict["TransferOK"] = len([f for f in ftsJob if f.Status in FTSFile.SUCCESS_STATES])
    accountingDict["TransferTotal"] = len(ftsJob)
    accountingDict["TransferSize"] = ftsJob.Size - ftsJob.FailedSize
    accountingDict["FinalStatus"] = ftsJob.Status
    accountingDict["Source"] = ftsJob.SourceSE
    accountingDict["Destination"] = ftsJob.TargetSE
    # Sum per-file durations of successful transfers rather than job wall time.
    accountingDict['TransferTime'] = sum([int(f._duration) for f in ftsJob if f.Status in FTSFile.SUCCESS_STATES])
    dataOp.setValuesFromDict(accountingDict)
    dataOp.commit()
Example #18
0
  def __sendAccounting(self, regSuc, regTotal, regTime, transEndTime, transDict):
    """Send a 'replicateAndRegister' DataOperation accounting record.

    :param regSuc: number of files successfully registered
    :param regTotal: number of files attempted to register
    :param regTime: time stamp at the end of registration
    :param transEndTime: time stamp at the end of the FTS job
    :param dict transDict: counters for transferred files, their sizes and successes
    """
    submitTime = fromString(self.submitTime)
    oAccounting = DataOperation()
    oAccounting.setEndTime(transEndTime)
    oAccounting.setStartTime(submitTime)

    # Whole-second duration between submission and transfer end.
    elapsed = transEndTime - submitTime
    transferTime = elapsed.days * 86400 + elapsed.seconds

    accountingDict = {
        'OperationType': 'replicateAndRegister',
        'User': '******',
        'Protocol': 'FTS',
        'RegistrationTime': regTime,
        'RegistrationOK': regSuc,
        'RegistrationTotal': regTotal,
        'TransferOK': transDict['transOK'],
        'TransferTotal': transDict['transTotal'],
        'TransferSize': transDict['transSize'],
        'FinalStatus': self.requestStatus,
        'Source': self.sourceSE,
        'Destination': self.targetSE,
        'TransferTime': transferTime,
    }
    oAccounting.setValuesFromDict(accountingDict)
    self.log.verbose("Attempting to commit accounting message...")
    oAccounting.commit()
    self.log.verbose("...committed.")
    return S_OK()
Example #19
0
 def _summaryDataByDestination(self, startTime, endTime, argsDict):
     """Summarise bucketed DataOperation data grouped by Destination.

     If argsDict contains 'Destination', restrict the summary to those
     sites; otherwise summarise all destinations.
     """
     condDict = {"Destination": argsDict['Destination']} if 'Destination' in argsDict else {}
     do = DataOperation()
     selectFields = ["Destination"]
     selectStringList = ["%s"]
     # One placeholder per accounting field defined by DataOperation.
     for fieldTuple in do.definitionAccountingFields:
         selectFields.append(fieldTuple[0])
         selectStringList.append("%s")
     retVal = self._retrieveBucketedData(
         "DataOperation", startTime, endTime,
         (", ".join(selectStringList), selectFields), condDict,
         ["Destination"], ["Destination"])
     if not retVal['OK']:
         return retVal
     return S_OK((selectFields, retVal['Value']))
Example #20
0
 def __initialiseAccountingObject(self, operation, se, startTime, endTime,
                                  size):
     """Create a successful SRM DataOperation record for one transfer.

     :param operation: accounting OperationType
     :param se: destination storage element
     :param startTime: transfer start time (datetime)
     :param endTime: transfer end time (datetime)
     :param size: transferred size in bytes
     :returns: populated DataOperation object
     """
     # Duration in seconds, keeping the fractional (microsecond) part.
     timeDiff = endTime - startTime
     transferTime = (timeDiff.days * 86400) + timeDiff.seconds + (
         timeDiff.microseconds / 1000000.0)
     accountingDict = {
         'OperationType': operation,
         'User': self.userName,
         'Protocol': 'SRM',
         'RegistrationTime': 0.0,
         'RegistrationOK': 0,
         'RegistrationTotal': 0,
         'TransferTotal': 1,
         'TransferOK': 1,
         'TransferSize': size,
         'TransferTime': transferTime,
         'FinalStatus': 'Successful',
         'Source': siteName(),
         'Destination': se,
     }
     oDataOperation = DataOperation()
     oDataOperation.setEndTime(endTime)
     oDataOperation.setStartTime(startTime)
     oDataOperation.setValuesFromDict(accountingDict)
     return oDataOperation
 def __initialiseAccountingObject(self, operation, se, startTime, endTime, size):
   """Build a 'Successful' SRM DataOperation accounting object.

   :param operation: accounting OperationType
   :param se: destination SE name
   :param startTime: operation start time (datetime)
   :param endTime: operation end time (datetime)
   :param size: number of bytes transferred
   :returns: populated DataOperation object
   """
   accountingDict = {}
   accountingDict['OperationType'] = operation
   accountingDict['User'] = self.userName
   accountingDict['Protocol'] = 'SRM'
   accountingDict['RegistrationTime'] = 0.0
   accountingDict['RegistrationOK'] = 0
   accountingDict['RegistrationTotal'] = 0
   accountingDict['TransferTotal'] = 1
   accountingDict['TransferOK'] = 1
   accountingDict['TransferSize'] = size
   # Seconds, including the fractional (microsecond) part.
   duration = endTime - startTime
   accountingDict['TransferTime'] = (duration.days * 86400) + duration.seconds + (duration.microseconds / 1000000.0)
   accountingDict['FinalStatus'] = 'Successful'
   accountingDict['Source'] = siteName()
   accountingDict['Destination'] = se
   record = DataOperation()
   record.setEndTime(endTime)
   record.setStartTime(startTime)
   record.setValuesFromDict(accountingDict)
   return record
Example #22
0
 def _summaryDataBySourceAndDestination(self, startTime, endTime, argsDict):
     """Summarise bucketed DataOperation data keyed by (Destination, Source).

     argsDict may restrict the summary:
       - Source -> only these source sites
       - Destination -> only these destination sites
     """
     keyFields = ('Destination', 'Source')
     condDict = {keyword: argsDict[keyword] for keyword in keyFields if keyword in argsDict}
     do = DataOperation()
     selectFields = list(keyFields)
     selectStringList = ["%s, %s"]
     # One placeholder per accounting field defined by DataOperation.
     for fieldTuple in do.definitionAccountingFields:
         selectFields.append(fieldTuple[0])
         selectStringList.append("%s")
     retVal = self._retrieveBucketedData(
         "DataOperation", startTime, endTime,
         (", ".join(selectStringList), selectFields), condDict, keyFields,
         keyFields)
     if not retVal['OK']:
         return retVal
     return S_OK((selectFields, retVal['Value']))
Example #23
0
  def __monitorStorageElementStageRequests( self, storageElement, seReplicaIDs, replicaIDs ):
    """Check the staging status of pending requests on one storage element.

    Queries the SE for the metadata of all monitored LFNs, books a
    DataOperation accounting record for the staged files, and updates the
    stager DB: terminally failed replicas, completed stages, and old
    requests that need to be retried.

    :param storageElement: SE name to monitor
    :param seReplicaIDs: replica IDs to monitor on this SE
    :param replicaIDs: replicaID -> metadata dict (must contain 'LFN')
    """
    terminalReplicaIDs = {}
    oldRequests = []
    stagedReplicas = []

    # Since we are in a given SE, the LFN is a unique key
    lfnRepIDs = {}
    for replicaID in seReplicaIDs:
      lfn = replicaIDs[replicaID]['LFN']
      lfnRepIDs[lfn] = replicaID

    if lfnRepIDs:
      gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: Monitoring %s stage requests for %s." % ( len( lfnRepIDs ),
                                                                                                                  storageElement ) )
    else:
      gLogger.warn( "StageMonitor.__monitorStorageElementStageRequests: No requests to monitor for %s." % storageElement )
      return
    oAccounting = DataOperation()
    oAccounting.setStartTime()

    res = StorageElement( storageElement ).getFileMetadata( lfnRepIDs )
    if not res['OK']:
      gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Completely failed to monitor stage requests for replicas.", res['Message'] )
      return
    prestageStatus = res['Value']

    accountingDict = self.__newAccountingDict( storageElement )

    # dict.iteritems() was removed in Python 3 -- use items() instead.
    for lfn, reason in prestageStatus['Failed'].items():
      accountingDict['TransferTotal'] += 1
      if re.search( 'File does not exist', reason ):
        gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: LFN did not exist in the StorageElement", lfn )
        terminalReplicaIDs[lfnRepIDs[lfn]] = 'LFN did not exist in the StorageElement'
    for lfn, metadata in prestageStatus['Successful'].items():
      if not metadata:
        continue
      staged = metadata.get( 'Cached', metadata['Accessible'] )
      if staged:
        accountingDict['TransferTotal'] += 1
        accountingDict['TransferOK'] += 1
        accountingDict['TransferSize'] += metadata['Size']
        stagedReplicas.append( lfnRepIDs[lfn] )
      elif staged is not None:
        oldRequests.append( lfnRepIDs[lfn] )  # only ReplicaIDs

    oAccounting.setValuesFromDict( accountingDict )
    oAccounting.setEndTime()
    gDataStoreClient.addRegister( oAccounting )

    # Update the states of the replicas in the database
    if terminalReplicaIDs:
      gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: %s replicas are terminally failed." % len( terminalReplicaIDs ) )
      res = self.stagerClient.updateReplicaFailure( terminalReplicaIDs )
      if not res['OK']:
        gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to update replica failures.", res['Message'] )
    if stagedReplicas:
      gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: %s staged replicas to be updated." % len( stagedReplicas ) )
      res = self.stagerClient.setStageComplete( stagedReplicas )
      if not res['OK']:
        gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to updated staged replicas.", res['Message'] )
      res = self.stagerClient.updateReplicaStatus( stagedReplicas, 'Staged' )
      if not res['OK']:
        gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to insert replica status.", res['Message'] )
    if oldRequests:
      gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: %s old requests will be retried." % len( oldRequests ) )
      res = self.__wakeupOldRequests( oldRequests )
      if not res['OK']:
        gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to wakeup old requests.", res['Message'] )
    return
Example #24
0
  def addAccountingOperation(self, lfns, startDate, elapsedTime, storageParameters, callRes):
    """
        Generates a DataOperation accounting record if needed, and adds it
        to the DataStore client cache.

        :param lfns: list of lfns on which we attempted the operation
        :param startDate: datetime, start of the operation
        :param elapsedTime: time (seconds) the operation took
        :param storageParameters: the parameters of the plugins used to perform the operation
        :param callRes: the return of the method call, S_OK or S_ERROR

        The operation is generated with the OperationType "se.methodName".
        The TransferSize and TransferTotal for directory methods actually take into
        account the files inside the directory, and not the amount of directories given
        as parameter.
    """

    # Only data-moving / data-removing methods are accounted
    if self.methodName not in (self.readMethods + self.writeMethods + self.removeMethods):
      return

    baseAccountingDict = {}
    baseAccountingDict['OperationType'] = 'se.%s' % self.methodName
    baseAccountingDict['User'] = getProxyInfo().get('Value', {}).get('username', 'unknown')
    baseAccountingDict['RegistrationTime'] = 0.0
    baseAccountingDict['RegistrationOK'] = 0
    baseAccountingDict['RegistrationTotal'] = 0

    # if it is a get method, then source and destination of the transfer should be inverted
    if self.methodName == 'getFile':
      baseAccountingDict['Destination'] = siteName()
      baseAccountingDict['Source'] = self.name
    else:
      baseAccountingDict['Destination'] = self.name
      baseAccountingDict['Source'] = siteName()

    baseAccountingDict['TransferTotal'] = 0
    baseAccountingDict['TransferOK'] = 0
    baseAccountingDict['TransferSize'] = 0
    baseAccountingDict['TransferTime'] = 0.0
    baseAccountingDict['FinalStatus'] = 'Successful'

    oDataOperation = DataOperation()
    oDataOperation.setValuesFromDict(baseAccountingDict)
    oDataOperation.setStartTime(startDate)
    oDataOperation.setEndTime(startDate + datetime.timedelta(seconds=elapsedTime))
    oDataOperation.setValueByKey('TransferTime', elapsedTime)
    oDataOperation.setValueByKey('Protocol', storageParameters.get('Protocol', 'unknown'))

    if not callRes['OK']:
      # Everything failed: account all lfns in a single failed record
      oDataOperation.setValueByKey('TransferTotal', len(lfns))
      oDataOperation.setValueByKey('FinalStatus', 'Failed')
    else:
      succ = callRes.get('Value', {}).get('Successful', {})
      failed = callRes.get('Value', {}).get('Failed', {})

      totalSize = 0
      # We don't take len(lfns) in order to make two
      # separate entries in case of few failures
      totalSucc = len(succ)

      if self.methodName in ('putFile', 'getFile'):
        # putFile and getFile return for each entry
        # in the successful dict the size of the corresponding file
        totalSize = sum(succ.values())

      elif self.methodName in ('putDirectory', 'getDirectory'):
        # putDirectory and getDirectory return for each dir name
        # a dictionary with the keys 'Files' and 'Size'
        totalSize = sum(val.get('Size', 0) for val in succ.values() if isinstance(val, dict))
        # totalSucc is the file count, not the directory count
        totalSucc = sum(val.get('Files', 0) for val in succ.values() if isinstance(val, dict))

      oDataOperation.setValueByKey('TransferSize', totalSize)
      oDataOperation.setValueByKey('TransferTotal', totalSucc)
      oDataOperation.setValueByKey('TransferOK', totalSucc)

      # Use the safely-extracted 'failed' dict (avoids a KeyError when the
      # 'Failed' key is absent from callRes['Value'])
      if failed:
        # Emit a second, separate record covering only the failed entries
        oDataOperationFailed = copy.deepcopy(oDataOperation)
        oDataOperationFailed.setValueByKey('TransferTotal', len(failed))
        oDataOperationFailed.setValueByKey('TransferOK', 0)
        oDataOperationFailed.setValueByKey('TransferSize', 0)
        oDataOperationFailed.setValueByKey('FinalStatus', 'Failed')

        accRes = gDataStoreClient.addRegister(oDataOperationFailed)
        if not accRes['OK']:
          self.log.error("Could not send failed accounting report", accRes['Message'])

    accRes = gDataStoreClient.addRegister(oDataOperation)
    if not accRes['OK']:
      self.log.error("Could not send accounting report", accRes['Message'])
    def __monitorStorageElementStageRequests(self, storageElement,
                                             seReplicaIDs, replicaIDs):
        """
        Poll one storage element for the status of its pending stage
        (bring-online) requests, register a DataOperation accounting record,
        and update the stager database with the outcome.

        :param storageElement: name of the SE whose stage requests are checked
        :param seReplicaIDs: replica IDs pending on this SE
        :param replicaIDs: dict replicaID -> metadata dict containing 'LFN'
                           and optionally 'RequestID'
        """
        terminalReplicaIDs = {}  # replicaID -> reason, for permanent failures
        oldRequests = []         # replicaIDs not yet cached, to be retried later
        stagedReplicas = []      # replicaIDs confirmed staged (cached)

        # Since we are in a given SE, the LFN is a unique key
        lfnRepIDs = {}
        lfnReqIDs = {}
        for replicaID in seReplicaIDs:
            lfn = replicaIDs[replicaID]['LFN']
            lfnRepIDs[lfn] = replicaID
            requestID = replicaIDs[replicaID].get('RequestID', None)
            if requestID:
                lfnReqIDs[lfn] = replicaIDs[replicaID]['RequestID']

        gLogger.info(
            "StageMonitor.__monitorStorageElementStageRequests: Monitoring %s stage requests for %s."
            % (len(lfnRepIDs), storageElement))
        oAccounting = DataOperation()
        oAccounting.setStartTime()

        # NOTE(review): only LFNs that have a RequestID are queried here;
        # replicas without one are never checked — confirm this is intended.
        res = StorageElement(storageElement).getFileMetadata(lfnReqIDs)
        if not res['OK']:
            # Whole-SE query failure: nothing to update, give up for this cycle
            gLogger.error(
                "StageMonitor.__monitorStorageElementStageRequests: Completely failed to monitor stage requests for replicas.",
                res['Message'])
            return
        prestageStatus = res['Value']

        accountingDict = self.__newAccountingDict(storageElement)

        for lfn, reason in prestageStatus['Failed'].items():
            accountingDict['TransferTotal'] += 1
            if re.search('File does not exist', reason):
                # Missing file: mark the replica as terminally failed
                gLogger.error(
                    "StageMonitor.__monitorStorageElementStageRequests: LFN did not exist in the StorageElement",
                    lfn)
                terminalReplicaIDs[
                    lfnRepIDs[lfn]] = 'LFN did not exist in the StorageElement'
        for lfn, staged in prestageStatus['Successful'].items():
            if staged and 'Cached' in staged and staged['Cached']:
                # File is on the disk cache: staging is complete
                accountingDict['TransferTotal'] += 1
                accountingDict['TransferOK'] += 1
                accountingDict['TransferSize'] += staged['Size']
                stagedReplicas.append(lfnRepIDs[lfn])
            if staged and 'Cached' in staged and not staged['Cached']:
                # Still not cached: schedule the request for a retry
                oldRequests.append(lfnRepIDs[lfn])
                # only ReplicaIDs

        oAccounting.setValuesFromDict(accountingDict)
        oAccounting.setEndTime()
        gDataStoreClient.addRegister(oAccounting)

        # Update the states of the replicas in the database
        if terminalReplicaIDs:
            gLogger.info(
                "StageMonitor.__monitorStorageElementStageRequests: %s replicas are terminally failed."
                % len(terminalReplicaIDs))
            res = self.stagerClient.updateReplicaFailure(terminalReplicaIDs)
            if not res['OK']:
                gLogger.error(
                    "StageMonitor.__monitorStorageElementStageRequests: Failed to update replica failures.",
                    res['Message'])
        if stagedReplicas:
            gLogger.info(
                "StageMonitor.__monitorStorageElementStageRequests: %s staged replicas to be updated."
                % len(stagedReplicas))
            res = self.stagerClient.setStageComplete(stagedReplicas)
            if not res['OK']:
                gLogger.error(
                    "StageMonitor.__monitorStorageElementStageRequests: Failed to updated staged replicas.",
                    res['Message'])
            res = self.stagerClient.updateReplicaStatus(
                stagedReplicas, 'Staged')
            if not res['OK']:
                gLogger.error(
                    "StageMonitor.__monitorStorageElementStageRequests: Failed to insert replica status.",
                    res['Message'])
        if oldRequests:
            gLogger.info(
                "StageMonitor.__monitorStorageElementStageRequests: %s old requests will be retried."
                % len(oldRequests))
            res = self.__wakeupOldRequests(oldRequests)
            if not res['OK']:
                gLogger.error(
                    "StageMonitor.__monitorStorageElementStageRequests: Failed to wakeup old requests.",
                    res['Message'])
        return
Exemple #26
0
class DataOperationPlotter(BaseReporter):
    """
    Reporter/plotter for the "DataOperation" accounting type.

    Builds timed and summary queries over the DataOperation buckets
    (TransferOK, TransferTotal, TransferSize) and renders them as stacked
    bar, quality, cumulative and pie plots via the BaseReporter helpers.
    """

    # Accounting type handled by this plotter
    _typeName = "DataOperation"
    # Key fields taken from the DataOperation accounting type definition
    _typeKeyFields = [dF[0] for dF in DataOperation().definitionKeyFields]

    def _translateGrouping(self, grouping):
        """
        Translate a grouping name into (select format, fields[, combine expr]).
        "Channel" is a synthetic grouping built as "Source -> Destination".
        """
        if grouping == "Channel":
            return ("%s, %s", ['Source',
                               'Destination'], "CONCAT( %s, ' -> ', %s )")
        else:
            return ("%s", [grouping])

    _reportSuceededTransfersName = "Successful transfers"

    def _reportSuceededTransfers(self, reportRequest):
        """Report successful transfers (failed count folded in as 'Failed')."""
        return self.__reportTransfers(reportRequest, 'Succeeded',
                                      ('Failed', 0))

    _reportFailedTransfersName = "Failed transfers"

    def _reportFailedTransfers(self, reportRequest):
        """Report failed transfers (success count folded in as 'Succeeded')."""
        return self.__reportTransfers(reportRequest, 'Failed',
                                      ('Succeeded', 1))

    def __reportTransfers(self, reportRequest, titleType,
                          togetherFieldsToPlot):
        """
        Shared implementation for the succeeded/failed transfer reports.

        Selects SUM(TransferOK) and SUM(TransferTotal)-SUM(TransferOK)
        per time bucket, strips the field given by togetherFieldsToPlot[1]
        and re-adds it under the togetherFieldsToPlot[0] label.

        NOTE(review): titleType is unused here — the title is set in
        __plotTransfers instead; confirm before removing.
        """
        selectFields = (
            self._getSelectStringForGrouping(reportRequest['groupingFields']) +
            ", %s, %s, SUM(%s), SUM(%s)-SUM(%s)",
            reportRequest['groupingFields'][1] + [
                'startTime',
                'bucketLength',
                'TransferOK',
                'TransferTotal',
                'TransferOK',
            ])
        retVal = self._getTimedData(reportRequest['startTime'],
                                    reportRequest['endTime'], selectFields,
                                    reportRequest['condDict'],
                                    reportRequest['groupingFields'], {})
        if not retVal['OK']:
            return retVal
        dataDict, granularity = retVal['Value']
        strippedData = self.stripDataField(dataDict, togetherFieldsToPlot[1])
        if strippedData:
            dataDict[togetherFieldsToPlot[0]] = strippedData[0]
        # Convert bucket sums into rates (per second)
        dataDict, maxValue = self._divideByFactor(dataDict, granularity)
        dataDict = self._fillWithZero(granularity, reportRequest['startTime'],
                                      reportRequest['endTime'], dataDict)
        baseDataDict, graphDataDict, maxValue, unitName = self._findSuitableRateUnit(
            dataDict, self._getAccumulationMaxValue(dataDict), "files")
        return S_OK({
            'data': baseDataDict,
            'graphDataDict': graphDataDict,
            'granularity': granularity,
            'unit': unitName
        })

    def _plotSuceededTransfers(self, reportRequest, plotInfo, filename):
        """Plot the successful-transfers report."""
        return self.__plotTransfers(reportRequest, plotInfo, filename,
                                    'Succeeded', ('Failed', 0))

    def _plotFailedTransfers(self, reportRequest, plotInfo, filename):
        """Plot the failed-transfers report."""
        return self.__plotTransfers(reportRequest, plotInfo, filename,
                                    'Failed', ('Succeeded', 1))

    def __plotTransfers(self, reportRequest, plotInfo, filename, titleType,
                        togetherFieldsToPlot):
        """
        Render a transfers report as a timed stacked bar plot.

        NOTE(review): togetherFieldsToPlot is unused here — confirm before
        removing.
        """
        metadata = {
            'title':
            '%s Transfers by %s' % (titleType, reportRequest['grouping']),
            'ylabel': plotInfo['unit'],
            'starttime': reportRequest['startTime'],
            'endtime': reportRequest['endTime'],
            'span': plotInfo['granularity']
        }
        return self._generateTimedStackedBarPlot(filename,
                                                 plotInfo['graphDataDict'],
                                                 metadata)

    _reportQualityName = "Efficiency by protocol"

    def _reportQuality(self, reportRequest):
        """
        Report transfer efficiency (TransferOK / TransferTotal) per grouping,
        adding an overall 'Total' series when more than one group is present.
        """
        selectFields = (
            self._getSelectStringForGrouping(reportRequest['groupingFields']) +
            ", %s, %s, SUM(%s), SUM(%s)", reportRequest['groupingFields'][1] +
            ['startTime', 'bucketLength', 'TransferOK', 'TransferTotal'])
        retVal = self._getTimedData(
            reportRequest['startTime'], reportRequest['endTime'], selectFields,
            reportRequest['condDict'], reportRequest['groupingFields'], {
                'checkNone': True,
                'convertToGranularity': 'sum',
                'calculateProportionalGauges': False,
                'consolidationFunction': self._efficiencyConsolidation
            })
        if not retVal['OK']:
            return retVal
        dataDict, granularity = retVal['Value']
        self.stripDataField(dataDict, 0)
        if len(dataDict) > 1:
            # Get the total for the plot
            selectFields = ("'Total', %s, %s, SUM(%s),SUM(%s)", [
                'startTime', 'bucketLength', 'TransferOK', 'TransferTotal'
            ])
            retVal = self._getTimedData(
                reportRequest['startTime'], reportRequest['endTime'],
                selectFields, reportRequest['condDict'],
                reportRequest['groupingFields'], {
                    'checkNone': True,
                    'convertToGranularity': 'sum',
                    'calculateProportionalGauges': False,
                    'consolidationFunction': self._efficiencyConsolidation
                })
            if not retVal['OK']:
                return retVal
            totalDict = retVal['Value'][0]
            self.stripDataField(totalDict, 0)
            for key in totalDict:
                dataDict[key] = totalDict[key]
        return S_OK({'data': dataDict, 'granularity': granularity})

    def _plotQuality(self, reportRequest, plotInfo, filename):
        """Render the efficiency report as a quality plot."""
        metadata = {
            'title': 'Transfer quality by %s' % reportRequest['grouping'],
            'starttime': reportRequest['startTime'],
            'endtime': reportRequest['endTime'],
            'span': plotInfo['granularity']
        }
        return self._generateQualityPlot(filename, plotInfo['data'], metadata)

    _reportTransferedDataName = "Cumulative transferred data"

    def _reportTransferedData(self, reportRequest):
        """Report the cumulative transferred data volume (bytes) over time."""
        selectFields = (
            self._getSelectStringForGrouping(reportRequest['groupingFields']) +
            ", %s, %s, SUM(%s)", reportRequest['groupingFields'][1] +
            ['startTime', 'bucketLength', 'TransferSize'])
        retVal = self._getTimedData(reportRequest['startTime'],
                                    reportRequest['endTime'], selectFields,
                                    reportRequest['condDict'],
                                    reportRequest['groupingFields'], {})
        if not retVal['OK']:
            return retVal
        dataDict, granularity = retVal['Value']
        self.stripDataField(dataDict, 0)
        dataDict = self._fillWithZero(granularity, reportRequest['startTime'],
                                      reportRequest['endTime'], dataDict)
        # Accumulate bucket sums into a running total
        dataDict = self._accumulate(granularity, reportRequest['startTime'],
                                    reportRequest['endTime'], dataDict)
        baseDataDict, graphDataDict, maxValue, unitName = self._findSuitableUnit(
            dataDict, self._getAccumulationMaxValue(dataDict), "bytes")
        return S_OK({
            'data': baseDataDict,
            'graphDataDict': graphDataDict,
            'granularity': granularity,
            'unit': unitName
        })

    def _plotTransferedData(self, reportRequest, plotInfo, filename):
        """Render the cumulative transferred data report."""
        metadata = {
            'title': 'Transfered data by %s' % reportRequest['grouping'],
            'starttime': reportRequest['startTime'],
            'endtime': reportRequest['endTime'],
            'span': plotInfo['granularity'],
            'ylabel': plotInfo['unit'],
            'sort_labels': 'last_value'
        }
        return self._generateCumulativePlot(filename,
                                            plotInfo['graphDataDict'],
                                            metadata)

    def _reportThroughput(self, reportRequest):
        """Report the transfer throughput (bytes per second) over time."""
        selectFields = (
            self._getSelectStringForGrouping(reportRequest['groupingFields']) +
            ", %s, %s, SUM(%s)", reportRequest['groupingFields'][1] +
            ['startTime', 'bucketLength', 'TransferSize'])
        retVal = self._getTimedData(reportRequest['startTime'],
                                    reportRequest['endTime'], selectFields,
                                    reportRequest['condDict'],
                                    reportRequest['groupingFields'], {})
        if not retVal['OK']:
            return retVal
        dataDict, granularity = retVal['Value']
        self.stripDataField(dataDict, 0)
        # Convert bucket sums into rates (per second)
        dataDict, maxValue = self._divideByFactor(dataDict, granularity)
        dataDict = self._fillWithZero(granularity, reportRequest['startTime'],
                                      reportRequest['endTime'], dataDict)
        baseDataDict, graphDataDict, maxValue, unitName = self._findSuitableRateUnit(
            dataDict, self._getAccumulationMaxValue(dataDict), "bytes")
        return S_OK({
            'data': baseDataDict,
            'graphDataDict': graphDataDict,
            'granularity': granularity,
            'unit': unitName
        })

    def _plotThroughput(self, reportRequest, plotInfo, filename):
        """Render the throughput report as a timed stacked bar plot."""
        metadata = {
            'title': 'Throughput by %s' % reportRequest['grouping'],
            'ylabel': plotInfo['unit'],
            'starttime': reportRequest['startTime'],
            'endtime': reportRequest['endTime'],
            'span': plotInfo['granularity']
        }
        return self._generateTimedStackedBarPlot(filename,
                                                 plotInfo['graphDataDict'],
                                                 metadata)

    _reportDataTransferedName = "Pie chart of transferred data"

    def _reportDataTransfered(self, reportRequest):
        """Report the total transferred data per grouping (no time axis)."""
        selectFields = (
            self._getSelectStringForGrouping(reportRequest['groupingFields']) +
            ", SUM(%s)", reportRequest['groupingFields'][1] + ['TransferSize'])
        retVal = self._getSummaryData(reportRequest['startTime'],
                                      reportRequest['endTime'], selectFields,
                                      reportRequest['condDict'],
                                      reportRequest['groupingFields'], {})
        if not retVal['OK']:
            return retVal
        dataDict = retVal['Value']
        for key in dataDict:
            dataDict[key] = int(dataDict[key])
        return S_OK({'data': dataDict})

    def _plotDataTransfered(self, reportRequest, plotInfo, filename):
        """Render the total transferred data report as a pie chart."""
        metadata = {
            'title': 'Total data transfered by %s' % reportRequest['grouping'],
            'ylabel': 'bytes',
            'starttime': reportRequest['startTime'],
            'endtime': reportRequest['endTime']
        }
        return self._generatePiePlot(filename, plotInfo['data'], metadata)
class DataOperationPlotter(BaseReporter):
    """
    Reporter/plotter for the "DataOperation" accounting type.

    NOTE(review): this class is a functional duplicate of the
    DataOperationPlotter defined earlier in this file (same name, same
    behavior, different quote style) — confirm and deduplicate.
    """

    # Accounting type handled by this plotter
    _typeName = "DataOperation"
    # Key fields taken from the DataOperation accounting type definition
    _typeKeyFields = [dF[0] for dF in DataOperation().definitionKeyFields]

    def _translateGrouping(self, grouping):
        """
        Translate a grouping name into (select format, fields[, combine expr]).
        "Channel" is a synthetic grouping built as "Source -> Destination".
        """
        if grouping == "Channel":
            return ("%s, %s", ["Source",
                               "Destination"], "CONCAT( %s, ' -> ', %s )")
        else:
            return ("%s", [grouping])

    _reportSuceededTransfersName = "Successful transfers"

    def _reportSuceededTransfers(self, reportRequest):
        """Report successful transfers (failed count folded in as 'Failed')."""
        return self.__reportTransfers(reportRequest, "Succeeded",
                                      ("Failed", 0))

    _reportFailedTransfersName = "Failed transfers"

    def _reportFailedTransfers(self, reportRequest):
        """Report failed transfers (success count folded in as 'Succeeded')."""
        return self.__reportTransfers(reportRequest, "Failed",
                                      ("Succeeded", 1))

    def __reportTransfers(self, reportRequest, titleType,
                          togetherFieldsToPlot):
        """
        Shared implementation for the succeeded/failed transfer reports.

        NOTE(review): titleType is unused here — the title is set in
        __plotTransfers instead; confirm before removing.
        """
        selectFields = (
            self._getSelectStringForGrouping(reportRequest["groupingFields"]) +
            ", %s, %s, SUM(%s), SUM(%s)-SUM(%s)",
            reportRequest["groupingFields"][1] + [
                "startTime",
                "bucketLength",
                "TransferOK",
                "TransferTotal",
                "TransferOK",
            ],
        )
        retVal = self._getTimedData(
            reportRequest["startTime"],
            reportRequest["endTime"],
            selectFields,
            reportRequest["condDict"],
            reportRequest["groupingFields"],
            {},
        )
        if not retVal["OK"]:
            return retVal
        dataDict, granularity = retVal["Value"]
        strippedData = self.stripDataField(dataDict, togetherFieldsToPlot[1])
        if strippedData:
            dataDict[togetherFieldsToPlot[0]] = strippedData[0]
        # Convert bucket sums into rates (per second)
        dataDict, maxValue = self._divideByFactor(dataDict, granularity)
        dataDict = self._fillWithZero(granularity, reportRequest["startTime"],
                                      reportRequest["endTime"], dataDict)
        baseDataDict, graphDataDict, maxValue, unitName = self._findSuitableRateUnit(
            dataDict, self._getAccumulationMaxValue(dataDict), "files")
        return S_OK({
            "data": baseDataDict,
            "graphDataDict": graphDataDict,
            "granularity": granularity,
            "unit": unitName
        })

    def _plotSuceededTransfers(self, reportRequest, plotInfo, filename):
        """Plot the successful-transfers report."""
        return self.__plotTransfers(reportRequest, plotInfo, filename,
                                    "Succeeded", ("Failed", 0))

    def _plotFailedTransfers(self, reportRequest, plotInfo, filename):
        """Plot the failed-transfers report."""
        return self.__plotTransfers(reportRequest, plotInfo, filename,
                                    "Failed", ("Succeeded", 1))

    def __plotTransfers(self, reportRequest, plotInfo, filename, titleType,
                        togetherFieldsToPlot):
        """
        Render a transfers report as a timed stacked bar plot.

        NOTE(review): togetherFieldsToPlot is unused here — confirm before
        removing.
        """
        metadata = {
            "title":
            "%s Transfers by %s" % (titleType, reportRequest["grouping"]),
            "ylabel": plotInfo["unit"],
            "starttime": reportRequest["startTime"],
            "endtime": reportRequest["endTime"],
            "span": plotInfo["granularity"],
        }
        return self._generateTimedStackedBarPlot(filename,
                                                 plotInfo["graphDataDict"],
                                                 metadata)

    _reportQualityName = "Efficiency by protocol"

    def _reportQuality(self, reportRequest):
        """
        Report transfer efficiency (TransferOK / TransferTotal) per grouping,
        adding an overall 'Total' series when more than one group is present.
        """
        selectFields = (
            self._getSelectStringForGrouping(reportRequest["groupingFields"]) +
            ", %s, %s, SUM(%s), SUM(%s)",
            reportRequest["groupingFields"][1] +
            ["startTime", "bucketLength", "TransferOK", "TransferTotal"],
        )
        retVal = self._getTimedData(
            reportRequest["startTime"],
            reportRequest["endTime"],
            selectFields,
            reportRequest["condDict"],
            reportRequest["groupingFields"],
            {
                "checkNone": True,
                "convertToGranularity": "sum",
                "calculateProportionalGauges": False,
                "consolidationFunction": self._efficiencyConsolidation,
            },
        )
        if not retVal["OK"]:
            return retVal
        dataDict, granularity = retVal["Value"]
        self.stripDataField(dataDict, 0)
        if len(dataDict) > 1:
            # Get the total for the plot
            selectFields = (
                "'Total', %s, %s, SUM(%s),SUM(%s)",
                ["startTime", "bucketLength", "TransferOK", "TransferTotal"],
            )
            retVal = self._getTimedData(
                reportRequest["startTime"],
                reportRequest["endTime"],
                selectFields,
                reportRequest["condDict"],
                reportRequest["groupingFields"],
                {
                    "checkNone": True,
                    "convertToGranularity": "sum",
                    "calculateProportionalGauges": False,
                    "consolidationFunction": self._efficiencyConsolidation,
                },
            )
            if not retVal["OK"]:
                return retVal
            totalDict = retVal["Value"][0]
            self.stripDataField(totalDict, 0)
            for key in totalDict:
                dataDict[key] = totalDict[key]
        return S_OK({"data": dataDict, "granularity": granularity})

    def _plotQuality(self, reportRequest, plotInfo, filename):
        """Render the efficiency report as a quality plot."""
        metadata = {
            "title": "Transfer quality by %s" % reportRequest["grouping"],
            "starttime": reportRequest["startTime"],
            "endtime": reportRequest["endTime"],
            "span": plotInfo["granularity"],
        }
        return self._generateQualityPlot(filename, plotInfo["data"], metadata)

    _reportTransferedDataName = "Cumulative transferred data"

    def _reportTransferedData(self, reportRequest):
        """Report the cumulative transferred data volume (bytes) over time."""
        selectFields = (
            self._getSelectStringForGrouping(reportRequest["groupingFields"]) +
            ", %s, %s, SUM(%s)",
            reportRequest["groupingFields"][1] +
            ["startTime", "bucketLength", "TransferSize"],
        )
        retVal = self._getTimedData(
            reportRequest["startTime"],
            reportRequest["endTime"],
            selectFields,
            reportRequest["condDict"],
            reportRequest["groupingFields"],
            {},
        )
        if not retVal["OK"]:
            return retVal
        dataDict, granularity = retVal["Value"]
        self.stripDataField(dataDict, 0)
        dataDict = self._fillWithZero(granularity, reportRequest["startTime"],
                                      reportRequest["endTime"], dataDict)
        # Accumulate bucket sums into a running total
        dataDict = self._accumulate(granularity, reportRequest["startTime"],
                                    reportRequest["endTime"], dataDict)
        baseDataDict, graphDataDict, maxValue, unitName = self._findSuitableUnit(
            dataDict, self._getAccumulationMaxValue(dataDict), "bytes")
        return S_OK({
            "data": baseDataDict,
            "graphDataDict": graphDataDict,
            "granularity": granularity,
            "unit": unitName
        })

    def _plotTransferedData(self, reportRequest, plotInfo, filename):
        """Render the cumulative transferred data report."""
        metadata = {
            "title": "Transfered data by %s" % reportRequest["grouping"],
            "starttime": reportRequest["startTime"],
            "endtime": reportRequest["endTime"],
            "span": plotInfo["granularity"],
            "ylabel": plotInfo["unit"],
            "sort_labels": "last_value",
        }
        return self._generateCumulativePlot(filename,
                                            plotInfo["graphDataDict"],
                                            metadata)

    def _reportThroughput(self, reportRequest):
        """Report the transfer throughput (bytes per second) over time."""
        selectFields = (
            self._getSelectStringForGrouping(reportRequest["groupingFields"]) +
            ", %s, %s, SUM(%s)",
            reportRequest["groupingFields"][1] +
            ["startTime", "bucketLength", "TransferSize"],
        )
        retVal = self._getTimedData(
            reportRequest["startTime"],
            reportRequest["endTime"],
            selectFields,
            reportRequest["condDict"],
            reportRequest["groupingFields"],
            {},
        )
        if not retVal["OK"]:
            return retVal
        dataDict, granularity = retVal["Value"]
        self.stripDataField(dataDict, 0)
        # Convert bucket sums into rates (per second)
        dataDict, maxValue = self._divideByFactor(dataDict, granularity)
        dataDict = self._fillWithZero(granularity, reportRequest["startTime"],
                                      reportRequest["endTime"], dataDict)
        baseDataDict, graphDataDict, maxValue, unitName = self._findSuitableRateUnit(
            dataDict, self._getAccumulationMaxValue(dataDict), "bytes")
        return S_OK({
            "data": baseDataDict,
            "graphDataDict": graphDataDict,
            "granularity": granularity,
            "unit": unitName
        })

    def _plotThroughput(self, reportRequest, plotInfo, filename):
        """Render the throughput report as a timed stacked bar plot."""
        metadata = {
            "title": "Throughput by %s" % reportRequest["grouping"],
            "ylabel": plotInfo["unit"],
            "starttime": reportRequest["startTime"],
            "endtime": reportRequest["endTime"],
            "span": plotInfo["granularity"],
        }
        return self._generateTimedStackedBarPlot(filename,
                                                 plotInfo["graphDataDict"],
                                                 metadata)

    _reportDataTransferedName = "Pie chart of transferred data"

    def _reportDataTransfered(self, reportRequest):
        """Report the total transferred data per grouping (no time axis)."""
        selectFields = (
            self._getSelectStringForGrouping(reportRequest["groupingFields"]) +
            ", SUM(%s)",
            reportRequest["groupingFields"][1] + ["TransferSize"],
        )
        retVal = self._getSummaryData(
            reportRequest["startTime"],
            reportRequest["endTime"],
            selectFields,
            reportRequest["condDict"],
            reportRequest["groupingFields"],
            {},
        )
        if not retVal["OK"]:
            return retVal
        dataDict = retVal["Value"]
        for key in dataDict:
            dataDict[key] = int(dataDict[key])
        return S_OK({"data": dataDict})

    def _plotDataTransfered(self, reportRequest, plotInfo, filename):
        """Render the total transferred data report as a pie chart."""
        metadata = {
            "title": "Total data transfered by %s" % reportRequest["grouping"],
            "ylabel": "bytes",
            "starttime": reportRequest["startTime"],
            "endtime": reportRequest["endTime"],
        }
        return self._generatePiePlot(filename, plotInfo["data"], metadata)
Exemple #28
0
  def __monitorStorageElementStageRequests( self, storageElement, seReplicaIDs, replicaIDs ):
    """
    Poll one storage element for the status of its pending stage
    (bring-online) requests, record a DataOperation accounting entry,
    and update the stager database with the outcome.

    :param storageElement: name of the SE whose stage requests are checked
    :param seReplicaIDs: replica IDs pending on this SE
    :param replicaIDs: dict replicaID -> metadata dict containing 'PFN'
                       and optionally 'RequestID'
    """
    terminalReplicaIDs = {}  # replicaID -> reason, for permanent failures
    oldRequests = []         # replicaIDs not yet cached, to be retried later
    stagedReplicas = []      # replicaIDs confirmed staged (cached)
    # PFN is assumed unique within a single SE
    pfnRepIDs = {}
    pfnReqIDs = {}
    for replicaID in seReplicaIDs:
      pfn = replicaIDs[replicaID]['PFN']
      pfnRepIDs[pfn] = replicaID
      requestID = replicaIDs[replicaID].get( 'RequestID', None )
      if requestID:
        pfnReqIDs[pfn] = replicaIDs[replicaID]['RequestID']

    gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: Monitoring %s stage requests for %s." % ( len( pfnRepIDs ), storageElement ) )
    oAccounting = DataOperation()
    oAccounting.setStartTime()

    # NOTE(review): only PFNs that have a RequestID are queried here;
    # replicas without one are never checked — confirm this is intended.
    res = self.replicaManager.getStorageFileMetadata( pfnReqIDs.keys(), storageElement )
    if not res['OK']:
      # Whole-SE query failure: nothing to update, give up for this cycle
      gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Completely failed to monitor stage requests for replicas.", res['Message'] )
      return
    prestageStatus = res['Value']

    accountingDict = self.__newAccountingDict( storageElement )

    for pfn, reason in prestageStatus['Failed'].items():
      accountingDict['TransferTotal'] += 1
      if re.search( 'File does not exist', reason ):
        # Missing file: mark the replica as terminally failed
        gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: PFN did not exist in the StorageElement", pfn )
        terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN did not exist in the StorageElement'
    for pfn, staged in prestageStatus['Successful'].items():
      if staged and 'Cached' in staged and staged['Cached']:
        # File is on the disk cache: staging is complete
        accountingDict['TransferTotal'] += 1
        accountingDict['TransferOK'] += 1
        accountingDict['TransferSize'] += staged['Size']
        stagedReplicas.append( pfnRepIDs[pfn] )
      if staged and 'Cached' in staged and not staged['Cached']:
        # Still not cached: schedule the request for a retry
        oldRequests.append( pfnRepIDs[pfn] ); # only ReplicaIDs

    oAccounting.setValuesFromDict( accountingDict )
    oAccounting.setEndTime()
    gDataStoreClient.addRegister( oAccounting )

    # Update the states of the replicas in the database
    if terminalReplicaIDs:
      gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: %s replicas are terminally failed." % len( terminalReplicaIDs ) )
      res = self.stagerClient.updateReplicaFailure( terminalReplicaIDs )
      if not res['OK']:
        gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to update replica failures.", res['Message'] )
    if stagedReplicas:
      gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: %s staged replicas to be updated." % len( stagedReplicas ) )
      res = self.stagerClient.setStageComplete( stagedReplicas )
      if not res['OK']:
        gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to updated staged replicas.", res['Message'] )
      res = self.stagerClient.updateReplicaStatus( stagedReplicas, 'Staged' )
      if not res['OK']:
        gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to insert replica status.", res['Message'] )
    if oldRequests:
      gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: %s old requests will be retried." % len( oldRequests ) )
      res = self.__wakeupOldRequests( oldRequests )
      if not res['OK']:
        gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to wakeup old requests.", res['Message'] )
    return
Exemple #29
0
class DataOperationSender:
    """
    class:: DataOperationSender
    It reads the MonitoringBackends option to decide whether send and commit data operation to either Accounting or Monitoring.
    """

    # Initialize the object so that the Reporters are created only once
    def __init__(self):
        monitoringType = "DataOperation"
        # Will use the `MonitoringBackends/Default` value as monitoring backend unless a flag for `MonitoringBackends/DataOperation` is set.
        self.monitoringOptions = Operations().getMonitoringBackends(
            monitoringType)
        if "Monitoring" in self.monitoringOptions:
            self.dataOperationReporter = MonitoringReporter(monitoringType)
        if "Accounting" in self.monitoringOptions:
            self.dataOp = DataOperation()

    def sendData(self,
                 baseDict,
                 commitFlag=False,
                 delayedCommit=False,
                 startTime=False,
                 endTime=False):
        """
        Sends the input to Monitoring or Accounting based on the monitoringOptions

        :param dict baseDict: contains a key/value pair
        :param bool commitFlag: decides whether to commit the record or not.
        :param bool delayedCommit: decides whether to commit the record with delay (only for sending to Accounting)
        :param int startTime: epoch time, start time of the plot
        :param int endTime: epoch time, end time of the plot

        :returns: S_OK / S_ERROR
        """
        # The inner functions are closures over `self`, `baseDict` and the
        # flags; they must not take parameters, since they are dispatched
        # below with no arguments.
        def sendMonitoring():
            """Add the record to the Monitoring reporter, committing if requested."""
            baseDict["ExecutionSite"] = DIRAC.siteName()
            baseDict["Channel"] = baseDict["Source"] + "->" + baseDict[
                "Destination"]
            self.dataOperationReporter.addRecord(baseDict)
            if commitFlag:
                result = self.dataOperationReporter.commit()
                sLog.debug("Committing data operation to monitoring")
                if not result["OK"]:
                    sLog.error("Could not commit data operation to monitoring",
                               result["Message"])
                    return result
                sLog.debug("Done committing to monitoring")
            return S_OK()

        def sendAccounting():
            """Register the record with the Accounting DataStore, committing per the flags."""
            self.dataOp.setValuesFromDict(baseDict)
            if startTime:
                self.dataOp.setStartTime(startTime)
                self.dataOp.setEndTime(endTime)
            else:
                self.dataOp.setStartTime()
                self.dataOp.setEndTime()
            # Adding only to register
            if not commitFlag and not delayedCommit:
                return gDataStoreClient.addRegister(self.dataOp)

            # Adding to register and committing
            if commitFlag and not delayedCommit:
                gDataStoreClient.addRegister(self.dataOp)
                result = gDataStoreClient.commit()
                sLog.debug("Committing data operation to accounting")
                if not result["OK"]:
                    sLog.error("Could not commit data operation to accounting",
                               result["Message"])
                    return result
                sLog.debug("Done committing to accounting")
            # Only late committing
            else:
                result = self.dataOp.delayedCommit()
                if not result["OK"]:
                    sLog.error(
                        "Could not delay-commit data operation to accounting")
                    return result
            return S_OK()

        # Explicit dispatch table instead of a `locals()` lookup.
        # NOTE: in the previous version this loop was nested inside
        # sendAccounting (which was never called), so sendData never
        # actually dispatched to any backend.
        senders = {"Monitoring": sendMonitoring, "Accounting": sendAccounting}

        # Send data and commit prioritizing the first monitoring option in the list
        for backend in self.monitoringOptions:
            res = senders[backend]()
            if not res["OK"]:
                return res
        return S_OK()

    # Call this method in order to commit all records added but not yet committed to Accounting and Monitoring
    def concludeSending(self):
        """Commit all records added but not yet committed, per configured backend.

        :returns: S_OK / S_ERROR (first failing backend's result)
        """
        def commitAccounting():
            result = gDataStoreClient.commit()
            sLog.debug(
                "Concluding the sending and committing data operation to accounting"
            )
            if not result["OK"]:
                sLog.error("Could not commit data operation to accounting",
                           result["Message"])
            sLog.debug("Committing to accounting concluded")
            return result

        def commitMonitoring():
            result = self.dataOperationReporter.commit()
            sLog.debug("Committing data operation to monitoring")
            if not result["OK"]:
                sLog.error("Could not commit data operation to monitoring",
                           result["Message"])
            sLog.debug("Committing to monitoring concluded")
            return result

        # Explicit dispatch table: a `locals()` lookup only works by accident
        # of CPython and breaks if the functions are renamed or moved.
        committers = {
            "Accounting": commitAccounting,
            "Monitoring": commitMonitoring,
        }

        # Commit data prioritizing first monitoring option in the list
        for backend in self.monitoringOptions:
            res = committers[backend]()
            if not res["OK"]:
                return res
        return S_OK()
Exemple #30
0
    def addAccountingOperation(self, lfns, startDate, elapsedTime,
                               storageParameters, callRes):
        """
        Generates a DataOperation accounting if needs to be, and adds it to the DataStore client cache

        :param lfns: list of lfns on which we attempted the operation
        :param startDate: datetime, start of the operation
        :param elapsedTime: time (seconds) the operation took
        :param storageParameters: the parameters of the plugins used to perform the operation
        :param callRes: the return of the method call, S_OK or S_ERROR

        The operation is generated with the OperationType "se.methodName"
        The TransferSize and TransferTotal for directory methods actually take into
        account the files inside the directory, and not the amount of directory given
        as parameter
        """

        # Only account for methods that actually move/remove data
        if self.methodName not in (self.readMethods + self.writeMethods +
                                   self.removeMethods):
            return

        baseAccountingDict = {}
        baseAccountingDict['OperationType'] = 'se.%s' % self.methodName
        baseAccountingDict['User'] = getProxyInfo().get('Value', {}).get(
            'username', 'unknown')
        baseAccountingDict['RegistrationTime'] = 0.0
        baseAccountingDict['RegistrationOK'] = 0
        baseAccountingDict['RegistrationTotal'] = 0

        # if it is a get method, then source and destination of the transfer should be inverted
        if self.methodName == 'getFile':
            baseAccountingDict['Destination'] = siteName()
            baseAccountingDict['Source'] = self.name
        else:
            baseAccountingDict['Destination'] = self.name
            baseAccountingDict['Source'] = siteName()

        baseAccountingDict['TransferTotal'] = 0
        baseAccountingDict['TransferOK'] = 0
        baseAccountingDict['TransferSize'] = 0
        baseAccountingDict['TransferTime'] = 0.0
        baseAccountingDict['FinalStatus'] = 'Successful'

        oDataOperation = DataOperation()
        oDataOperation.setValuesFromDict(baseAccountingDict)
        oDataOperation.setStartTime(startDate)
        oDataOperation.setEndTime(startDate +
                                  datetime.timedelta(seconds=elapsedTime))
        oDataOperation.setValueByKey('TransferTime', elapsedTime)
        oDataOperation.setValueByKey(
            'Protocol', storageParameters.get('Protocol', 'unknown'))

        if not callRes['OK']:
            # Everything failed
            oDataOperation.setValueByKey('TransferTotal', len(lfns))
            oDataOperation.setValueByKey('FinalStatus', 'Failed')
        else:

            succ = callRes.get('Value', {}).get('Successful', {})
            failed = callRes.get('Value', {}).get('Failed', {})

            totalSize = 0
            # We don't take len(lfns) in order to make two
            # separate entries in case of few failures
            totalSucc = len(succ)

            if self.methodName in ('putFile', 'getFile'):
                # putFile and getFile return for each entry
                # in the successful dir the size of the corresponding file
                totalSize = sum(succ.values())

            elif self.methodName in ('putDirectory', 'getDirectory'):
                # putDirectory and getDirectory return for each dir name
                # a dictionary with the keys 'Files' and 'Size'
                totalSize = sum(
                    val.get('Size', 0) for val in succ.values()
                    if isinstance(val, dict))
                totalSucc = sum(
                    val.get('Files', 0) for val in succ.values()
                    if isinstance(val, dict))

            oDataOperation.setValueByKey('TransferSize', totalSize)
            oDataOperation.setValueByKey('TransferTotal', totalSucc)
            oDataOperation.setValueByKey('TransferOK', totalSucc)

            # Use the safely-extracted `failed` dict: indexing
            # callRes['Value']['Failed'] directly would raise KeyError
            # when the key is absent from a successful result.
            if failed:
                oDataOperationFailed = copy.deepcopy(oDataOperation)
                oDataOperationFailed.setValueByKey('TransferTotal',
                                                   len(failed))
                oDataOperationFailed.setValueByKey('TransferOK', 0)
                oDataOperationFailed.setValueByKey('TransferSize', 0)
                oDataOperationFailed.setValueByKey('FinalStatus', 'Failed')

                accRes = gDataStoreClient.addRegister(oDataOperationFailed)
                if not accRes['OK']:
                    self.log.error("Could not send failed accounting report",
                                   accRes['Message'])

        accRes = gDataStoreClient.addRegister(oDataOperation)
        if not accRes['OK']:
            self.log.error("Could not send accounting report",
                           accRes['Message'])
Exemple #31
0
    def __monitorStorageElementStageRequests(self, storageElement, seReplicaIDs, replicaIDs):
        """Check staging progress on one SE, record accounting, and update replica states.

        Queries the SE for file metadata of the pending stage requests, then:
        marks missing files as terminally failed, flags cached files as staged,
        and queues not-yet-cached ones for retry.
        """
        terminal = {}  # replicaID -> terminal failure reason
        staleRequests = []  # replicaIDs to be retried
        confirmedStaged = []  # replicaIDs confirmed cached
        replicaIDByPFN = {}
        requestIDByPFN = {}
        for repID in seReplicaIDs:
            meta = replicaIDs[repID]
            replicaIDByPFN[meta["PFN"]] = repID
            if meta.get("RequestID", None):
                requestIDByPFN[meta["PFN"]] = meta["RequestID"]

        gLogger.info(
            "StageMonitor.__monitorStorageElementStageRequests: Monitoring %s stage requests for %s."
            % (len(replicaIDByPFN), storageElement)
        )
        accountingRecord = DataOperation()
        accountingRecord.setStartTime()

        metaRes = StorageElement(storageElement).getFileMetadata(requestIDByPFN)
        if not metaRes["OK"]:
            gLogger.error(
                "StageMonitor.__monitorStorageElementStageRequests: Completely failed to monitor stage requests for replicas.",
                metaRes["Message"],
            )
            return
        stageStatus = metaRes["Value"]

        accountingDict = self.__newAccountingDict(storageElement)

        for pfn, reason in stageStatus["Failed"].items():
            accountingDict["TransferTotal"] += 1
            if re.search("File does not exist", reason):
                gLogger.error(
                    "StageMonitor.__monitorStorageElementStageRequests: PFN did not exist in the StorageElement", pfn
                )
                terminal[replicaIDByPFN[pfn]] = "PFN did not exist in the StorageElement"
        for pfn, staged in stageStatus["Successful"].items():
            # Entries without a "Cached" flag carry no usable status
            if not (staged and "Cached" in staged):
                continue
            if staged["Cached"]:
                accountingDict["TransferTotal"] += 1
                accountingDict["TransferOK"] += 1
                accountingDict["TransferSize"] += staged["Size"]
                confirmedStaged.append(replicaIDByPFN[pfn])
            else:
                # only ReplicaIDs
                staleRequests.append(replicaIDByPFN[pfn])

        accountingRecord.setValuesFromDict(accountingDict)
        accountingRecord.setEndTime()
        gDataStoreClient.addRegister(accountingRecord)

        # Update the states of the replicas in the database
        if terminal:
            gLogger.info(
                "StageMonitor.__monitorStorageElementStageRequests: %s replicas are terminally failed."
                % len(terminal)
            )
            updateRes = self.stagerClient.updateReplicaFailure(terminal)
            if not updateRes["OK"]:
                gLogger.error(
                    "StageMonitor.__monitorStorageElementStageRequests: Failed to update replica failures.",
                    updateRes["Message"],
                )
        if confirmedStaged:
            gLogger.info(
                "StageMonitor.__monitorStorageElementStageRequests: %s staged replicas to be updated."
                % len(confirmedStaged)
            )
            updateRes = self.stagerClient.setStageComplete(confirmedStaged)
            if not updateRes["OK"]:
                gLogger.error(
                    "StageMonitor.__monitorStorageElementStageRequests: Failed to updated staged replicas.",
                    updateRes["Message"],
                )
            updateRes = self.stagerClient.updateReplicaStatus(confirmedStaged, "Staged")
            if not updateRes["OK"]:
                gLogger.error(
                    "StageMonitor.__monitorStorageElementStageRequests: Failed to insert replica status.",
                    updateRes["Message"],
                )
        if staleRequests:
            gLogger.info(
                "StageMonitor.__monitorStorageElementStageRequests: %s old requests will be retried." % len(staleRequests)
            )
            updateRes = self.__wakeupOldRequests(staleRequests)
            if not updateRes["OK"]:
                gLogger.error(
                    "StageMonitor.__monitorStorageElementStageRequests: Failed to wakeup old requests.", updateRes["Message"]
                )
        return