def monitorStageRequests(self):
    """Handle the StageSubmitted -> Staged transition of the Replicas.

    This is the third logical task: it fetches all StageSubmitted replicas,
    monitors the stage requests per storage element, and commits any
    accounting records that were produced.
    """
    res = self.__getStageSubmittedReplicas()
    if not res['OK']:
        gLogger.fatal(
            "StageMonitor.monitorStageRequests: Failed to get replicas from StorageManagementDB.",
            res['Message'])
        return res
    submitted = res['Value']
    if not submitted:
        gLogger.info(
            "StageMonitor.monitorStageRequests: There were no StageSubmitted replicas found"
        )
        return res
    replicaIDs = submitted['ReplicaIDs']
    gLogger.info(
        "StageMonitor.monitorStageRequests: Obtained %s StageSubmitted replicas for monitoring."
        % len(replicaIDs))
    for storageElement, seReplicaIDs in submitted['SEReplicas'].items():
        self.__monitorStorageElementStageRequests(storageElement,
                                                  seReplicaIDs, replicaIDs)

    gDataStoreClient.commit()

    return S_OK()
Exemple #2
0
def test_addAndRemoveDataOperation():
    """Insert one DataOperation record, query it via reports, then remove it."""
    # insert a single record and flush it to the server
    register = createDataOperationAccountingRecord()
    register.setStartTime()
    register.setEndTime()
    result = gDataStoreClient.addRegister(register)
    assert result['OK']
    result = gDataStoreClient.commit()
    assert result['OK']

    reports = ReportsClient()

    assert reports.listReports('DataOperation')['OK']

    assert reports.listUniqueKeyValues('DataOperation')['OK']

    result = reports.getReport('DataOperation', 'Successful transfers',
                               datetime.datetime.utcnow(),
                               datetime.datetime.utcnow(), {}, 'Destination')
    assert result['OK']

    # clean up: remove the record that was just inserted
    result = gDataStoreClient.remove(register)
    assert result['OK']
def test_addAndRemove():
  """Round-trip one accounting record through add/commit/report/remove."""
  # insert one plain accounting record
  rec = createAccountingRecord()
  rec.setStartTime()
  rec.setEndTime()
  insertRes = gDataStoreClient.addRegister(rec)
  assert insertRes['OK']
  insertRes = gDataStoreClient.commit()
  assert insertRes['OK']

  reportsClient = ReportsClient()

  assert reportsClient.listReports('DataOperation')['OK']

  assert reportsClient.listUniqueKeyValues('DataOperation')['OK']

  reportRes = reportsClient.getReport('DataOperation', 'Successful transfers',
                                      datetime.datetime.utcnow(),
                                      datetime.datetime.utcnow(),
                                      {}, 'Destination')
  assert reportRes['OK']

  # remove the record again
  removeRes = gDataStoreClient.remove(rec)
  assert removeRes['OK']
Exemple #4
0
def test_addAndRemoveStorageOccupancy():
    """Insert one StorageOccupancy record, query it via reports, remove it."""
    # register a single StorageOccupancy record and push it to the server
    occupancy = createStorageOccupancyAccountingRecord()
    occupancy.setStartTime()
    occupancy.setEndTime()
    assert gDataStoreClient.addRegister(occupancy)['OK']
    assert gDataStoreClient.commit()['OK']

    reports = ReportsClient()

    assert reports.listReports('StorageOccupancy')['OK']

    assert reports.listUniqueKeyValues('StorageOccupancy')['OK']

    reportRes = reports.getReport('StorageOccupancy', 'Free and Used Space',
                                  datetime.datetime.utcnow(),
                                  datetime.datetime.utcnow(),
                                  {}, 'StorageElement')
    assert reportRes['OK']

    # drop the record that was just inserted
    assert gDataStoreClient.remove(occupancy)['OK']
Exemple #5
0
    def accountPilots(self, pilotsToAccount, connection):
        """Send accounting records for the given pilots and update their status.

        :param dict pilotsToAccount: pilot reference -> info dict with at least
            the 'Status', 'DestinationSite' and 'StatusDate' keys
        :param connection: DB connection reused for all pilotDB queries
        :returns: S_OK/S_ERROR
        """
        accountingFlag = False
        pae = self.am_getOption('PilotAccountingEnabled', 'yes')
        if pae.lower() == "yes":
            accountingFlag = True

        if not pilotsToAccount:
            self.log.info('No pilots to Account')
            return S_OK()

        accountingSent = False
        if accountingFlag:
            # list() instead of dict_keys: a real sequence is safe across the DB
            # API under Python 3, matching the modernized version of this method
            retVal = self.pilotDB.getPilotInfo(list(pilotsToAccount),
                                               conn=connection)
            if not retVal['OK']:
                self.log.error('Fail to retrieve Info for pilots',
                               retVal['Message'])
                return retVal
            dbData = retVal['Value']
            # overwrite non-final DB states with the fresher info we were given
            for pref in dbData:
                if pref in pilotsToAccount:
                    if dbData[pref]['Status'] not in self.finalStateList:
                        dbData[pref]['Status'] = pilotsToAccount[pref][
                            'Status']
                        dbData[pref]['DestinationSite'] = pilotsToAccount[
                            pref]['DestinationSite']
                        dbData[pref]['LastUpdateTime'] = pilotsToAccount[pref][
                            'StatusDate']

            retVal = self.__addPilotsAccountingReport(dbData)
            if not retVal['OK']:
                # was a copy-pasted "retrieve Info" message; report the real failure
                self.log.error('Failed to add pilots accounting report',
                               retVal['Message'])
                return retVal

            self.log.info("Sending accounting records...")
            retVal = gDataStoreClient.commit()
            if not retVal['OK']:
                self.log.error("Can't send accounting reports",
                               retVal['Message'])
            else:
                self.log.info("Accounting sent for %s pilots" %
                              len(pilotsToAccount))
                accountingSent = True

        # update pilot statuses either when accounting is disabled or succeeded
        if not accountingFlag or accountingSent:
            for pRef in pilotsToAccount:
                pDict = pilotsToAccount[pRef]
                self.log.verbose('Setting Status for %s to %s' %
                                 (pRef, pDict['Status']))
                self.pilotDB.setPilotStatus(pRef,
                                            pDict['Status'],
                                            pDict['DestinationSite'],
                                            pDict['StatusDate'],
                                            conn=connection)

        return S_OK()
Exemple #6
0
    def accountPilots(self, pilotsToAccount, connection):
        """Account for pilots: ship accounting records, then update pilot status."""
        accountingFlag = self.am_getOption("PilotAccountingEnabled",
                                           "yes").lower() == "yes"

        if not pilotsToAccount:
            self.log.info("No pilots to Account")
            return S_OK()

        accountingSent = False
        if accountingFlag:
            result = self.pilotDB.getPilotInfo(list(pilotsToAccount),
                                               conn=connection)
            if not result["OK"]:
                self.log.error("Fail to retrieve Info for pilots",
                               result["Message"])
                return result
            pilotInfo = result["Value"]
            # refresh any non-final DB state from the data we were handed
            for ref, info in pilotInfo.items():
                if ref not in pilotsToAccount:
                    continue
                if info["Status"] in PilotStatus.PILOT_FINAL_STATES:
                    continue
                fresh = pilotsToAccount[ref]
                info["Status"] = fresh["Status"]
                info["DestinationSite"] = fresh["DestinationSite"]
                info["LastUpdateTime"] = fresh["StatusDate"]

            result = self._addPilotsAccountingReport(pilotInfo)
            if not result["OK"]:
                self.log.error("Fail to retrieve Info for pilots",
                               result["Message"])
                return result

            self.log.info("Sending accounting records...")
            result = gDataStoreClient.commit()
            if not result["OK"]:
                self.log.error("Can't send accounting reports",
                               result["Message"])
            else:
                self.log.info("Accounting sent for %s pilots" %
                              len(pilotsToAccount))
                accountingSent = True

        # statuses are updated when accounting is disabled or was sent OK
        if not accountingFlag or accountingSent:
            for ref, info in pilotsToAccount.items():
                self.log.verbose("Setting Status for %s to %s" %
                                 (ref, info["Status"]))
                self.pilotDB.setPilotStatus(ref,
                                            info["Status"],
                                            info["DestinationSite"],
                                            info["StatusDate"],
                                            conn=connection)

        return S_OK()
Exemple #7
0
 def commit(self):
   """Register this record with the DataStore client and flush it to the server."""
   result = gDataStoreClient.addRegister(self)
   if not result['OK']:
     return result
   return gDataStoreClient.commit()
 def commit(self):
     """Commit this register to the accounting server.

     Propagates an addRegister failure; otherwise returns the commit result.
     """
     addResult = gDataStoreClient.addRegister(self)
     return addResult if not addResult['OK'] else gDataStoreClient.commit()
    def _storeCommand(self, results):
        """ _storeCommand

        Adds SpaceToken records to accounting, on top of what the derived
        method does.

        :param dict results: something like {'ElementName': 'CERN-HIST-EOS',
                                             'Endpoint': 'httpg://srm-eoslhcb-bis.cern.ch:8443/srm/v2/server',
                                             'Free': 3264963586.10073,
                                             'Total': 8000000000.0,
                                             'SpaceReservation': 'LHCb-Disk'}
        :returns: S_OK/S_ERROR dict
        """

        res = super(FreeDiskSpaceCommand, self)._storeCommand(results)

        if not res['OK']:
            return res

        siteRes = DMSHelpers().getLocalSiteForSE(results['ElementName'])
        if not siteRes['OK']:
            return siteRes
        if not siteRes['Value']:
            return S_OK()

        spaceReservation = results.get('SpaceReservation')

        accountingDict = {
            'SpaceToken': spaceReservation,
            'Endpoint': results['Endpoint'],
            'Site': siteRes['Value']
        }

        results['Used'] = results['Total'] - results['Free']

        for sType in ['Total', 'Free', 'Used']:
            spaceTokenAccounting = SpaceToken()
            spaceTokenAccounting.setNowAsStartAndEndTime()
            spaceTokenAccounting.setValuesFromDict(accountingDict)
            spaceTokenAccounting.setValueByKey('SpaceType', sType)
            spaceTokenAccounting.setValueByKey(
                'Space', int(convertSizeUnits(results[sType], 'MB', 'B')))

            # previously ignored: a failed addRegister silently dropped the record
            res = gDataStoreClient.addRegister(spaceTokenAccounting)
            if not res['OK']:
                self.log.warn("Could not add register", res['Message'])
                continue

        # propagate the commit result instead of unconditionally returning S_OK,
        # consistent with the StorageOccupancy variant of this method
        return gDataStoreClient.commit()
Exemple #10
0
    def sendPilotAccounting(self, pilotDict):
        """Build and send one PilotAccounting record per pilot, then flag them.

        For each pilot: resolves the submitting user from its DN, maps the
        destination CE to a site, registers the record with the DataStore
        client and marks the pilot as accounted in pilotAgentsDB. Finally all
        registers are committed in one go.

        :param dict pilotDict: pilot reference -> info dict; reads the keys
            'LastUpdateTime', 'SubmissionTime', 'OwnerDN', 'OwnerGroup',
            'DestinationSite', 'GridType', 'Broker', 'Status', 'PilotID'
            and (optionally) 'Jobs'
        :returns: S_OK on success, or the failed commit result
        """
        for pRef in pilotDict:
            self.log.verbose('Preparing accounting record for pilot %s' % pRef)
            pA = PilotAccounting()
            pA.setEndTime(pilotDict[pRef]['LastUpdateTime'])
            pA.setStartTime(pilotDict[pRef]['SubmissionTime'])
            retVal = CS.getUsernameForDN(pilotDict[pRef]['OwnerDN'])
            if not retVal['OK']:
                # fall back to a placeholder username when the DN is unknown
                userName = '******'
                self.log.error("Can't determine username for dn:",
                               pilotDict[pRef]['OwnerDN'])
            else:
                userName = retVal['Value']
            pA.setValueByKey('User', userName)
            pA.setValueByKey('UserGroup', pilotDict[pRef]['OwnerGroup'])
            result = getSiteForCE(pilotDict[pRef]['DestinationSite'])
            if result['OK'] and result['Value'].strip():
                pA.setValueByKey('Site', result['Value'].strip())
            else:
                pA.setValueByKey('Site', 'Unknown')
            pA.setValueByKey('GridCE', pilotDict[pRef]['DestinationSite'])
            pA.setValueByKey('GridMiddleware', pilotDict[pRef]['GridType'])
            pA.setValueByKey('GridResourceBroker', pilotDict[pRef]['Broker'])
            pA.setValueByKey('GridStatus', pilotDict[pRef]['Status'])
            if not 'Jobs' in pilotDict[pRef]:
                pA.setValueByKey('Jobs', 0)
            else:
                pA.setValueByKey('Jobs', len(pilotDict[pRef]['Jobs']))
            self.log.info("Adding accounting record for pilot %s" %
                          pilotDict[pRef]['PilotID'])
            retVal = gDataStoreClient.addRegister(pA)
            if not retVal['OK']:
                self.log.error('Failed to send accounting info for pilot ',
                               pRef)
            else:
                # Set up AccountingSent flag
                result = pilotAgentsDB.setAccountingFlag(pRef)
                if not result['OK']:
                    self.log.error('Failed to set accounting flag for pilot ',
                                   pRef)

        self.log.info('Committing accounting records for %d pilots' %
                      len(pilotDict))
        result = gDataStoreClient.commit()
        if result['OK']:
            # NOTE(review): the flag was already set per-pilot above after a
            # successful addRegister; this second pass re-sets it for every
            # pilot once the commit succeeds. Looks redundant — confirm intent.
            for pRef in pilotDict:
                self.log.verbose('Setting AccountingSent flag for pilot %s' %
                                 pRef)
                result = pilotAgentsDB.setAccountingFlag(pRef)
                if not result['OK']:
                    self.log.error('Failed to set accounting flag for pilot ',
                                   pRef)
        else:
            return result

        return S_OK()
 def __commitRecords(self):
   """Commit pending accounting records, if any, and reset the counter."""
   if not self.recordsToCommit:
     return
   result = gDataStoreClient.commit()
   if not result['OK']:
     self.log.error("Accounting ERROR: commit returned %s" % result)
     return
   self.log.notice("%d records committed " % self.recordsToCommit)
   self.recordsToCommit = 0
   self.log.notice("commit for DataStorage returned: %s" % result)
Exemple #12
0
  def monitorStageRequests(self):
    """Handle the StageSubmitted -> Staged transition of the Replicas (third logical task)."""
    res = self.__getStageSubmittedReplicas()
    if not res['OK']:
      gLogger.fatal("StageMonitor.monitorStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'])
      return res
    submitted = res['Value']
    if not submitted:
      gLogger.info("StageMonitor.monitorStageRequests: There were no StageSubmitted replicas found")
      return res
    replicaIDs = submitted['ReplicaIDs']
    gLogger.info("StageMonitor.monitorStageRequests: Obtained %s StageSubmitted replicas for monitoring." % len(replicaIDs))
    for se, seReplicaIDs in submitted['SEReplicas'].items():
      self.__monitorStorageElementStageRequests(se, seReplicaIDs, replicaIDs)

    gDataStoreClient.commit()

    return S_OK()
Exemple #13
0
 def commitAccounting():
     """Commit pending data-operation records to accounting.

     Logs an error on failure and returns the commit result either way.
     """
     commitResult = gDataStoreClient.commit()
     sLog.debug(
         "Concluding the sending and committing data operation to accounting"
     )
     if not commitResult["OK"]:
         sLog.error("Could not commit data operation to accounting",
                    commitResult["Message"])
     sLog.debug("Committing to accounting concluded")
     return commitResult
Exemple #14
0
 def __commitAccounting(self):
     """Commit buffered Popularity records; reset the row counter on success."""
     commitRes = gDataStoreClient.commit()
     if commitRes['OK']:
         self.log.info(
             "%s records for Popularity type successfully committed" %
             self.numPopRows)
         self.numPopRows = 0
     else:
         self.log.error(
             "while committing %d Popularity records" % self.numPopRows,
             commitRes['Message'])
     return commitRes
Exemple #15
0
    def _storeCommand(self, results):
        """
        Cache the occupancy result and push StorageOccupancy accounting records.

        Stores the results in the cache (SpaceTokenOccupancyCache), then adds
        one accounting record per space type (Total / Free / Used) and commits.

        :param dict results: something like {'ElementName': 'CERN-HIST-EOS',
                                             'Endpoint': 'httpg://srm-eoslhcb-bis.cern.ch:8443/srm/v2/server',
                                             'Free': 3264963586.10073,
                                             'Total': 8000000000.0,
                                             'SpaceReservation': 'LHCb-Disk'}
        :returns: S_OK/S_ERROR dict
        """

        # Stores in cache
        cacheRes = self.rmClient.addOrModifySpaceTokenOccupancyCache(
            endpoint=results['Endpoint'],
            lastCheckTime=datetime.utcnow(),
            free=results['Free'],
            total=results['Total'],
            token=results['ElementName'])
        if not cacheRes['OK']:
            self.log.error("Error calling addOrModifySpaceTokenOccupancyCache",
                           cacheRes['Message'])
            return cacheRes

        # Now proceed with the accounting
        siteRes = DMSHelpers().getLocalSiteForSE(results['ElementName'])
        if not siteRes['OK']:
            return siteRes

        site = siteRes['Value'] if siteRes['Value'] else 'unassigned'
        accountingDict = {
            'StorageElement': results['ElementName'],
            'Endpoint': results['Endpoint'],
            'Site': site
        }

        results['Used'] = results['Total'] - results['Free']

        for spaceType in ('Total', 'Free', 'Used'):
            occupancyRecord = StorageOccupancy()
            occupancyRecord.setNowAsStartAndEndTime()
            occupancyRecord.setValuesFromDict(accountingDict)
            occupancyRecord.setValueByKey('SpaceType', spaceType)
            occupancyRecord.setValueByKey(
                'Space', int(convertSizeUnits(results[spaceType], 'MB', 'B')))

            addRes = gDataStoreClient.addRegister(occupancyRecord)
            if not addRes['OK']:
                self.log.warn("Could not commit register", addRes['Message'])

        return gDataStoreClient.commit()
Exemple #16
0
    def _storeCommand(self, results):
        """
        Store the occupancy numbers in the SpaceTokenOccupancyCache and add
        StorageOccupancy accounting records (one per space type), then commit.

        :param dict results: something like {'ElementName': 'CERN-HIST-EOS',
                                             'Endpoint': 'httpg://srm-eoslhcb-bis.cern.ch:8443/srm/v2/server',
                                             'Free': 3264963586.10073,
                                             'Total': 8000000000.0}
        :returns: S_OK/S_ERROR dict
        """

        # Cache the raw numbers first
        cacheUpdate = self.rmClient.addOrModifySpaceTokenOccupancyCache(
            endpoint=results["Endpoint"],
            lastCheckTime=datetime.utcnow(),
            free=results["Free"],
            total=results["Total"],
            token=results["ElementName"],
        )
        if not cacheUpdate["OK"]:
            self.log.error("Error calling addOrModifySpaceTokenOccupancyCache",
                           cacheUpdate["Message"])
            return cacheUpdate

        # Accounting part
        localSite = DMSHelpers().getLocalSiteForSE(results["ElementName"])
        if not localSite["OK"]:
            return localSite

        baseRecord = {
            "StorageElement": results["ElementName"],
            "Endpoint": results["Endpoint"],
            "Site": localSite["Value"] or "unassigned",
        }

        results["Used"] = results["Total"] - results["Free"]

        for spaceType in ("Total", "Free", "Used"):
            record = StorageOccupancy()
            record.setNowAsStartAndEndTime()
            record.setValuesFromDict(baseRecord)
            record.setValueByKey("SpaceType", spaceType)
            record.setValueByKey(
                "Space", int(convertSizeUnits(results[spaceType], "MB", "B")))

            addRes = gDataStoreClient.addRegister(record)
            if not addRes["OK"]:
                self.log.warn("Could not commit register", addRes["Message"])

        return gDataStoreClient.commit()
Exemple #17
0
def test_addAndRemoveStorageOccupancy():
    """Add a single StorageOccupancy record, flush it, then drop it again."""
    occupancyRecord = createStorageOccupancyAccountingRecord()
    occupancyRecord.setStartTime()
    occupancyRecord.setEndTime()
    assert gDataStoreClient.addRegister(occupancyRecord)['OK']
    assert gDataStoreClient.commit()['OK']
    # remove the record that was just inserted
    assert gDataStoreClient.remove(occupancyRecord)['OK']
Exemple #18
0
def test_addAndRemoveDataperation():
    """Register one DataOperation record, push it to the server, remove it."""
    dataOpRecord = createDataOperationAccountingRecord()
    dataOpRecord.setStartTime()
    dataOpRecord.setEndTime()
    assert gDataStoreClient.addRegister(dataOpRecord)["OK"]
    assert gDataStoreClient.commit()["OK"]
    # drop the record again
    assert gDataStoreClient.remove(dataOpRecord)["OK"]
  def sendPilotAccounting( self, pilotDict ):
    """Build and send one PilotAccounting record per pilot, then flag them.

    For each pilot: resolves the submitting user from its DN, maps the
    destination CE to a site, registers the record with the DataStore client
    and marks the pilot as accounted in pilotAgentsDB. Finally all registers
    are committed in one go.

    :param dict pilotDict: pilot reference -> info dict; reads the keys
        'LastUpdateTime', 'SubmissionTime', 'OwnerDN', 'OwnerGroup',
        'DestinationSite', 'GridType', 'Broker', 'Status', 'PilotID'
        and (optionally) 'Jobs'
    :returns: S_OK on success, or the failed commit result
    """
    for pRef in pilotDict:
      self.log.verbose( 'Preparing accounting record for pilot %s' % pRef )
      pA = PilotAccounting()
      pA.setEndTime( pilotDict[pRef][ 'LastUpdateTime' ] )
      pA.setStartTime( pilotDict[pRef][ 'SubmissionTime' ] )
      retVal = CS.getUsernameForDN( pilotDict[pRef][ 'OwnerDN' ] )
      if not retVal[ 'OK' ]:
        # fall back to a placeholder username when the DN is unknown
        userName = '******'
        self.log.error( "Can't determine username for dn:", pilotDict[pRef][ 'OwnerDN' ] )
      else:
        userName = retVal[ 'Value' ]
      pA.setValueByKey( 'User', userName )
      pA.setValueByKey( 'UserGroup', pilotDict[pRef][ 'OwnerGroup' ] )
      result = getSiteForCE( pilotDict[pRef][ 'DestinationSite' ] )
      if result['OK'] and result[ 'Value' ].strip():
        pA.setValueByKey( 'Site', result['Value'].strip() )
      else:
        pA.setValueByKey( 'Site', 'Unknown' )
      pA.setValueByKey( 'GridCE', pilotDict[pRef][ 'DestinationSite' ] )
      pA.setValueByKey( 'GridMiddleware', pilotDict[pRef][ 'GridType' ] )
      pA.setValueByKey( 'GridResourceBroker', pilotDict[pRef][ 'Broker' ] )
      pA.setValueByKey( 'GridStatus', pilotDict[pRef][ 'Status' ] )
      if not 'Jobs' in pilotDict[pRef]:
        pA.setValueByKey( 'Jobs', 0 )
      else:
        pA.setValueByKey( 'Jobs', len( pilotDict[pRef]['Jobs'] ) )
      self.log.verbose( "Adding accounting record for pilot %s" % pilotDict[pRef][ 'PilotID' ] )
      retVal = gDataStoreClient.addRegister( pA )
      if not retVal[ 'OK' ]:
        self.log.error( 'Failed to send accounting info for pilot ', pRef )
      else:
        # Set up AccountingSent flag
        result = pilotAgentsDB.setAccountingFlag( pRef )
        if not result['OK']:
          self.log.error( 'Failed to set accounting flag for pilot ', pRef )

    self.log.info( 'Committing accounting records for %d pilots' % len( pilotDict ) )
    result = gDataStoreClient.commit()
    if result['OK']:
      # NOTE(review): the flag was already set per-pilot above after a
      # successful addRegister; this second pass re-sets it for every pilot
      # once the commit succeeds. Looks redundant — confirm intent.
      for pRef in pilotDict:
        self.log.verbose( 'Setting AccountingSent flag for pilot %s' % pRef )
        result = pilotAgentsDB.setAccountingFlag( pRef )
        if not result['OK']:
          self.log.error( 'Failed to set accounting flag for pilot ', pRef )
    else:
      return result

    return S_OK()
Exemple #20
0
    def sendPilotAccounting(self, pilotDict):
        """Send one PilotAccounting record per pilot, then flag pilots as accounted.

        :param dict pilotDict: pilot reference -> info dict; reads the keys
            'LastUpdateTime', 'SubmissionTime', 'OwnerDN', 'OwnerGroup',
            'DestinationSite', 'GridType', 'Broker', 'Status', 'PilotID'
            and (optionally) 'Jobs'
        :returns: S_OK on success, or the failed commit result
        """
        for pRef, pData in pilotDict.items():
            self.log.verbose("Preparing accounting record for pilot %s" % pRef)
            pA = PilotAccounting()
            pA.setEndTime(pData["LastUpdateTime"])
            pA.setStartTime(pData["SubmissionTime"])
            userRes = CS.getUsernameForDN(pData["OwnerDN"])
            if userRes["OK"]:
                userName = userRes["Value"]
            else:
                # placeholder username when the DN cannot be resolved
                userName = "******"
                self.log.error("Can't determine username for dn:", pData["OwnerDN"])
            pA.setValueByKey("User", userName)
            pA.setValueByKey("UserGroup", pData["OwnerGroup"])
            siteRes = getSiteForCE(pData["DestinationSite"])
            if siteRes["OK"] and siteRes["Value"].strip():
                pA.setValueByKey("Site", siteRes["Value"].strip())
            else:
                pA.setValueByKey("Site", "Unknown")
            pA.setValueByKey("GridCE", pData["DestinationSite"])
            pA.setValueByKey("GridMiddleware", pData["GridType"])
            pA.setValueByKey("GridResourceBroker", pData["Broker"])
            pA.setValueByKey("GridStatus", pData["Status"])
            pA.setValueByKey("Jobs", len(pData["Jobs"]) if "Jobs" in pData else 0)
            self.log.info("Adding accounting record for pilot %s" % pData["PilotID"])
            if not gDataStoreClient.addRegister(pA)["OK"]:
                self.log.error("Failed to send accounting info for pilot ", pRef)
            else:
                # Set up AccountingSent flag
                if not pilotAgentsDB.setAccountingFlag(pRef)["OK"]:
                    self.log.error("Failed to set accounting flag for pilot ", pRef)

        self.log.info("Committing accounting records for %d pilots" % len(pilotDict))
        commitRes = gDataStoreClient.commit()
        if not commitRes["OK"]:
            return commitRes
        for pRef in pilotDict:
            self.log.verbose("Setting AccountingSent flag for pilot %s" % pRef)
            if not pilotAgentsDB.setAccountingFlag(pRef)["OK"]:
                self.log.error("Failed to set accounting flag for pilot ", pRef)

        return S_OK()
Exemple #21
0
  def accountPilots(self, pilotsToAccount, connection):
    """Send accounting records for the given pilots and update their status.

    :param dict pilotsToAccount: pilot reference -> info dict with at least
        the 'Status', 'DestinationSite' and 'StatusDate' keys
    :param connection: DB connection reused for all pilotDB queries
    :returns: S_OK/S_ERROR
    """
    accountingFlag = False
    pae = self.am_getOption('PilotAccountingEnabled', 'yes')
    if pae.lower() == "yes":
      accountingFlag = True

    if not pilotsToAccount:
      self.log.info('No pilots to Account')
      return S_OK()

    accountingSent = False
    if accountingFlag:
      # list() instead of dict_keys: a real sequence is safe across the DB API
      # under Python 3
      retVal = self.pilotDB.getPilotInfo(list(pilotsToAccount), conn=connection)
      if not retVal['OK']:
        self.log.error('Fail to retrieve Info for pilots', retVal['Message'])
        return retVal
      dbData = retVal['Value']
      # overwrite non-final DB states with the fresher info we were given
      for pref in dbData:
        if pref in pilotsToAccount:
          if dbData[pref]['Status'] not in self.finalStateList:
            dbData[pref]['Status'] = pilotsToAccount[pref]['Status']
            dbData[pref]['DestinationSite'] = pilotsToAccount[pref]['DestinationSite']
            dbData[pref]['LastUpdateTime'] = pilotsToAccount[pref]['StatusDate']

      retVal = self.__addPilotsAccountingReport(dbData)
      if not retVal['OK']:
        # was a copy-pasted "retrieve Info" message; report the real failure
        self.log.error('Failed to add pilots accounting report', retVal['Message'])
        return retVal

      self.log.info("Sending accounting records...")
      retVal = gDataStoreClient.commit()
      if not retVal['OK']:
        self.log.error("Can't send accounting reports", retVal['Message'])
      else:
        self.log.info("Accounting sent for %s pilots" % len(pilotsToAccount))
        accountingSent = True

    # update pilot statuses when accounting is disabled or was sent OK
    if not accountingFlag or accountingSent:
      for pRef in pilotsToAccount:
        pDict = pilotsToAccount[pRef]
        self.log.verbose('Setting Status for %s to %s' % (pRef, pDict['Status']))
        self.pilotDB.setPilotStatus(pRef,
                                    pDict['Status'],
                                    pDict['DestinationSite'],
                                    pDict['StatusDate'],
                                    conn=connection)

    return S_OK()
def test_addAndRemove():
  """Insert a single accounting record, commit it, then remove it again."""
  acctRecord = createAccountingRecord()
  acctRecord.setStartTime()
  acctRecord.setEndTime()

  # register the record and push it to the accounting service
  assert gDataStoreClient.addRegister(acctRecord)['OK']
  assert gDataStoreClient.commit()['OK']

  # clean up: delete the record we just committed
  assert gDataStoreClient.remove(acctRecord)['OK']
  def userStorageAccounting( self ):
    """ Create and commit one UserStorage accounting record per (user, SE) pair,
        combining the logical (catalog) summary with the physical per-SE summary.

        :return: S_OK() on success, S_ERROR structure otherwise
    """
    self.log.notice( "-------------------------------------------------------------------------------------\n" )
    self.log.notice( "Generate accounting records for user directories " )
    self.log.notice( "-------------------------------------------------------------------------------------\n" )

    result = self.__stDB.getUserSummary()
    if not result[ 'OK' ]:
      return result
    userCatalogData = result[ 'Value' ]
    # NOTE: removed a leftover Python 2 debug statement ("print userCatalogData")
    # which is a syntax error under Python 3
    self.log.notice( "Got summary for %s users" % ( len( userCatalogData ) ) )
    result = self.__stDB.getUserSummaryPerSE()
    if not result[ 'OK' ]:
      return result
    userSEData = result[ 'Value' ]
    self.log.notice( "Got SE summary for %s users" % ( len( userSEData ) ) )

    now = Time.dateTime()
    numRows = 0
    for user in sorted( userSEData ):
      if user not in userCatalogData:
        self.log.error( "User has SE data but not Catalog data!", user )
        continue
      for se in sorted( userSEData[ user ] ):
        seData = userSEData[ user ][ se ]
        usRecord = UserStorage()
        usRecord.setStartTime( now )
        usRecord.setEndTime( now )
        usRecord.setValueByKey( "User", user )
        usRecord.setValueByKey( "StorageElement", se )
        usRecord.setValueByKey( "LogicalSize", userCatalogData[ user ][ 'Size' ] )
        usRecord.setValueByKey( "LogicalFiles", userCatalogData[ user ][ 'Files' ] )
        usRecord.setValueByKey( "PhysicalSize", seData[ 'Size' ] )
        usRecord.setValueByKey( "PhysicalFiles", seData[ 'Files' ] )
        # SE-reported usage is not available here, so these stay at zero
        usRecord.setValueByKey( "StorageSize", 0 )
        usRecord.setValueByKey( "StorageFiles", 0 )
        gDataStoreClient.addRegister( usRecord )
        numRows += 1

      self.log.notice( " User %s is using %.2f GiB (%s files)" % ( user,
                                                                   userCatalogData[ user ][ 'Size' ] / ( 1024.0 ** 3 ),
                                                                   userCatalogData[ user ][ 'Files' ] ) )
    self.log.notice( "Sending %s records to accounting for user storage" % numRows )
    res = gDataStoreClient.commit()
    if not res[ 'OK' ]:
      self.log.notice( "ERROR: committing UserStorage records: %s " % res )
      return S_ERROR( res )
    self.log.notice( "%s records for UserStorage type successfully committed" % numRows )
    # explicit success result (the original fell off the end and returned None)
    return S_OK()
 def __updateMigrationAccounting( self, se, migratingFiles, matchingFiles, mismatchingFiles, assumedEndTime, previousMonitorTime ):
   """ Create accounting messages for the overall throughput observed and the total migration time for the files

       :param se: storage element name used to label the gMonitor marks
       :param migratingFiles: dict fileID -> metadata dict with at least 'Size', 'LFN', 'SubmitTime'
       :param matchingFiles: fileIDs whose post-migration checksums matched
       :param mismatchingFiles: fileIDs whose post-migration checksums mismatched
       :param assumedEndTime: datetime taken as the migration end time
       :param previousMonitorTime: datetime of the previous monitoring cycle (throughput window start)
   """
   allMigrated = matchingFiles + mismatchingFiles
   gMonitor.addMark( "MigratedFiles%s" % se, len( allMigrated ) )
   gMonitor.addMark( "TotalMigratedFiles%s" % se, len( allMigrated ) )
   # Collect LFNs with no known size so the catalog is queried once for all of them
   lfnFileID = {}
   sizesToObtain = []
   for fileID in allMigrated:
     if not migratingFiles[fileID]['Size']:
       lfn = migratingFiles[fileID]['LFN']
       sizesToObtain.append( lfn )
       lfnFileID[lfn] = fileID
   if sizesToObtain:
     res = self.ReplicaManager.getCatalogFileSize( sizesToObtain )
     if not res['OK']:
       gLogger.error( "[%s] __updateMigrationAccounting: Failed to obtain file sizes" % se )
       return res
     for lfn, error in res['Value']['Failed'].items():
       gLogger.error( "[%s] __updateAccounting: Failed to get file size" % se, "%s %s" % ( lfn, error ) )
       # fall back to zero size so accounting records can still be produced
       migratingFiles[lfnFileID[lfn]]['Size'] = 0
     for lfn, size in res['Value']['Successful'].items():
       migratingFiles[lfnFileID[lfn]]['Size'] = size
   totalSize = 0
   for fileID in allMigrated:
     size = migratingFiles[fileID]['Size']
     totalSize += size
     submitTime = migratingFiles[fileID]['SubmitTime']
     # NOTE(review): submitTime - assumedEndTime yields a NEGATIVE interval when
     # the migration ends after submission; presumably assumedEndTime - submitTime
     # was intended -- confirm before relying on the MigrationTime marks
     timeDiff = submitTime - assumedEndTime
     migrationTime = ( timeDiff.days * 86400 ) + ( timeDiff.seconds ) + ( timeDiff.microseconds / 1000000.0 )
     gMonitor.addMark( "MigrationTime%s" % se, migrationTime )
     gDataStoreClient.addRegister( self.__initialiseAccountingObject( 'MigrationTime', se, submitTime, assumedEndTime, size ) )
     gDataStoreClient.addRegister( self.__initialiseAccountingObject( 'MigrationThroughput', se, previousMonitorTime, assumedEndTime, size ) )
     oDataOperation = self.__initialiseAccountingObject( 'MigrationSuccess', se, submitTime, assumedEndTime, size )
     if fileID in mismatchingFiles:
       # checksum mismatch: flag the transfer as failed in the accounting record
       oDataOperation.setValueByKey( 'TransferOK', 0 )
       oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
     gDataStoreClient.addRegister( oDataOperation )
   gMonitor.addMark( "TotalMigratedSize%s" % se, totalSize )
   gMonitor.addMark( "ChecksumMismatches%s" % se, len( mismatchingFiles ) )
   gMonitor.addMark( "TotalChecksumMismatches%s" % se, len( mismatchingFiles ) )
   gMonitor.addMark( "ChecksumMatches%s" % se, len( matchingFiles ) )
   gMonitor.addMark( "TotalChecksumMatches%s" % se, len( matchingFiles ) )
   if allMigrated:
     gLogger.info( '[%s] __updateMigrationAccounting: Attempting to send accounting message...' % se )
     return gDataStoreClient.commit()
   return S_OK()
Exemple #25
0
        def sendAccounting(self):
            """ Fill the DataOperation accounting record and dispatch it.

                Relies on free variables from the enclosing scope (baseDict,
                startTime, endTime, commitFlag, delayedCommit, sLog) -- their
                exact semantics must be checked at the definition site, which
                is not visible here.
            """
            self.dataOp.setValuesFromDict(baseDict)
            # Use the explicit time window if given, otherwise stamp "now"
            if startTime:
                self.dataOp.setStartTime(startTime)
                self.dataOp.setEndTime(endTime)
            else:
                self.dataOp.setStartTime()
                self.dataOp.setEndTime()
            # Adding only to register
            if not commitFlag and not delayedCommit:
                return gDataStoreClient.addRegister(self.dataOp)

            # Adding to register and committing
            if commitFlag and not delayedCommit:
                gDataStoreClient.addRegister(self.dataOp)
                result = gDataStoreClient.commit()
                sLog.debug("Committing data operation to accounting")
                if not result["OK"]:
                    sLog.error("Could not commit data operation to accounting",
                               result["Message"])
                    return result
                sLog.debug("Done committing to accounting")
            # Only late committing
            else:
                result = self.dataOp.delayedCommit()
                if not result["OK"]:
                    sLog.error(
                        "Could not delay-commit data operation to accounting")
                    return result

            # Send data and commit prioritizing the first monitoring option in the list
            for backend in self.monitoringOptions:
                # NOTE(review): locals() only contains this function's own local
                # names -- closure helpers named send<Backend> will not appear in
                # it unless referenced directly; verify this lookup actually works
                func = locals()[f"send{backend}"]
                res = func()
                if not res["OK"]:
                    return res
Exemple #26
0
  def __executeMethod(self, lfn, *args, **kwargs):
    """ Forward the call to each storage in turn until one works.
        The method to be executed is stored in self.methodName

        :param lfn : string, list or dictionary
        :param *args : variable amount of non-keyword arguments. SHOULD BE EMPTY
        :param **kwargs : keyword arguments

        :returns S_OK( { 'Failed': {lfn : reason} , 'Successful': {lfn : value} } )
                The Failed dict contains the lfn only if the operation failed on all the storages
                The Successful dict contains the value returned by the successful storages.

        A special kwargs is 'inputProtocol', which can be specified for putFile. It describes
        the protocol used as source protocol, since there is in principle only one.
    """

    removedArgs = {}
    log = self.log.getSubLogger('__executeMethod')
    log.verbose("preparing the execution of %s" % (self.methodName))

    # args should normaly be empty to avoid problem...
    if args:
      log.verbose("args should be empty!%s" % args)
      # because there is normally only one kw argument, I can move it from args to kwargs
      methDefaultArgs = StorageElementItem.__defaultsArguments.get(self.methodName, {}).keys()
      if methDefaultArgs:
        kwargs[methDefaultArgs[0]] = args[0]
        args = args[1:]
      log.verbose(
          "put it in kwargs, but dirty and might be dangerous!args %s kwargs %s" %
          (args, kwargs))

    # We check the deprecated arguments
    for depArg in StorageElementItem.__deprecatedArguments:
      if depArg in kwargs:
        log.verbose("%s is not an allowed argument anymore. Please change your code!" % depArg)
        removedArgs[depArg] = kwargs[depArg]
        del kwargs[depArg]

    # Set default argument if any
    methDefaultArgs = StorageElementItem.__defaultsArguments.get(self.methodName, {})
    for argName in methDefaultArgs:
      if argName not in kwargs:
        log.debug("default argument %s for %s not present.\
         Setting value %s." % (argName, self.methodName, methDefaultArgs[argName]))
        kwargs[argName] = methDefaultArgs[argName]

    # Normalise lfn (string / list / dict) into a single dict keyed by lfn
    res = checkArgumentFormat(lfn)
    if not res['OK']:
      errStr = "Supplied lfns must be string, list of strings or a dictionary."
      log.debug(errStr)
      return res
    lfnDict = res['Value']

    log.verbose(
        "Attempting to perform '%s' operation with %s lfns." %
        (self.methodName, len(lfnDict)))

    res = self.isValid(operation=self.methodName)
    if not res['OK']:
      return res
    else:
      if not self.valid:
        return S_ERROR(self.errorReason)
    # In case executing putFile, we can assume that all the source urls
    # are from the same protocol. This optional parameter, if defined
    # can be used to ignore some storage plugins and thus save time
    # and avoid fake failures showing in the accounting
    inputProtocol = kwargs.pop('inputProtocol', None)

    successful = {}
    failed = {}
    filteredPlugins = self.__filterPlugins(self.methodName, kwargs.get('protocols'), inputProtocol)
    if not filteredPlugins:
      return S_ERROR(errno.EPROTONOSUPPORT, "No storage plugins matching the requirements\
                                           (operation %s protocols %s inputProtocol %s)" %
                     (self.methodName, kwargs.get('protocols'), inputProtocol))
    # Try all of the storages one by one
    for storage in filteredPlugins:
      # Determine whether to use this storage object
      storageParameters = storage.getParameters()
      pluginName = storageParameters['PluginName']

      # lfns that succeeded on an earlier storage were popped from lfnDict below
      if not lfnDict:
        log.debug("No lfns to be attempted for %s protocol." % pluginName)
        continue

      log.verbose("Generating %s protocol URLs for %s." % (len(lfnDict), pluginName))
      replicaDict = kwargs.pop('replicaDict', {})
      if storage.pluginName != "Proxy":
        res = self.__generateURLDict(lfnDict, storage, replicaDict=replicaDict)
        urlDict = res['Value']['Successful']  # url : lfn
        failed.update(res['Value']['Failed'])
      else:
        urlDict = dict([(lfn, lfn) for lfn in lfnDict])
      if not urlDict:
        log.verbose("__executeMethod No urls generated for protocol %s." % pluginName)
      else:
        log.verbose(
            "Attempting to perform '%s' for %s physical files" %
            (self.methodName, len(urlDict)))
        fcn = None
        if hasattr(storage, self.methodName) and callable(getattr(storage, self.methodName)):
          fcn = getattr(storage, self.methodName)
        if not fcn:
          return S_ERROR(
              DErrno.ENOMETH,
              "SE.__executeMethod: unable to invoke %s, it isn't a member function of storage")
        urlsToUse = {}  # url : the value of the lfn dictionary for the lfn of this url
        for url in urlDict:
          urlsToUse[url] = lfnDict[urlDict[url]]

        startDate = datetime.datetime.utcnow()
        startTime = time.time()
        res = fcn(urlsToUse, *args, **kwargs)
        elapsedTime = time.time() - startTime

        self.addAccountingOperation(urlsToUse, startDate, elapsedTime, storageParameters, res)

        if not res['OK']:
          # whole-call failure: every attempted lfn accumulates this message
          errStr = "Completely failed to perform %s." % self.methodName
          log.debug(errStr, 'with plugin %s: %s' % (pluginName, res['Message']))
          for lfn in urlDict.values():
            if lfn not in failed:
              failed[lfn] = ''
            failed[lfn] = "%s %s" % (failed[lfn], res['Message']) if failed[lfn] else res['Message']

        else:
          for url, lfn in urlDict.items():
            if url not in res['Value']['Successful']:
              if lfn not in failed:
                failed[lfn] = ''
              if url in res['Value']['Failed']:
                self.log.debug(res['Value']['Failed'][url])
                failed[lfn] = "%s %s" % (failed[lfn], res['Value']['Failed'][url]
                                         ) if failed[lfn] else res['Value']['Failed'][url]
              else:
                errStr = 'No error returned from plug-in'
                failed[lfn] = "%s %s" % (failed[lfn], errStr) if failed[lfn] else errStr
            else:
              # success: record the value and stop retrying this lfn on later storages
              successful[lfn] = res['Value']['Successful'][url]
              if lfn in failed:
                failed.pop(lfn)
              lfnDict.pop(lfn)

    # push the accounting records accumulated via addAccountingOperation
    gDataStoreClient.commit()

    return S_OK({'Failed': failed, 'Successful': successful})
Exemple #27
0
 def __updateMigrationAccounting(self, se, migratingFiles, matchingFiles,
                                 mismatchingFiles, assumedEndTime,
                                 previousMonitorTime):
     """ Create accounting messages for the overall throughput observed and the total migration time for the files

         :param se: storage element name used to label the gMonitor marks
         :param migratingFiles: dict fileID -> metadata dict with at least 'Size', 'LFN', 'SubmitTime'
         :param matchingFiles: fileIDs whose post-migration checksums matched
         :param mismatchingFiles: fileIDs whose post-migration checksums mismatched
         :param assumedEndTime: datetime taken as the migration end time
         :param previousMonitorTime: datetime of the previous monitoring cycle (throughput window start)
     """
     allMigrated = matchingFiles + mismatchingFiles
     gMonitor.addMark("MigratedFiles%s" % se, len(allMigrated))
     gMonitor.addMark("TotalMigratedFiles%s" % se, len(allMigrated))
     # Collect LFNs with no known size so the catalog is queried once for all of them
     lfnFileID = {}
     sizesToObtain = []
     for fileID in allMigrated:
         if not migratingFiles[fileID]['Size']:
             lfn = migratingFiles[fileID]['LFN']
             sizesToObtain.append(lfn)
             lfnFileID[lfn] = fileID
     if sizesToObtain:
         res = self.ReplicaManager.getCatalogFileSize(sizesToObtain)
         if not res['OK']:
             gLogger.error(
                 "[%s] __updateMigrationAccounting: Failed to obtain file sizes"
                 % se)
             return res
         for lfn, error in res['Value']['Failed'].items():
             gLogger.error(
                 "[%s] __updateAccounting: Failed to get file size" % se,
                 "%s %s" % (lfn, error))
             # fall back to zero size so accounting records can still be produced
             migratingFiles[lfnFileID[lfn]]['Size'] = 0
         for lfn, size in res['Value']['Successful'].items():
             migratingFiles[lfnFileID[lfn]]['Size'] = size
     totalSize = 0
     for fileID in allMigrated:
         size = migratingFiles[fileID]['Size']
         totalSize += size
         submitTime = migratingFiles[fileID]['SubmitTime']
         # NOTE(review): submitTime - assumedEndTime yields a NEGATIVE interval
         # when the migration ends after submission; presumably
         # assumedEndTime - submitTime was intended -- confirm before relying
         # on the MigrationTime marks
         timeDiff = submitTime - assumedEndTime
         migrationTime = (timeDiff.days * 86400) + (timeDiff.seconds) + (
             timeDiff.microseconds / 1000000.0)
         gMonitor.addMark("MigrationTime%s" % se, migrationTime)
         gDataStoreClient.addRegister(
             self.__initialiseAccountingObject('MigrationTime', se,
                                               submitTime, assumedEndTime,
                                               size))
         gDataStoreClient.addRegister(
             self.__initialiseAccountingObject('MigrationThroughput', se,
                                               previousMonitorTime,
                                               assumedEndTime, size))
         oDataOperation = self.__initialiseAccountingObject(
             'MigrationSuccess', se, submitTime, assumedEndTime, size)
         if fileID in mismatchingFiles:
             # checksum mismatch: flag the transfer as failed in the accounting record
             oDataOperation.setValueByKey('TransferOK', 0)
             oDataOperation.setValueByKey('FinalStatus', 'Failed')
         gDataStoreClient.addRegister(oDataOperation)
     gMonitor.addMark("TotalMigratedSize%s" % se, totalSize)
     gMonitor.addMark("ChecksumMismatches%s" % se, len(mismatchingFiles))
     gMonitor.addMark("TotalChecksumMismatches%s" % se,
                      len(mismatchingFiles))
     gMonitor.addMark("ChecksumMatches%s" % se, len(matchingFiles))
     gMonitor.addMark("TotalChecksumMatches%s" % se, len(matchingFiles))
     if allMigrated:
         gLogger.info(
             '[%s] __updateMigrationAccounting: Attempting to send accounting message...'
             % se)
         return gDataStoreClient.commit()
     return S_OK()
Exemple #28
0
    def execute(self,
                production_id=None,
                prod_job_id=None,
                wms_job_id=None,
                workflowStatus=None,
                stepStatus=None,
                wf_commons=None,
                step_commons=None,
                step_number=None,
                step_id=None):
        """ Main execution method.

        Here we analyse what is written in the XML summary and the pool XML,
        and send one DataOperation accounting record per (lfn, SE) access
        attempt (successful or failed).
        """

        try:
            super(AnalyseFileAccess,
                  self).execute(self.version, production_id, prod_job_id,
                                wms_job_id, workflowStatus, stepStatus,
                                wf_commons, step_commons, step_number, step_id)

            self._resolveInputVariables()

            self.log.info("Analyzing root access from %s and %s" %
                          (self.XMLSummary, self.poolXMLCatName))

            pfn_lfn = {}
            lfn_guid = {}

            lfn_pfn_fail = {}
            successful_lfn = set()

            # Build the pfn <-> lfn / SE maps from the pool XML catalog
            for guid in self.poolXMLCatName_o.files:
                pFile = self.poolXMLCatName_o.files[guid]
                lfn = pFile.lfns[0]  # there can be only one
                lfn_guid[lfn] = guid
                self.lfn_pfn[lfn] = []
                for pfn, _ftype, se in pFile.pfns:
                    pfn_lfn[pfn] = lfn
                    self.pfn_se[pfn] = se
                    self.lfn_pfn[lfn].append(pfn)

            for inputFile, status in self.XMLSummary_o.inputStatus:

                # The inputFile starts with 'LFN:' or 'PFN:'
                cleanedName = inputFile[4:]
                if status == 'full':
                    # it is an LFN
                    successful_lfn.add(cleanedName)
                elif status == 'fail':
                    # it is a PFN
                    lfn = pfn_lfn.get(cleanedName)
                    if not lfn:
                        self.log.error(
                            "Failed pfn %s is not listed in the catalog" %
                            cleanedName)
                        continue
                    lfn_pfn_fail.setdefault(lfn, []).append(cleanedName)
                else:
                    # intermediate status, think of it...
                    pass

            # The lfn in successful and not in lfn_pfn_failed succeeded immediately
            immediately_successful = successful_lfn - set(lfn_pfn_fail)

            for lfn in immediately_successful:
                # We take the first replica in the catalog
                pfn = self.__getNthPfnForLfn(lfn, 0)
                remoteSE = self.pfn_se.get(pfn)

                if not remoteSE:
                    continue

                oDataOperation = self.__initialiseAccountingObject(
                    remoteSE, True)
                gDataStoreClient.addRegister(oDataOperation)

            # For each file that had failure
            for lfn in lfn_pfn_fail:
                failedPfns = lfn_pfn_fail[lfn]

                # We add the accounting for the failure
                for pfn in failedPfns:
                    remoteSE = self.pfn_se.get(pfn)
                    if not remoteSE:
                        continue

                    oDataOperation = self.__initialiseAccountingObject(
                        remoteSE, False)
                    gDataStoreClient.addRegister(oDataOperation)

                # If there were more options to try, the next one is successful
                if len(failedPfns) < len(self.lfn_pfn[lfn]):
                    pfn = self.__getNthPfnForLfn(lfn, len(failedPfns))
                    remoteSE = self.pfn_se.get(pfn)

                    if not remoteSE:
                        continue

                    oDataOperation = self.__initialiseAccountingObject(
                        remoteSE, True)
                    gDataStoreClient.addRegister(oDataOperation)

            gDataStoreClient.commit()

        # Best effort: any analysis error is only logged, the step still
        # finalizes and returns S_OK()
        except Exception as e:  #pylint:disable=broad-except
            self.log.warn(str(e))

        finally:
            super(AnalyseFileAccess, self).finalize(self.version)

        return S_OK()
Exemple #29
0
    def __executeMethod(self, lfn, *args, **kwargs):
        """ Forward the call to each storage in turn until one works.
        The method to be executed is stored in self.methodName
        :param lfn : string, list or dictionary
        :param *args : variable amount of non-keyword arguments. SHOULD BE EMPTY
        :param **kwargs : keyword arguments
        :returns S_OK( { 'Failed': {lfn : reason} , 'Successful': {lfn : value} } )
                The Failed dict contains the lfn only if the operation failed on all the storages
                The Successful dict contains the value returned by the successful storages.

        A special kwargs is 'inputProtocol', which can be specified for putFile. It describes
        the protocol used as source protocol, since there is in principle only one.
        """

        removedArgs = {}
        log = self.log.getSubLogger('__executeMethod')
        log.verbose("preparing the execution of %s" % (self.methodName))

        # args should normaly be empty to avoid problem...
        if len(args):
            log.verbose("args should be empty!%s" % args)
            # because there is normally only one kw argument, I can move it from args to kwargs
            methDefaultArgs = StorageElementItem.__defaultsArguments.get(
                self.methodName, {}).keys()
            if len(methDefaultArgs):
                kwargs[methDefaultArgs[0]] = args[0]
                args = args[1:]
            log.verbose(
                "put it in kwargs, but dirty and might be dangerous!args %s kwargs %s"
                % (args, kwargs))

        # We check the deprecated arguments
        for depArg in StorageElementItem.__deprecatedArguments:
            if depArg in kwargs:
                log.verbose(
                    "%s is not an allowed argument anymore. Please change your code!"
                    % depArg)
                removedArgs[depArg] = kwargs[depArg]
                del kwargs[depArg]

        # Set default argument if any
        methDefaultArgs = StorageElementItem.__defaultsArguments.get(
            self.methodName, {})
        for argName in methDefaultArgs:
            if argName not in kwargs:
                log.debug("default argument %s for %s not present.\
         Setting value %s." %
                          (argName, self.methodName, methDefaultArgs[argName]))
                kwargs[argName] = methDefaultArgs[argName]

        # Normalise lfn (string / list / dict) into a single dict keyed by lfn
        res = checkArgumentFormat(lfn)
        if not res['OK']:
            errStr = "Supplied lfns must be string, list of strings or a dictionary."
            log.debug(errStr)
            return res
        lfnDict = res['Value']

        log.verbose("Attempting to perform '%s' operation with %s lfns." %
                    (self.methodName, len(lfnDict)))

        res = self.isValid(operation=self.methodName)
        if not res['OK']:
            return res
        else:
            if not self.valid:
                return S_ERROR(self.errorReason)
        # In case executing putFile, we can assume that all the source urls
        # are from the same protocol. This optional parameter, if defined
        # can be used to ignore some storage plugins and thus save time
        # and avoid fake failures showing in the accounting
        inputProtocol = kwargs.pop('inputProtocol', None)

        successful = {}
        failed = {}
        filteredPlugins = self.__filterPlugins(self.methodName,
                                               kwargs.get('protocols'),
                                               inputProtocol)
        if not filteredPlugins:
            return S_ERROR( errno.EPROTONOSUPPORT, "No storage plugins matching the requirements\
                                           (operation %s protocols %s inputProtocol %s)"      \
                                                  % ( self.methodName, kwargs.get( 'protocols' ), inputProtocol ) )
        # Try all of the storages one by one
        for storage in filteredPlugins:
            # Determine whether to use this storage object
            storageParameters = storage.getParameters()
            pluginName = storageParameters['PluginName']

            # lfns that succeeded on an earlier storage were popped from lfnDict below
            if not lfnDict:
                log.debug("No lfns to be attempted for %s protocol." %
                          pluginName)
                continue

            log.verbose("Generating %s protocol URLs for %s." %
                        (len(lfnDict), pluginName))
            replicaDict = kwargs.pop('replicaDict', {})
            if storage.pluginName != "Proxy":
                res = self.__generateURLDict(lfnDict,
                                             storage,
                                             replicaDict=replicaDict)
                urlDict = res['Value']['Successful']  # url : lfn
                failed.update(res['Value']['Failed'])
            else:
                urlDict = dict([(lfn, lfn) for lfn in lfnDict])
            if not len(urlDict):
                log.verbose(
                    "__executeMethod No urls generated for protocol %s." %
                    pluginName)
            else:
                log.verbose(
                    "Attempting to perform '%s' for %s physical files" %
                    (self.methodName, len(urlDict)))
                fcn = None
                if hasattr(storage, self.methodName) and callable(
                        getattr(storage, self.methodName)):
                    fcn = getattr(storage, self.methodName)
                if not fcn:
                    return S_ERROR(
                        DErrno.ENOMETH,
                        "SE.__executeMethod: unable to invoke %s, it isn't a member function of storage"
                    )
                urlsToUse = {
                }  # url : the value of the lfn dictionary for the lfn of this url
                for url in urlDict:
                    urlsToUse[url] = lfnDict[urlDict[url]]

                startDate = datetime.datetime.utcnow()
                startTime = time.time()
                res = fcn(urlsToUse, *args, **kwargs)
                elapsedTime = time.time() - startTime

                self.addAccountingOperation(urlsToUse, startDate, elapsedTime,
                                            storageParameters, res)

                if not res['OK']:
                    # whole-call failure: every attempted lfn accumulates this message
                    errStr = "Completely failed to perform %s." % self.methodName
                    log.debug(
                        errStr,
                        'with plugin %s: %s' % (pluginName, res['Message']))
                    for lfn in urlDict.values():
                        if lfn not in failed:
                            failed[lfn] = ''
                        failed[lfn] = "%s %s" % (
                            failed[lfn],
                            res['Message']) if failed[lfn] else res['Message']

                else:
                    for url, lfn in urlDict.items():
                        if url not in res['Value']['Successful']:
                            if lfn not in failed:
                                failed[lfn] = ''
                            if url in res['Value']['Failed']:
                                self.log.debug(res['Value']['Failed'][url])
                                failed[lfn] = "%s %s" % (
                                    failed[lfn],
                                    res['Value']['Failed'][url]) if failed[
                                        lfn] else res['Value']['Failed'][url]
                            else:
                                errStr = 'No error returned from plug-in'
                                failed[lfn] = "%s %s" % (
                                    failed[lfn],
                                    errStr) if failed[lfn] else errStr
                        else:
                            # success: record the value and stop retrying this lfn
                            successful[lfn] = res['Value']['Successful'][url]
                            if lfn in failed:
                                failed.pop(lfn)
                            lfnDict.pop(lfn)

        # push the accounting records accumulated via addAccountingOperation
        gDataStoreClient.commit()

        return S_OK({'Failed': failed, 'Successful': successful})
  def topDirectoryAccounting( self ):
    """ Create and commit Storage accounting records for the first-level
        directories under /lhcb/, combining logical (catalog) and physical
        (per-SE) usage.

        :return: S_OK() on success, S_ERROR structure otherwise
    """
    self.log.notice( "-------------------------------------------------------------------------------------\n" )
    self.log.notice( "Generate accounting records for top directories " )
    self.log.notice( "-------------------------------------------------------------------------------------\n" )

    ftb = 1.0e12

    # get info from the DB about the LOGICAL STORAGE USAGE (from the su_Directory table):
    result = self.__stDB.getSummary( '/lhcb/' )
    if not result[ 'OK' ]:
      return result
    logicalUsage = result['Value']
    topDirLogicalUsage = {}  # build the list of first level directories
    for row in logicalUsage:
      # row is a full directory path; aggregate at the /lhcb/<x>/ level
      splitDir = row.split( "/" )
      if len( splitDir ) > 3:  # skip the root directory "/lhcb/"
        firstLevelDir = '/' + splitDir[1] + '/' + splitDir[2] + '/'
        topDirLogicalUsage.setdefault( firstLevelDir, {'Files':0, 'Size':0} )
        topDirLogicalUsage[ firstLevelDir ][ 'Files' ] += logicalUsage[ row ][ 'Files' ]
        topDirLogicalUsage[ firstLevelDir ][ 'Size' ] += logicalUsage[ row ][ 'Size' ]
    self.log.notice( "Summary on logical usage of top directories: " )
    for row in topDirLogicalUsage:
      self.log.notice( "dir: %s size: %.4f TB  files: %d" % ( row, topDirLogicalUsage[row]['Size'] / ftb,
                                                              topDirLogicalUsage[row]['Files'] ) )

    # loop on top level directories (/lhcb/data, /lhcb/user/, /lhcb/MC/, etc..)
    # to get the summary in terms of PHYSICAL usage grouped by SE:
    seData = {}
    for directory in topDirLogicalUsage:
      result = self.__stDB.getDirectorySummaryPerSE( directory )  # retrieve the PHYSICAL usage
      if not result[ 'OK' ]:
        return result
      seData[ directory ] = result[ 'Value' ]
      self.log.notice( "Got SE summary for %s directories " % ( len( seData ) ) )
      self.log.debug( "SEData: %s" % seData )
    # loop on top level directories to send the accounting records
    numRows = 0
    now = Time.dateTime()
    for directory in seData:
      self.log.debug( "dir: %s SEData: %s " % ( directory, seData[ directory ] ) )
      if directory not in topDirLogicalUsage:
        self.log.error( "Dir %s is in the summary per SE, but it is not in the logical files summary!" % directory )
        continue
      for se in sorted( seData[ directory ] ):
        storageRecord = Storage()
        storageRecord.setStartTime( now )
        storageRecord.setEndTime( now )
        storageRecord.setValueByKey( "Directory", directory )
        storageRecord.setValueByKey( "StorageElement", se )
        storageRecord.setValueByKey( "LogicalFiles", topDirLogicalUsage[ directory ][ 'Files' ] )
        storageRecord.setValueByKey( "LogicalSize", topDirLogicalUsage[ directory ][ 'Size' ] )
        # physical counters may be missing for an SE: default to 0 instead of
        # failing (narrowed from a bare "except:" which also hid real errors)
        try:
          physicalFiles = seData[ directory ][ se ][ 'Files' ]
        except KeyError:
          self.log.error( "WARNING! no files replicas for directory %s on SE %s" % ( directory, se ) )
          physicalFiles = 0
        try:
          physicalSize = seData[ directory ][ se ][ 'Size' ]
        except KeyError:
          self.log.error( "WARNING! no size for replicas for directory %s on SE %s" % ( directory, se ) )
          physicalSize = 0
        storageRecord.setValueByKey( "PhysicalFiles", physicalFiles )
        storageRecord.setValueByKey( "PhysicalSize", physicalSize )
        gDataStoreClient.addRegister( storageRecord )
        numRows += 1
        self.log.debug( "Directory: %s SE: %s  physical size: %.4f TB (%d files)" % ( directory,
                                                                                      se,
                                                                                      physicalSize / ftb,
                                                                                      physicalFiles ) )

    self.log.notice( "Sending %s records to accounting for top level directories storage" % numRows )
    res = gDataStoreClient.commit()
    if not res[ 'OK' ]:
      self.log.notice( "ERROR: committing Storage records: %s " % res )
      return S_ERROR( res )
    self.log.notice( "%s records for Storage type successfully committed" % numRows )
    # explicit success result (the original fell off the end and returned None)
    return S_OK()