Example #1
File: API.py Project: bmb/DIRAC
def _printFormattedDictList( dictList, fields, uniqueField, orderBy ):
  """ Will print ordered the supplied field of a list of dictionaries """
  orderDict = {}
  fieldWidths = {}
  dictFields = {}
  for myDict in dictList:
    for field in fields:
      fieldValue = myDict[field]
      if not fieldWidths.has_key( field ):
        fieldWidths[field] = len( str( field ) )
      if len( str( fieldValue ) ) > fieldWidths[field]:
        fieldWidths[field] = len( str( fieldValue ) )
    orderValue = myDict[orderBy]
    if not orderDict.has_key( orderValue ):
      orderDict[orderValue] = []
    orderDict[orderValue].append( myDict[uniqueField] )
    dictFields[myDict[uniqueField]] = myDict
  headString = "%s" % fields[0].ljust( fieldWidths[fields[0]] + 5 )
  for field in fields[1:]:
    headString = "%s %s" % ( headString, field.ljust( fieldWidths[field] + 5 ) )
  print headString
  for orderValue in sortList( orderDict.keys() ):
    uniqueFields = orderDict[orderValue]
    for uniqueField in sortList( uniqueFields ):
      myDict = dictFields[uniqueField]
      outStr = "%s" % str( myDict[fields[0]] ).ljust( fieldWidths[fields[0]] + 5 )
      for field in fields[1:]:
        outStr = "%s %s" % ( outStr, str( myDict[field] ).ljust( fieldWidths[field] + 5 ) )
      print outStr
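All of these examples revolve around DIRAC's sortList helper (imported from DIRAC.Core.Utilities.List in the project code). As a rough mental model, and only as a sketch of the assumed behaviour rather than DIRAC's actual implementation, it returns a sorted copy of whatever iterable it is given:

def sortList(aList):
    # Assumed behaviour: return a sorted copy, leaving the input untouched.
    return sorted(aList)

print(sortList({'b': 1, 'a': 2}.keys()))  # ['a', 'b']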
Example #2
    def do_addFile(self, args):
        """Add new files to transformation DB

    usage: addFile <lfn> [lfn]
    """
        argss = string.split(args)
        if not len(argss) > 0:
            print "no files supplied"
            return
        lfnDict = {}
        for lfn in argss:
            lfnDict[lfn] = {
                "PFN": "IGNORED-PFN",
                "SE": "IGNORED-SE",
                "Size": 0,
                "GUID": "IGNORED-GUID",
                "Checksum": "IGNORED-CHECKSUM",
            }
        res = self.server.addFile(lfnDict, force=True)
        if not res["OK"]:
            print "failed to add any files: %s" % res["Message"]
            return
        for lfn in sortList(res["Value"]["Failed"].keys()):
            error = res["Value"]["Failed"][lfn]
            print "failed to add %s: %s" % (lfn, error)
        for lfn in sortList(res["Value"]["Successful"].keys()):
            print "added %s" % lfn
Example #3
 def _updateDirectoryUsage(self, directorySEDict, change, connection=False):
     connection = self._getConnection(connection)
     for directoryID in sortList(directorySEDict.keys()):
         result = self.db.dtree.getPathIDsByID(directoryID)
         if not result["OK"]:
             return result
         parentIDs = result["Value"]
         dirDict = directorySEDict[directoryID]
         for seID in sortList(dirDict.keys()):
             seDict = dirDict[seID]
             files = seDict["Files"]
             size = seDict["Size"]
             for dirID in parentIDs:
                 req = (
                     "UPDATE FC_DirectoryUsage SET SESize=SESize%s%d, SEFiles=SEFiles%s%d, LastUpdate=UTC_TIMESTAMP() "
                     % (change, size, change, files)
                 )
                 req += "WHERE DirID=%d AND SEID=%d;" % (dirID, seID)
                 res = self.db._update(req)
                 if not res["OK"]:
                     gLogger.warn("Failed to update FC_DirectoryUsage", res["Message"])
                 if res["Value"]:
                     continue
                 if change != "+":
                     gLogger.warn("Decrement of usage for DirID,SEID that didnt exist", "%d %d" % (dirID, seID))
                     continue
                 req = "INSERT INTO FC_DirectoryUsage (DirID, SEID, SESize, SEFiles, LastUpdate)"
                 req += " VALUES (%d, %d, %d, %d, UTC_TIMESTAMP());" % (dirID, seID, size, files)
                 res = self.db._update(req)
                 if not res["OK"]:
                     gLogger.warn("Failed to insert FC_DirectoryUsage", res["Message"])
     return S_OK()
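The body of the loop is a classic "update, then insert if nothing was updated" upsert: self.db._update apparently reports the number of affected rows in res['Value'], so a zero falls through to the INSERT branch. A self-contained sketch of the same pattern with sqlite3 (table and column names here are illustrative only):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE usage (dirID INTEGER, seID INTEGER, files INTEGER)')

def addUsage(dirID, seID, files):
    # Try the in-place update first...
    cur = conn.execute('UPDATE usage SET files = files + ? WHERE dirID = ? AND seID = ?',
                       (files, dirID, seID))
    if cur.rowcount == 0:
        # ...and fall back to creating the row if it did not exist yet.
        conn.execute('INSERT INTO usage (dirID, seID, files) VALUES (?, ?, ?)',
                     (dirID, seID, files))

addUsage(1, 7, 10)
addUsage(1, 7, 5)
print(conn.execute('SELECT files FROM usage').fetchone()[0])  # prints 15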
Example #4
    def test_getTransformations(self):
        """ Testing the selection of transformations from the database

          getTransformations
         
        This will select all the transformations associated to this test suite and remove them.
    """
        oTrans = Transformation()
        res = oTrans.getTransformations()
        self.assert_(res['OK'])
        parameters = [
            'TransformationID', 'TransformationName', 'Description',
            'LongDescription', 'CreationDate', 'LastUpdate', 'AuthorDN',
            'AuthorGroup', 'Type', 'Plugin', 'AgentType', 'Status', 'FileMask',
            'TransformationGroup', 'GroupSize', 'InheritedFrom', 'Body',
            'MaxNumberOfTasks', 'EventsPerTask'
        ]
        self.assertEqual(sortList(res['ParameterNames']), sortList(parameters))
        self.assertEqual(sortList(res['Value'][0].keys()),
                         sortList(parameters))
        self.assertEqual(len(res['Value']), len(res['Records']))
        ignore = self.transID
        for transDict in res['Value']:
            name = transDict['TransformationName']
            if re.search('TestClientTransformation', name):
                transID = transDict['TransformationID']
                if transID != ignore:
                    oTrans = Transformation(transID)
                    res = oTrans.deleteTransformation()
                    self.assert_(res['OK'])
        self.transID = ignore
Example #5
def _printFormattedDictList(dictList, fields, uniqueField, orderBy):
    """ Will print ordered the supplied field of a list of dictionaries """
    orderDict = {}
    fieldWidths = {}
    dictFields = {}
    for myDict in dictList:
        for field in fields:
            fieldValue = myDict[field]
            if not fieldWidths.has_key(field):
                fieldWidths[field] = len(str(field))
            if len(str(fieldValue)) > fieldWidths[field]:
                fieldWidths[field] = len(str(fieldValue))
        orderValue = myDict[orderBy]
        if not orderDict.has_key(orderValue):
            orderDict[orderValue] = []
        orderDict[orderValue].append(myDict[uniqueField])
        dictFields[myDict[uniqueField]] = myDict
    headString = "%s" % fields[0].ljust(fieldWidths[fields[0]] + 5)
    for field in fields[1:]:
        headString = "%s %s" % (headString,
                                field.ljust(fieldWidths[field] + 5))
    print headString
    for orderValue in sortList(orderDict.keys()):
        uniqueFields = orderDict[orderValue]
        for uniqueField in sortList(uniqueFields):
            myDict = dictFields[uniqueField]
            outStr = "%s" % str(
                myDict[fields[0]]).ljust(fieldWidths[fields[0]] + 5)
            for field in fields[1:]:
                outStr = "%s %s" % (outStr, str(
                    myDict[field]).ljust(fieldWidths[field] + 5))
            print outStr
Example #6
    def do_addReplica(self, args):
        ''' Add new replica to the transformation DB

    usage: addReplica <lfn> <se>
    '''
        argss = args.split()
        if not len(argss) == 2:
            print "no file info supplied"
            return
        lfn = argss[0]
        se = argss[1]
        lfnDict = {}
        lfnDict[lfn] = {
            'PFN': 'IGNORED-PFN',
            'SE': se,
            'Size': 0,
            'GUID': 'IGNORED-GUID',
            'Checksum': 'IGNORED-CHECKSUM'
        }
        res = self.server.addReplica(lfnDict, force=True)
        if not res['OK']:
            print "failed to add replica: %s" % res['Message']
            return
        for lfn in sortList(res['Value']['Failed'].keys()):
            error = res['Value']['Failed'][lfn]
            print "failed to add replica: %s" % (error)
        for lfn in sortList(res['Value']['Successful'].keys()):
            print "added %s" % lfn
Example #7
  def __globalStatJob(self,siteName):

    RPC = getRPCClient("WorkloadManagement/JobMonitoring")
    result = RPC.getStates()
    stat = []
    if result["OK"] and len(result["Value"])>0:
      for i in result["Value"]:
        stat.append(str(i))
    result = RPC.getJobPageSummaryWeb({'Site': [siteName]},[["JobID","DESC"]],0,1,False)
    if result["OK"]:
      result = result["Value"]
      back = []
      if result.has_key("Extras"):
        extra = result["Extras"]
        if len(stat) > 0:
          for i in sortList(stat):
            if i in sortList(extra.keys()):
              back.append([i,extra[i]])
            else:
              back.append([i,"-"])
        else:
          for i in sortList(extra.keys()):
            back.append([i,extra[i]])
      c.result = {"success":"true","result":back}
      c.result = back
    else:
      c.result = {"success":"false","error":result["Message"]}
    gLogger.info("\033[0;31m R E S U L T: \033[0m",c.result)
    return c.result
Example #8
 def test_ExtendCleanTransformation(self):
   """ Tests the extension of transformations and the removal of tasks. Also obtain tasks, their status and update their status.
       
         extendTransformation()
         getTransformationTasks()
         getTransformationTaskStats()
         deleteTasks()
         setTaskStatus()
         cleanTransformation()
       
       Tests that a transformation can be extended.
       Tests that the transformation tasks and their statistics can be obtained.
       Tests the removal of already created tasks.
       Tests that the status of a task can be changed.
       Tests that cleaning a transformation removes the tasks defined for it.
   """
   oTrans = Transformation(self.transID)
   nTasks = 100
   res = oTrans.extendTransformation(nTasks)
   self.assert_(res['OK'])
   taskIDs = res['Value']
   self.assertEqual(len(taskIDs),nTasks)
   res = oTrans.getTransformationTasks()
   self.assert_(res['OK'])
   parameters = ['TargetSE', 'TransformationID', 'LastUpdateTime', 'ExternalID', 'CreationTime', 'TaskID', 'ExternalStatus']
   self.assertEqual(sortList(res['ParameterNames']),sortList(parameters))
   self.assertEqual(sortList(res['Value'][0].keys()),sortList(parameters))
   self.assertEqual(res['Value'][0]['TargetSE'],'Unknown')
   self.assertEqual(res['Value'][0]['TransformationID'],self.transID)
   self.assertEqual(res['Value'][0]['ExternalID'],'0')
   self.assertEqual(res['Value'][0]['TaskID'],1)
   self.assertEqual(res['Value'][0]['ExternalStatus'],'Created')
   self.assertEqual(res['Records'][0][0],1)
   self.assertEqual(res['Records'][0][1],self.transID)
   self.assertEqual(res['Records'][0][2],'Created')
   self.assertEqual(res['Records'][0][3],'0')
   self.assertEqual(res['Records'][0][4],'Unknown')
   res = oTrans.getTransformationTaskStats()
   self.assert_(res['OK'])
   self.assertEqual(res['Value']['Created'],100)
   res = oTrans.deleteTasks(11,100)
   self.assert_(res['OK'])
   res = oTrans.getTransformationTaskStats()
   self.assert_(res['OK'])
   self.assertEqual(res['Value']['Created'],10)
   res = oTrans.setTaskStatus(1, 'Done')
   self.assert_(res['OK'])
   res = oTrans.getTransformationTaskStats()
   self.assert_(res['OK'])
   self.assertEqual(res['Value']['Created'],10)
   self.assertEqual(res['Value']['Done'],1)
   res = oTrans.cleanTransformation()
   self.assert_(res['OK'])
   res = oTrans.getStatus()
   self.assert_(res['OK'])
   self.assertEqual(res['Value'],'Cleaned')
   res = oTrans.getTransformationTasks()
   self.assert_(res['OK'])
   self.assertFalse(res['Value'])
   self.assertFalse(res['Records'])
Example #9
    def __globalStatJob(self, siteName):

        RPC = getRPCClient("WorkloadManagement/JobMonitoring")
        result = RPC.getStates()
        stat = []
        if result["OK"] and len(result["Value"]) > 0:
            for i in result["Value"]:
                stat.append(str(i))
        result = RPC.getJobPageSummaryWeb({'Site': [siteName]},
                                          [["JobID", "DESC"]], 0, 1, False)
        if result["OK"]:
            result = result["Value"]
            back = []
            if result.has_key("Extras"):
                extra = result["Extras"]
                if len(stat) > 0:
                    for i in sortList(stat):
                        if i in sortList(extra.keys()):
                            back.append([i, extra[i]])
                        else:
                            back.append([i, "-"])
                else:
                    for i in sortList(extra.keys()):
                        back.append([i, extra[i]])
            c.result = {"success": "true", "result": back}
            c.result = back
        else:
            c.result = {"success": "false", "error": result["Message"]}
        gLogger.info("\033[0;31m R E S U L T: \033[0m", c.result)
        return c.result
Example #10
    def do_removeReplica(self, args):
        """Remove replica from the transformation DB

    usage: removeReplica <lfn> <se>
    """
        argss = string.split(args)
        if not len(argss) == 2:
            print "no file info supplied"
            return
        lfn = argss[0]
        se = argss[1]
        lfnDict = {}
        lfnDict[lfn] = {
            "PFN": "IGNORED-PFN",
            "SE": se,
            "Size": 0,
            "GUID": "IGNORED-GUID",
            "Checksum": "IGNORED-CHECKSUM",
        }
        res = self.server.removeReplica(lfnDict)
        if not res["OK"]:
            print "failed to remove replica: %s" % res["Message"]
            return
        for lfn in sortList(res["Value"]["Failed"].keys()):
            error = res["Value"]["Failed"][lfn]
            print "failed to remove replica: %s" % (error)
        for lfn in sortList(res["Value"]["Successful"].keys()):
            print "removed %s" % lfn
Example #11
    def dump(self):
        """
      Dump to the logger a summary of the StorageElement items
    """
        gLogger.info(
            "StorageElement.dump: Preparing dump for StorageElement %s." %
            self.name)
        if not self.valid:
            gLogger.error(
                "StorageElement.dump: Failed to create StorageElement plugins.",
                self.errorReason)
            return
        i = 1
        outStr = "\n\n============ Options ============\n"
        for key in sortList(self.options.keys()):
            outStr = "%s%s: %s\n" % (outStr, key.ljust(15), self.options[key])

        for storage in self.storages:
            outStr = "%s============Protocol %s ============\n" % (outStr, i)
            res = storage.getParameters()
            storageParameters = res['Value']
            for key in sortList(storageParameters.keys()):
                outStr = "%s%s: %s\n" % (outStr, key.ljust(15),
                                         storageParameters[key])
            i = i + 1
        gLogger.info(outStr)
Example #12
    def do_setReplicaStatus(self, args):
        """Set replica status, usually used to mark a replica Problematic

    usage: setReplicaStatus <lfn> <status> <se>
    """
        argss = string.split(args)
        if not len(argss) > 2:
            print "no file info supplied"
            return
        lfn = argss[0]
        status = argss[1]
        se = argss[2]
        lfnDict = {}
        lfnDict[lfn] = {
            "Status": status,
            "PFN": "IGNORED-PFN",
            "SE": se,
            "Size": 0,
            "GUID": "IGNORED-GUID",
            "Checksum": "IGNORED-CHECKSUM",
        }
        res = self.server.setReplicaStatus(lfnDict)
        if not res["OK"]:
            print "failed to set replica status: %s" % res["Message"]
            return
        for lfn in sortList(res["Value"]["Failed"].keys()):
            error = res["Value"]["Failed"][lfn]
            print "failed to set replica status: %s" % (error)
        for lfn in sortList(res["Value"]["Successful"].keys()):
            print "updated replica status %s" % lfn
Example #13
 def _updateDirectoryUsage( self, directorySEDict, change, connection = False ):
   connection = self._getConnection( connection )
   for dirID in sortList( directorySEDict.keys() ):
     dirDict = directorySEDict[dirID]
     for seID in sortList( dirDict.keys() ):
       seDict = dirDict[seID]
       files = seDict['Files']
       size = seDict['Size']
       req = "UPDATE FC_DirectoryUsage SET SESize=SESize%s%d, SEFiles=SEFiles%s%d, LastUpdate=UTC_TIMESTAMP() " \
                                                        % ( change, size, change, files )
       req += "WHERE DirID=%d AND SEID=%d;" % ( dirID, seID )
       res = self.db._update( req )
       if not res['OK']:
         gLogger.warn( "Failed to update FC_DirectoryUsage", res['Message'] )
       if res['Value']:
         continue
       if change != '+':
         gLogger.warn( "Decrement of usage for DirID,SEID that didn't exist", "%d %d" % ( dirID, seID ) )
         continue
       req = "INSERT INTO FC_DirectoryUsage (DirID, SEID, SESize, SEFiles, LastUpdate)"
       req += " VALUES (%d, %d, %d, %d, UTC_TIMESTAMP());" % ( dirID, seID, size, files )
       res = self.db._update( req )
       if not res['OK']:
         gLogger.warn( "Failed to insert FC_DirectoryUsage", res['Message'] )
   return S_OK()
Example #14
    def do_setReplicaStatus(self, args):
        """Set replica status, usually used to mark a replica Problematic

    usage: setReplicaStatus <lfn> <status> <se>
    """
        argss = string.split(args)
        if not len(argss) > 2:
            print "no file info supplied"
            return
        lfn = argss[0]
        status = argss[1]
        se = argss[2]
        lfnDict = {}
        lfnDict[lfn] = {
            'Status': status,
            'PFN': 'IGNORED-PFN',
            'SE': se,
            'Size': 0,
            'GUID': 'IGNORED-GUID',
            'Checksum': 'IGNORED-CHECKSUM'
        }
        res = self.server.setReplicaStatus(lfnDict)
        if not res['OK']:
            print "failed to set replica status: %s" % res['Message']
            return
        for lfn in sortList(res['Value']['Failed'].keys()):
            error = res['Value']['Failed'][lfn]
            print "failed to set replica status: %s" % (error)
        for lfn in sortList(res['Value']['Successful'].keys()):
            print "updated replica status %s" % lfn
Example #15
  def initialize( self ):
    """Sets defaults """
    self.replicaManager = ReplicaManager()
    self.transClient = TransformationClient()
    self.wmsClient = WMSClient()
    self.requestClient = RequestClient()
    self.metadataClient = FileCatalogClient()
    self.storageUsageClient = StorageUsageClient()

    # This sets the default proxy to be used to the one defined under
    # /Operations/Shifter/DataManager.
    # The shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption( 'shifterProxy', 'DataManager' )

    self.transformationTypes = sortList( self.am_getOption( 'TransformationTypes', ['MCSimulation', 'DataReconstruction', 'DataStripping', 'MCStripping', 'Merge', 'Replication'] ) )
    gLogger.info( "Will consider the following transformation types: %s" % str( self.transformationTypes ) )
    self.directoryLocations = sortList( self.am_getOption( 'DirectoryLocations', ['TransformationDB', 'StorageUsage', 'MetadataCatalog'] ) )
    gLogger.info( "Will search for directories in the following locations: %s" % str( self.directoryLocations ) )
    self.transfidmeta = self.am_getOption( 'TransfIDMeta', "TransformationID" )
    gLogger.info( "Will use %s as metadata tag name for TransformationID" % self.transfidmeta )
    self.archiveAfter = self.am_getOption( 'ArchiveAfter', 7 ) # days
    gLogger.info( "Will archive Completed transformations after %d days" % self.archiveAfter )
    self.activeStorages = sortList( self.am_getOption( 'ActiveSEs', [] ) )
    gLogger.info( "Will check the following storage elements: %s" % str( self.activeStorages ) )
    self.logSE = self.am_getOption( 'TransformationLogSE', 'LogSE' )
    gLogger.info( "Will remove logs found on storage element: %s" % self.logSE )
    return S_OK()
Example #16
    def do_removeReplica(self, args):
        """Remove replica from the transformation DB

    usage: removeReplica <lfn> <se>
    """
        argss = string.split(args)
        if not len(argss) == 2:
            print "no file info supplied"
            return
        lfn = argss[0]
        se = argss[1]
        lfnDict = {}
        lfnDict[lfn] = {
            'PFN': 'IGNORED-PFN',
            'SE': se,
            'Size': 0,
            'GUID': 'IGNORED-GUID',
            'Checksum': 'IGNORED-CHECKSUM'
        }
        res = self.server.removeReplica(lfnDict)
        if not res['OK']:
            print "failed to remove replica: %s" % res['Message']
            return
        for lfn in sortList(res['Value']['Failed'].keys()):
            error = res['Value']['Failed'][lfn]
            print "failed to remove replica: %s" % (error)
        for lfn in sortList(res['Value']['Successful'].keys()):
            print "removed %s" % lfn
Example #17
    def do_addFile(self, args):
        """Add new files to transformation DB

    usage: addFile <lfn> [lfn]
    """
        argss = string.split(args)
        if not len(argss) > 0:
            print "no files supplied"
            return
        lfnDict = {}
        for lfn in argss:
            lfnDict[lfn] = {
                'PFN': 'IGNORED-PFN',
                'SE': 'IGNORED-SE',
                'Size': 0,
                'GUID': 'IGNORED-GUID',
                'Checksum': 'IGNORED-CHECKSUM'
            }
        res = self.server.addFile(lfnDict, force=True)
        if not res['OK']:
            print "failed to add any files: %s" % res['Message']
            return
        for lfn in sortList(res['Value']['Failed'].keys()):
            error = res['Value']['Failed'][lfn]
            print "failed to add %s: %s" % (lfn, error)
        for lfn in sortList(res['Value']['Successful'].keys()):
            print "added %s" % lfn
Example #18
 def _updateDirectoryUsage(self, directorySEDict, change, connection=False):
     connection = self._getConnection(connection)
     for dirID in sortList(directorySEDict.keys()):
         dirDict = directorySEDict[dirID]
         for seID in sortList(dirDict.keys()):
             seDict = dirDict[seID]
             files = seDict['Files']
             size = seDict['Size']
             req = "UPDATE FC_DirectoryUsage SET SESize=SESize%s%d, SEFiles=SEFiles%s%d, LastUpdate=UTC_TIMESTAMP() " \
                                                              % ( change, size, change, files )
             req += "WHERE DirID=%d AND SEID=%d;" % (dirID, seID)
             res = self.db._update(req)
             if not res['OK']:
                 gLogger.warn("Failed to update FC_DirectoryUsage",
                              res['Message'])
             if res['Value']:
                 continue
             if change != '+':
                 gLogger.warn(
                     "Decrement of usage for DirID,SEID that didnt exist",
                     "%d %d" % (dirID, seID))
                 continue
             req = "INSERT INTO FC_DirectoryUsage (DirID, SEID, SESize, SEFiles, LastUpdate)"
             req += " VALUES (%d, %d, %d, %d, UTC_TIMESTAMP());" % (
                 dirID, seID, size, files)
             res = self.db._update(req)
             if not res['OK']:
                 gLogger.warn("Failed to insert FC_DirectoryUsage",
                              res['Message'])
     return S_OK()
Example #19
  def __checkPhysicalFiles( self, replicas, catalogMetadata, ses = [] ):
    """ This obtains the physical file metadata and checks the metadata against the catalog entries
    """
    sePfns = {}
    pfnLfns = {}
    for lfn, replicaDict in replicas.items():
      for se, pfn in replicaDict.items():
        if ( ses ) and ( se not in ses ):
          continue
        if not sePfns.has_key( se ):
          sePfns[se] = []
        sePfns[se].append( pfn )
        pfnLfns[pfn] = lfn
    gLogger.info( '%s %s' % ( 'Storage Element'.ljust( 20 ), 'Replicas'.rjust( 20 ) ) )
    for site in sortList( sePfns.keys() ):
      files = len( sePfns[site] )
      gLogger.info( '%s %s' % ( site.ljust( 20 ), str( files ).rjust( 20 ) ) )

    for se in sortList( sePfns.keys() ):
      pfns = sePfns[se]
      pfnDict = {}
      for pfn in pfns:
        pfnDict[pfn] = pfnLfns[pfn]
      sizeMismatch = []
      res = self.__checkPhysicalFileMetadata( pfnDict, se )
      if not res['OK']:
        gLogger.error( 'Failed to get physical file metadata.', res['Message'] )
        return res
      for pfn, metadata in res['Value'].items():
        if catalogMetadata.has_key( pfnLfns[pfn] ):
          if ( metadata['Size'] != catalogMetadata[pfnLfns[pfn]]['Size'] ) and ( metadata['Size'] != 0 ):
            sizeMismatch.append( ( pfnLfns[pfn], pfn, se, 'CatalogPFNSizeMismatch' ) )
      if sizeMismatch:
        self.__reportProblematicReplicas( sizeMismatch, se, 'CatalogPFNSizeMismatch' )
    return S_OK()
Example #20
    def test_AddFilesGetFilesSetFileStatus(self):
        """ Testing adding, getting and setting file status.

          addFilesToTransformation()
          getTransformationFiles()
          getTransformationStats()
          setFileStatusForTransformation()
          addTaskForTransformation()

        Test adding files to the transformation.
        Test selecting the files for the transformation.
        Test getting the status count of the transformation files.
        Test setting the file status for transformation.
        Test creating a task for the added files and ensure the status is updated correctly.
    """
        oTrans = Transformation(self.transID)
        lfns = ['/test/lfn/file1', '/test/lfn/file2']
        res = oTrans.addFilesToTransformation(lfns)
        self.assert_(res['OK'])
        res = oTrans.getTransformationFiles()
        self.assert_(res['OK'])
        self.assertEqual(sortList(lfns), res['LFNs'])
        self.assertEqual(len(lfns), len(res['Records']))
        self.assertEqual(len(lfns), len(res['Value']))
        fileParams = sortList([
            'LFN', 'TransformationID', 'FileID', 'Status', 'TaskID',
            'TargetSE', 'UsedSE', 'ErrorCount', 'LastUpdate', 'InsertedTime'
        ])
        self.assertEqual(fileParams, sortList(res['ParameterNames']))
        self.assertEqual(res['Records'][0][0], lfns[0])
        self.assertEqual(res['Value'][0]['LFN'], lfns[0])
        self.assertEqual(res['Records'][0][1], self.transID)
        self.assertEqual(res['Value'][0]['TransformationID'], self.transID)
        self.assertEqual(res['Records'][0][3], 'Unused')
        self.assertEqual(res['Value'][0]['Status'], 'Unused')
        res = oTrans.getTransformationStats()
        self.assert_(res['OK'])
        self.assertEqual(res['Value']['Total'], 2)
        self.assertEqual(res['Value']['Unused'], 2)
        res = oTrans.setFileStatusForTransformation('Processed', [lfns[0]])
        self.assert_(res['OK'])
        res = oTrans.getTransformationStats()
        self.assert_(res['OK'])
        self.assertEqual(res['Value']['Total'], 2)
        self.assertEqual(res['Value']['Unused'], 1)
        self.assertEqual(res['Value']['Processed'], 1)
        res = oTrans.setFileStatusForTransformation('Unused', [lfns[0]])
        self.assert_(res['OK'])
        self.assert_(res['Value']['Failed'].has_key(lfns[0]))
        self.assertEqual(res['Value']['Failed'][lfns[0]],
                         'Can not change Processed status')
        res = oTrans.addTaskForTransformation(lfns=[lfns[1]], se='Test')
        self.assert_(res['OK'])
        res = oTrans.getTransformationStats()
        self.assert_(res['OK'])
        self.assertEqual(res['Value']['Total'], 2)
        self.assertEqual(res['Value']['Assigned'], 1)
        self.assertEqual(res['Value']['Processed'], 1)
Example #21
    def _ByShare(self, shareType='CPU'):
        """ first get the shares from the CS, and then makes the grouping looking at the history
    """
        res = self._getShares(shareType, normalise=True)
        if not res['OK']:
            return res
        cpuShares = res['Value']
        gLogger.info("Obtained the following target shares (%):")
        for site in sortList(cpuShares.keys()):
            gLogger.info("%s: %.1f" % (site.ljust(15), cpuShares[site]))

        # Get the existing destinations from the transformationDB
        res = self._getExistingCounters(requestedSites=cpuShares.keys())
        if not res['OK']:
            gLogger.error("Failed to get existing file share", res['Message'])
            return res
        existingCount = res['Value']
        if existingCount:
            gLogger.info("Existing site utilization (%):")
            normalisedExistingCount = self._normaliseShares(
                existingCount.copy())
            for se in sortList(normalisedExistingCount.keys()):
                gLogger.info("%s: %.1f" %
                             (se.ljust(15), normalisedExistingCount[se]))

        # Group the input files by their existing replicas
        res = self._groupByReplicas()
        if not res['OK']:
            return res
        replicaGroups = res['Value']

        tasks = []
        # For the replica groups
        for replicaSE, lfns in replicaGroups:
            possibleSEs = replicaSE.split(',')
            # Determine the next site based on requested shares, existing usage and candidate sites
            res = self._getNextSite(
                existingCount,
                cpuShares,
                candidates=self._getSitesForSEs(possibleSEs))
            if not res['OK']:
                gLogger.error("Failed to get next destination SE",
                              res['Message'])
                continue
            targetSite = res['Value']
            # Resolve the ses for the target site
            res = getSEsForSite(targetSite)
            if not res['OK']:
                continue
            ses = res['Value']
            # Determine the selected SE and create the task
            for chosenSE in ses:
                if chosenSE in possibleSEs:
                    tasks.append((chosenSE, lfns))
                    if not existingCount.has_key(targetSite):
                        existingCount[targetSite] = 0
                    existingCount[targetSite] += len(lfns)
        return S_OK(tasks)
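Neither _getShares nor _normaliseShares is shown on this page; the logic above only requires that the returned shares sum to 100 so they can be logged as percentages. A plausible sketch of such a normalisation, assuming the helper simply rescales a dictionary of raw shares:

def normaliseShares(shares):
    # Rescale raw share values so that they sum to 100 (percent).
    total = float(sum(shares.values()))
    return dict((site, 100.0 * value / total) for site, value in shares.items())

print(normaliseShares({'LCG.CERN.ch': 3, 'LCG.CNAF.it': 1}))
# LCG.CERN.ch -> 75.0, LCG.CNAF.it -> 25.0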
Example #22
  def test_AddFilesGetFilesSetFileStatus(self):
    """ Testing adding, getting and setting file status.

          addFilesToTransformation()
          getTransformationFiles()
          getTransformationStats()
          setFileStatusForTransformation()
          addTaskForTransformation()

        Test adding files to the transformation.
        Test selecting the files for the transformation.
        Test getting the status count of the transformation files.
        Test setting the file status for transformation.
        Test creating a task for the added files and ensure the status is updated correctly.
    """
    oTrans = Transformation(self.transID)
    lfns = ['/test/lfn/file1','/test/lfn/file2']
    res = oTrans.addFilesToTransformation(lfns)
    self.assert_(res['OK'])
    res = oTrans.getTransformationFiles()
    self.assert_(res['OK'])
    self.assertEqual(sortList(lfns),res['LFNs'])
    self.assertEqual(len(lfns),len(res['Records']))
    self.assertEqual(len(lfns),len(res['Value']))
    fileParams = sortList(['LFN', 'TransformationID', 'FileID', 'Status', 'TaskID', 'TargetSE', 'UsedSE', 'ErrorCount', 'LastUpdate', 'InsertedTime'])
    self.assertEqual(fileParams,sortList(res['ParameterNames']))
    self.assertEqual(res['Records'][0][0], lfns[0])
    self.assertEqual(res['Value'][0]['LFN'],lfns[0]) 
    self.assertEqual(res['Records'][0][1], self.transID)
    self.assertEqual(res['Value'][0]['TransformationID'], self.transID)
    self.assertEqual(res['Records'][0][3],'Unused')
    self.assertEqual(res['Value'][0]['Status'],'Unused')
    res = oTrans.getTransformationStats()
    self.assert_(res['OK'])
    self.assertEqual(res['Value']['Total'],2)
    self.assertEqual(res['Value']['Unused'],2)
    res = oTrans.setFileStatusForTransformation('Processed',[lfns[0]])
    self.assert_(res['OK'])
    res = oTrans.getTransformationStats()
    self.assert_(res['OK'])
    self.assertEqual(res['Value']['Total'],2)
    self.assertEqual(res['Value']['Unused'],1)
    self.assertEqual(res['Value']['Processed'],1)
    res = oTrans.setFileStatusForTransformation('Unused',[lfns[0]])
    self.assert_(res['OK'])
    self.assert_(res['Value']['Failed'].has_key(lfns[0]))
    self.assertEqual(res['Value']['Failed'][lfns[0]],'Can not change Processed status')
    res = oTrans.addTaskForTransformation(lfns=[lfns[1]],se='Test')
    self.assert_(res['OK'])
    res = oTrans.getTransformationStats()
    self.assert_(res['OK'])
    self.assertEqual(res['Value']['Total'],2)
    self.assertEqual(res['Value']['Assigned'],1)
    self.assertEqual(res['Value']['Processed'],1)
Example #23
 def __verifyPfns( self, pfnSizes, storageElements ):
   gLogger.info( 'Checking %s storage files exist in the catalog' % len( pfnSizes ) )
   pfnsToRemove = []
   incorrectlyRegistered = []
   allDone = True
   # First get all the PFNs as they should be registered in the catalog
   for pfns in breakListIntoChunks( sortList( pfnSizes.keys() ), 100 ):
     res = self.replicaManager.getPfnForProtocol( pfns, storageElements[0], withPort = False )
     if not res['OK']:
       allDone = False
       continue
     for pfn, error in res['Value']['Failed'].items():
       gLogger.error( 'Failed to obtain registered PFN for physical file', '%s %s' % ( pfn, error ) )
     if res['Value']['Failed']:
       allDone = False
     catalogStoragePfns = res['Value']['Successful']
     # Determine whether these PFNs are registered and if so obtain the LFN
     res = self.replicaManager.getCatalogLFNForPFN( catalogStoragePfns.values() )
     if not res['OK']:
       allDone = False
       continue
     for surl in sortList( res['Value']['Failed'].keys() ):
       if res['Value']['Failed'][surl] == 'No such file or directory':
         #pfnsToRemove.append(surl)
         print surl
       else:
         gLogger.error( 'Failed to get LFN for PFN', '%s %s' % ( surl, res['Value']['Failed'][surl] ) )
     existingLFNs = res['Value']['Successful'].values()
     if existingLFNs:
       res = self.replicaManager.getCatalogReplicas( existingLFNs )
       if not res['OK']:
         allDone = False
         continue
       for lfn, error in res['Value']['Failed'].items():
         gLogger.error( 'Failed to obtain registered replicas for LFN', '%s %s' % ( lfn, error ) )
       if res['Value']['Failed']:
         allDone = False
       for lfn, replicas in res['Value']['Successful'].items():
         match = False
         for storageElement in storageElements:
           if storageElement in replicas.keys():
             match = True
         if not match:
           pass#incorrectlyRegistered.append(lfn)
           #print lfn
   gLogger.info( "Verification of PFNs complete" )
   if incorrectlyRegistered:
     gLogger.info( "Found %d files incorrectly registered" % len( incorrectlyRegistered ) )
   if pfnsToRemove:
     gLogger.info( "Found %d files to be removed" % len( pfnsToRemove ) )
   resDict = {'Remove':pfnsToRemove, 'ReRegister':incorrectlyRegistered, 'AllDone':allDone}
   return S_OK( resDict )
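__verifyPfns walks the PFNs 100 at a time via breakListIntoChunks, another DIRAC.Core.Utilities.List helper. A minimal sketch of the assumed behaviour, producing consecutive slices of at most chunkSize elements:

def breakListIntoChunks(aList, chunkSize):
    # Split aList into consecutive sublists of at most chunkSize elements.
    return [aList[i:i + chunkSize] for i in range(0, len(aList), chunkSize)]

print(breakListIntoChunks([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]]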
Example #24
  def _ByShare( self, shareType = 'CPU' ):
    """ first get the shares from the CS, and then makes the grouping looking at the history
    """
    res = self._getShares( shareType, normalise = True )
    if not res['OK']:
      return res
    cpuShares = res['Value']
    gLogger.info( "Obtained the following target shares (%):" )
    for site in sortList( cpuShares.keys() ):
      gLogger.info( "%s: %.1f" % ( site.ljust( 15 ), cpuShares[site] ) )

    # Get the existing destinations from the transformationDB
    res = self._getExistingCounters( requestedSites = cpuShares.keys() )
    if not res['OK']:
      gLogger.error( "Failed to get existing file share", res['Message'] )
      return res
    existingCount = res['Value']
    if existingCount:
      gLogger.info( "Existing site utilization (%):" )
      normalisedExistingCount = self._normaliseShares( existingCount.copy() )
      for se in sortList( normalisedExistingCount.keys() ):
        gLogger.info( "%s: %.1f" % ( se.ljust( 15 ), normalisedExistingCount[se] ) )

    # Group the input files by their existing replicas
    res = self._groupByReplicas()
    if not res['OK']:
      return res
    replicaGroups = res['Value']

    tasks = []
    # For the replica groups 
    for replicaSE, lfns in replicaGroups:
      possibleSEs = replicaSE.split( ',' )
      # Determine the next site based on requested shares, existing usage and candidate sites
      res = self._getNextSite( existingCount, cpuShares, candidates = self._getSitesForSEs( possibleSEs ) )
      if not res['OK']:
        gLogger.error( "Failed to get next destination SE", res['Message'] )
        continue
      targetSite = res['Value']
      # Resolve the ses for the target site
      res = getSEsForSite( targetSite )
      if not res['OK']:
        continue
      ses = res['Value']
      # Determine the selected SE and create the task 
      for chosenSE in ses:
        if chosenSE in possibleSEs:
          tasks.append( ( chosenSE, lfns ) )
          if not existingCount.has_key( targetSite ):
            existingCount[targetSite] = 0
          existingCount[targetSite] += len( lfns )
    return S_OK( tasks )
Example #25
    def checkTransformationIntegrity(self, transID):
        """ This method contains the real work
    """
        gLogger.info("-" * 40)
        gLogger.info("Checking the integrity of transformation %s" % transID)
        gLogger.info("-" * 40)

        res = self.getTransformationDirectories(transID)
        if not res['OK']:
            return res
        directories = res['Value']
        if not directories:
            return S_OK()

        ######################################################
        #
        # This check performs Catalog->SE for possible output directories
        #
        res = self.replicaManager.getCatalogExists(directories)
        if not res['OK']:
            gLogger.error(res['Message'])
            return res
        for directory, error in res['Value']['Failed'].items():
            gLogger.error('Failed to determine existence of directory',
                          '%s %s' % (directory, error))
        if res['Value']['Failed']:
            return S_ERROR("Failed to determine the existence of directories")
        directoryExists = res['Value']['Successful']
        for directory in sortList(directoryExists.keys()):
            if not directoryExists[directory]:
                continue
            iRes = self.integrityClient.catalogDirectoryToSE(directory)
            if not iRes['OK']:
                gLogger.error(iRes['Message'])
                return iRes

        ######################################################
        #
        # This check performs SE->Catalog for possible output directories
        #
        for storageElementName in sortList(self.activeStorages):
            res = self.integrityClient.storageDirectoryToCatalog(
                directories, storageElementName)
            if not res['OK']:
                gLogger.error(res['Message'])
                return res

        gLogger.info("-" * 40)
        gLogger.info("Completed integrity check for transformation %s" %
                     transID)
        return S_OK()
Example #26
def initializeFileCatalogHandler(serviceInfo):

    global fcDB

    serviceCS = serviceInfo['serviceSectionPath']

    # Instantiate the requested database
    dbLocation = gConfig.getValue('%s/Database' % serviceCS,
                                  'DataManagement/FileCatalogDB')
    fcDB = FileCatalogDB(dbLocation)

    databaseConfig = {}
    # Obtain the plugins to be used for DB interaction
    gLogger.info("Initializing with FileCatalog with following managers:")
    defaultManagers = {
        'UserGroupManager': 'UserAndGroupManagerDB',
        'SEManager': 'SEManagerDB',
        'SecurityManager': 'NoSecurityManager',
        'DirectoryManager': 'DirectoryLevelTree',
        'FileManager': 'FileManager',
        'DirectoryMetadata': 'DirectoryMetadata',
        'FileMetadata': 'FileMetadata'
    }
    for configKey in sortList(defaultManagers.keys()):
        defaultValue = defaultManagers[configKey]
        configValue = gConfig.getValue('%s/%s' % (serviceCS, configKey),
                                       defaultValue)
        gLogger.info("%s : %s" %
                     (str(configKey).ljust(20), str(configValue).ljust(20)))
        databaseConfig[configKey] = configValue

    # Obtain some general configuration of the database
    gLogger.info(
        "Initializing the FileCatalog with the following configuration:")
    defaultConfig = {
        'UniqueGUID': False,
        'GlobalReadAccess': True,
        'LFNPFNConvention': True,
        'ResolvePFN': True,
        'DefaultUmask': 0775,
        'VisibleStatus': ['AprioriGood']
    }
    for configKey in sortList(defaultConfig.keys()):
        defaultValue = defaultConfig[configKey]
        configValue = gConfig.getValue('%s/%s' % (serviceCS, configKey),
                                       defaultValue)
        gLogger.info("%s : %s" %
                     (str(configKey).ljust(20), str(configValue).ljust(20)))
        databaseConfig[configKey] = configValue
    res = fcDB.setConfig(databaseConfig)
    return res
Example #27
def initializeFileCatalogHandler(serviceInfo):
    """ handler initialisation """

    global gFileCatalogDB

    dbLocation = getServiceOption(serviceInfo, 'Database',
                                  'DataManagement/FileCatalogDB')
    gFileCatalogDB = FileCatalogDB(dbLocation)

    databaseConfig = {}
    # Obtain the plugins to be used for DB interaction
    gLogger.info("Initializing with FileCatalog with following managers:")
    defaultManagers = {
        'UserGroupManager': 'UserAndGroupManagerDB',
        'SEManager': 'SEManagerDB',
        'SecurityManager': 'NoSecurityManager',
        'DirectoryManager': 'DirectoryLevelTree',
        'FileManager': 'FileManager',
        'DirectoryMetadata': 'DirectoryMetadata',
        'FileMetadata': 'FileMetadata',
        'DatasetManager': 'DatasetManager'
    }
    for configKey in sortList(defaultManagers.keys()):
        defaultValue = defaultManagers[configKey]
        configValue = getServiceOption(serviceInfo, configKey, defaultValue)
        gLogger.info("%-20s : %-20s" % (str(configKey), str(configValue)))
        databaseConfig[configKey] = configValue

    # Obtain some general configuration of the database
    gLogger.info(
        "Initializing the FileCatalog with the following configuration:")
    defaultConfig = {
        'UniqueGUID': False,
        'GlobalReadAccess': True,
        'LFNPFNConvention': 'Strong',
        'ResolvePFN': True,
        'DefaultUmask': 0775,
        'ValidFileStatus': ['AprioriGood', 'Trash', 'Removing', 'Probing'],
        'ValidReplicaStatus': ['AprioriGood', 'Trash', 'Removing', 'Probing'],
        'VisibleFileStatus': ['AprioriGood'],
        'VisibleReplicaStatus': ['AprioriGood']
    }
    for configKey in sortList(defaultConfig.keys()):
        defaultValue = defaultConfig[configKey]
        configValue = getServiceOption(serviceInfo, configKey, defaultValue)
        gLogger.info("%-20s : %-20s" % (str(configKey), str(configValue)))
        databaseConfig[configKey] = configValue
    res = gFileCatalogDB.setConfig(databaseConfig)
    return res
Example #28
    def initialize(self):
        """ agent initialisation

    reading and setting config opts

    :param self: self reference
    """
        # # shifter proxy
        self.am_setOption('shifterProxy', 'DataManager')
        # # transformations types
        self.dataProcTTypes = Operations().getValue(
            'Transformations/DataProcessing', ['MCSimulation', 'Merge'])
        self.dataManipTTypes = Operations().getValue(
            'Transformations/DataManipulation', ['Replication', 'Removal'])
        agentTSTypes = self.am_getOption('TransformationTypes', [])
        if agentTSTypes:
            self.transformationTypes = sortList(agentTSTypes)
        else:
            self.transformationTypes = sortList(self.dataProcTTypes +
                                                self.dataManipTTypes)
        self.log.info("Will consider the following transformation types: %s" %
                      str(self.transformationTypes))
        # # directory locations
        self.directoryLocations = sortList(
            self.am_getOption('DirectoryLocations',
                              ['TransformationDB', 'MetadataCatalog']))
        self.log.info(
            "Will search for directories in the following locations: %s" %
            str(self.directoryLocations))
        # # transformation metadata
        self.transfidmeta = self.am_getOption('TransfIDMeta',
                                              "TransformationID")
        self.log.info("Will use %s as metadata tag name for TransformationID" %
                      self.transfidmeta)
        # # archive period in days
        self.archiveAfter = self.am_getOption('ArchiveAfter', 7)  # days
        self.log.info("Will archive Completed transformations after %d days" %
                      self.archiveAfter)
        # # active SEs
        self.activeStorages = sortList(self.am_getOption('ActiveSEs', []))
        self.log.info("Will check the following storage elements: %s" %
                      str(self.activeStorages))
        # # transformation log SEs
        self.logSE = self.am_getOption('TransformationLogSE', 'LogSE')
        self.log.info("Will remove logs found on storage element: %s" %
                      self.logSE)
        # # enable/disable execution, should be using CS option Status?? with default value as 'Active'??
        self.enableFlag = self.am_getOption('EnableFlag', 'True')
        return S_OK()
Example #29
  def __init__( self, agentName, loadName, baseAgentName, properties = {} ):
    ''' c'tor
    '''
    AgentModule.__init__( self, agentName, loadName, baseAgentName, properties )

    self.transClient = TransformationClient()
    agentTSTypes = self.am_getOption( 'TransformationTypes', [] )
    if agentTSTypes:
      self.transformationTypes = sortList( agentTSTypes )
    else:
      self.transformationTypes = sortList( Operations().getValue( 'Transformations/ExtendableTransfTypes',
                                                                  ['MCSimulation', 'Simulation'] ) )
    self.maxIterationTasks = self.am_getOption( 'TasksPerIteration', 50 )
    self.maxFailRate = self.am_getOption( 'MaxFailureRate', 30 )
    self.maxWaitingJobs = self.am_getOption( 'MaxWaitingJobs', 1000 )
Example #30
    def __init__(self, *args, **kwargs):
        """ c'tor
    """
        AgentModule.__init__(self, *args, **kwargs)
        TransformationAgentsUtilities.__init__(self)

        #few parameters
        self.pluginLocation = self.am_getOption(
            'PluginLocation',
            'DIRAC.TransformationSystem.Agent.TransformationPlugin')
        self.transformationStatus = self.am_getOption(
            'transformationStatus', ['Active', 'Completing', 'Flush'])
        self.maxFiles = self.am_getOption('MaxFiles', 5000)

        agentTSTypes = self.am_getOption('TransformationTypes', [])
        if agentTSTypes:
            self.transformationTypes = sortList(agentTSTypes)
        else:
            dataProc = Operations().getValue('Transformations/DataProcessing',
                                             ['MCSimulation', 'Merge'])
            dataManip = Operations().getValue(
                'Transformations/DataManipulation', ['Replication', 'Removal'])
            self.transformationTypes = sortList(dataProc + dataManip)

        #clients
        self.transfClient = TransformationClient()

        #for the threading
        self.transQueue = Queue.Queue()
        self.transInQueue = []

        #for caching using a pickle file
        self.workDirectory = self.am_getWorkDirectory()
        self.cacheFile = os.path.join(self.workDirectory, 'ReplicaCache.pkl')
        self.dateWriteCache = datetime.datetime.utcnow()

        # Validity of the cache
        self.replicaCache = None
        self.replicaCacheValidity = self.am_getOption('ReplicaCacheValidity',
                                                      2)
        self.writingCache = False

        self.noUnusedDelay = self.am_getOption('NoUnusedDelay', 6)
        self.unusedFiles = {}
        self.unusedTimeStamp = {}

        self.debug = False
        self.transInThread = {}
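The constructor above only wires up the replica-cache machinery (cacheFile, replicaCacheValidity, writingCache); the actual reading and writing happen elsewhere in the agent. A minimal sketch of the pickle-file caching this setup implies, with hypothetical helper names:

import os
import pickle  # the Python 2 code base would use cPickle

def writeCache(cacheFile, replicaCache):
    # Persist the in-memory cache dictionary to disk.
    with open(cacheFile, 'wb') as fd:
        pickle.dump(replicaCache, fd)

def readCache(cacheFile):
    # Load the cache if present; start from an empty one otherwise.
    if not os.path.exists(cacheFile):
        return {}
    with open(cacheFile, 'rb') as fd:
        return pickle.load(fd)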
Example #31
  def checkTransformationIntegrity( self, transID ):
    """ This method contains the real work
    """
    gLogger.info( "-" * 40 )
    gLogger.info( "Checking the integrity of transformation %s" % transID )
    gLogger.info( "-" * 40 )

    res = self.getTransformationDirectories( transID )
    if not res['OK']:
      return res
    directories = res['Value']
    if not directories:
      return S_OK()

    ######################################################
    #
    # This check performs Catalog->SE for possible output directories
    #
    res = self.fc.exists( directories )
    if not res['OK']:
      gLogger.error( res['Message'] )
      return res
    for directory, error in res['Value']['Failed'].items():
      gLogger.error( 'Failed to determine existence of directory', '%s %s' % ( directory, error ) )
    if res['Value']['Failed']:
      return S_ERROR( "Failed to determine the existence of directories" )
    directoryExists = res['Value']['Successful']
    for directory in sortList( directoryExists.keys() ):
      if not directoryExists[directory]:
        continue
      iRes = self.integrityClient.catalogDirectoryToSE( directory )
      if not iRes['OK']:
        gLogger.error( iRes['Message'] )
        return iRes

    ######################################################
    #
    # This check performs SE->Catalog for possible output directories
    #
    for storageElementName in sortList( self.activeStorages ):
      res = self.integrityClient.storageDirectoryToCatalog( directories, storageElementName )
      if not res['OK']:
        gLogger.error( res['Message'] )
        return res

    gLogger.info( "-" * 40 )
    gLogger.info( "Completed integrity check for transformation %s" % transID )
    return S_OK()
Example #32
 def retrieveRepositoryOutputDataLFNs(self, requestedStates = ['Done']):
   """Helper function
   
   Get the list of uploaded output data for a set of jobs in a repository
   
   @param requestedStates: List of states requested for filtering the list
   @type requestedStates: list of strings
   @return: list
   """
   llist = []
   if not self.jobRepo:
     gLogger.warn( "No repository is initialised" )
     return llist  # an empty list, consistent with the docstring and the final return
   jobs = self.jobRepo.readRepository()['Value']
   for jobID in sortList( jobs.keys() ):
     jobDict = jobs[jobID]
     if jobDict.has_key( 'State' ) and ( jobDict['State'] in requestedStates ):
       if ( jobDict.has_key( 'UserOutputData' ) and ( not int( jobDict['UserOutputData'] ) ) ) or \
       ( not jobDict.has_key( 'UserOutputData' ) ):
         params = self.parameters(int(jobID))
         if params['OK']:
           if params['Value'].has_key('UploadedOutputData'):
             lfn = params['Value']['UploadedOutputData']
             llist.append(lfn)
   return llist
Example #33
  def resolveLFNZeroReplicas( self, problematicDict ):
    """ This takes the problematic dictionary returned by the integrity DB and resolves the LFNZeroReplicas prognosis
    """
    lfn = problematicDict['LFN']
    fileID = problematicDict['FileID']

    res = self.rm.getCatalogReplicas( lfn, allStatus = True, singleFile = True )
    if res['OK'] and res['Value']:
      gLogger.info( "LFNZeroReplicas file (%d) found to have replicas" % fileID )
    else:
      gLogger.info( "LFNZeroReplicas file (%d) does not have replicas. Checking storage..." % fileID )
      pfnsFound = False
      for storageElementName in sortList( gConfig.getValue( 'Resources/StorageElementGroups/Tier1_MC_M-DST', [] ) ):
        res = self.__getStoragePathExists( [lfn], storageElementName )
        if res['Value'].has_key( lfn ):
          gLogger.info( "LFNZeroReplicas file (%d) found storage file at %s" % ( fileID, storageElementName ) )
          pfn = res['Value'][lfn]
          self.__reportProblematicReplicas( [( lfn, pfn, storageElementName, 'PFNNotRegistered' )], storageElementName, 'PFNNotRegistered' )
          pfnsFound = True
      if not pfnsFound:
        gLogger.info( "LFNZeroReplicas file (%d) did not have storage files. Removing..." % fileID )
        res = self.rm.removeCatalogFile( lfn, singleFile = True )
        if not res['OK']:
          gLogger.error( res['Message'] )
          # Increment the number of retries for this file
          self.server.incrementProblematicRetry( fileID )
          return res
        gLogger.info( "LFNZeroReplicas file (%d) removed from catalog" % fileID )
    # If we get here the problem is solved so we can update the integrityDB
    return self.__updateCompletedFiles( 'LFNZeroReplicas', fileID )
Example #34
 def submitTransformationTasks(self, taskDict):
     submitted = 0
     failed = 0
     startTime = time.time()
     for taskID in sortList(taskDict.keys()):
         if not taskDict[taskID]['TaskObject']:
             taskDict[taskID]['Success'] = False
             failed += 1
             continue
         res = self.submitTaskToExternal(taskDict[taskID]['TaskObject'])
         if res['OK']:
             taskDict[taskID]['ExternalID'] = res['Value']
             taskDict[taskID]['Success'] = True
             submitted += 1
         else:
             self.log.error("Failed to submit task to WMS", res['Message'])
             taskDict[taskID]['Success'] = False
             failed += 1
     self.log.info(
         'submitTransformationTasks: Submitted %d tasks to WMS in %.1f seconds'
         % (submitted, time.time() - startTime))
     if failed:
         self.log.info(
             'submitTransformationTasks: Failed to submit %d tasks to WMS.'
             % (failed))
     return S_OK(taskDict)
Example #35
 def prepareTransformationTasks(self,
                                transBody,
                                taskDict,
                                owner='',
                                ownerGroup=''):
     requestType = 'transfer'
     requestOperation = 'replicateAndRegister'
     try:
         requestType, requestOperation = transBody.split(';')
     except:
         pass
     for taskID in sortList(taskDict.keys()):
         paramDict = taskDict[taskID]
         transID = paramDict['TransformationID']
         oRequest = RequestContainer(init=False)
         subRequestIndex = oRequest.initiateSubRequest(requestType)['Value']
         attributeDict = {
             'Operation': requestOperation,
             'TargetSE': paramDict['TargetSE']
         }
         oRequest.setSubRequestAttributes(subRequestIndex, requestType,
                                          attributeDict)
         files = []
         for lfn in paramDict['InputData'].split(';'):
             files.append({'LFN': lfn})
         oRequest.setSubRequestFiles(subRequestIndex, requestType, files)
         requestName = str(transID).zfill(8) + '_' + str(taskID).zfill(8)
         oRequest.setRequestAttributes({'RequestName': requestName})
         taskDict[taskID]['TaskObject'] = oRequest.toXML()['Value']
     return S_OK(taskDict)
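The try/except at the top of prepareTransformationTasks falls back to the 'transfer'/'replicateAndRegister' defaults whenever transBody is not a 'requestType;requestOperation' pair, but the bare except also swallows any other problem. A behaviour-preserving sketch of just that parsing step, made explicit:

def parseTransBody(transBody,
                   defaultType='transfer',
                   defaultOperation='replicateAndRegister'):
    # Expect a body of the form 'type;operation'; fall back to the defaults otherwise.
    if transBody and transBody.count(';') == 1:
        requestType, requestOperation = transBody.split(';')
        return requestType, requestOperation
    return defaultType, defaultOperation

print(parseTransBody('removal;removeFile'))  # ('removal', 'removeFile')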
Example #36
  def getTransformationDirectories( self, transID ):
    ''' get the directories for the supplied transformation from the transformation system

    :param self: self reference
    :param int transID: transformation ID
    '''
    directories = []
    if 'TransformationDB' in self.directoryLocations:
      res = self.transClient.getTransformationParameters( transID, ['OutputDirectories'] )
      if not res['OK']:
        self.log.error( "Failed to obtain transformation directories", res['Message'] )
        return res
      transDirectories = res['Value'].splitlines()
      directories = self._addDirs( transID, transDirectories, directories )

    if 'MetadataCatalog' in self.directoryLocations:
      res = self.metadataClient.findDirectoriesByMetadata( {self.transfidmeta:transID} )
      if not res['OK']:
        self.log.error( "Failed to obtain metadata catalog directories", res['Message'] )
        return res
      transDirectories = res['Value']
      directories = self._addDirs( transID, transDirectories, directories )

    if not directories:
      self.log.info( "No output directories found" )
    directories = sortList( directories )
    return S_OK( directories )
Example #37
    def getSubmittedFileStatus(self, fileDicts):
        taskFiles = {}
        for fileDict in fileDicts:
            transID = fileDict['TransformationID']
            taskID = fileDict['TaskID']
            taskName = str(transID).zfill(8) + '_' + str(taskID).zfill(8)
            if not taskFiles.has_key(taskName):
                taskFiles[taskName] = {}
            taskFiles[taskName][fileDict['LFN']] = fileDict['Status']

        updateDict = {}
        for taskName in sortList(taskFiles.keys()):
            lfnDict = taskFiles[taskName]
            res = self.requestClient.getRequestFileStatus(
                taskName, lfnDict.keys(), 'RequestManagement/centralURL')
            if not res['OK']:
                self.log.warn(
                    "getSubmittedFileStatus: Failed to get files status for request",
                    res['Message'])
                continue
            for lfn, newStatus in res['Value'].items():
                if newStatus == lfnDict[lfn]:
                    pass
                elif newStatus == 'Done':
                    updateDict[lfn] = 'Processed'
                elif newStatus == 'Failed':
                    updateDict[lfn] = 'Problematic'
        return S_OK(updateDict)
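The status translation at the end is easy to miss: files whose request went to 'Done' are marked 'Processed', files whose request 'Failed' become 'Problematic', and unchanged statuses are skipped. A pure-Python sketch with invented LFNs:

lfnDict = {'/lfn/a': 'Assigned', '/lfn/b': 'Assigned', '/lfn/c': 'Done'}
newStatuses = {'/lfn/a': 'Done', '/lfn/b': 'Failed', '/lfn/c': 'Done'}
updateDict = {}
for lfn, newStatus in sorted(newStatuses.items()):
    if newStatus == lfnDict[lfn]:
        pass  # unchanged: nothing to update
    elif newStatus == 'Done':
        updateDict[lfn] = 'Processed'
    elif newStatus == 'Failed':
        updateDict[lfn] = 'Problematic'
print(updateDict)  # {'/lfn/a': 'Processed', '/lfn/b': 'Problematic'}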
Example #38
 def extendTransformation( self, transID, maxTasks ):
   gLogger.info( "Considering extension of transformation %d" % transID )
   # Get the current count of tasks submitted for this transformation
   res = self.transClient.getTransformationTaskStats( transID )
   if not res['OK']:
     if res['Message'] != 'No records found':
       gLogger.error( "Failed to get task statistics", "%s %s" % ( transID, res['Message'] ) )
       return res
     else:
       statusDict = {}
   else:
     statusDict = res['Value']
   gLogger.verbose( "Current task count for transformation %d" % transID )
   for status in sortList( statusDict.keys() ):
     statusCount = statusDict[status]
     gLogger.verbose( "%s : %s" % ( status.ljust( 20 ), str( statusCount ).rjust( 8 ) ) )
   # Determine the number of tasks to be created
   numberOfTasks = self.calculateTaskNumber( maxTasks, statusDict )
   if not numberOfTasks:
     gLogger.info( "No tasks required for transformation %d" % transID )
     return S_OK()
   # Extend the transformation by the determined number of tasks
   res = self.transClient.extendTransformation( transID, numberOfTasks )
   if not res['OK']:
     gLogger.error( "Failed to extend transformation", "%s %s" % ( transID, res['Message'] ) )
     return res
   gLogger.info( "Successfully extended transformation %d by %d tasks" % ( transID, numberOfTasks ) )
   return S_OK()
Example #39
  def __checkPhysicalFiles( self, replicas, catalogMetadata, ses = [] ):
    """ This obtains the physical file metadata and checks the metadata against the catalog entries
    """
    seLfns = {}
    for lfn, replicaDict in replicas.items():
      for se, _url in replicaDict.items():
        if ( ses ) and ( se not in ses ):
          continue
        seLfns.setdefault( se, [] ).append( lfn )
    gLogger.info( '%s %s' % ( 'Storage Element'.ljust( 20 ), 'Replicas'.rjust( 20 ) ) )

    for se in sortList( seLfns ):
      files = len( seLfns[se] )
      gLogger.info( '%s %s' % ( se.ljust( 20 ), str( files ).rjust( 20 ) ) )

      lfns = seLfns[se]
      sizeMismatch = []
      res = self.__checkPhysicalFileMetadata( lfns, se )
      if not res['OK']:
        gLogger.error( 'Failed to get physical file metadata.', res['Message'] )
        return res
      for lfn, metadata in res['Value'].items():
        if lfn in catalogMetadata:
          if ( metadata['Size'] != catalogMetadata[lfn]['Size'] ) and ( metadata['Size'] != 0 ):
            sizeMismatch.append( ( lfn, 'deprecatedUrl', se, 'CatalogPFNSizeMismatch' ) )
      if sizeMismatch:
        self.__reportProblematicReplicas( sizeMismatch, se, 'CatalogPFNSizeMismatch' )
    return S_OK()
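Note the guard in the size comparison: a replica is only reported when its storage size both differs from the catalog size and is non-zero. A minimal sketch with made-up metadata:

catalogMetadata = {'/lfn/a': {'Size': 100}, '/lfn/b': {'Size': 200}}
seMetadata = {'/lfn/a': {'Size': 100}, '/lfn/b': {'Size': 0}}
sizeMismatch = []
for lfn, metadata in sorted(seMetadata.items()):
    if lfn in catalogMetadata:
        if metadata['Size'] != catalogMetadata[lfn]['Size'] and metadata['Size'] != 0:
            sizeMismatch.append(lfn)
print(sizeMismatch)  # [] -- zero-sized replicas are deliberately not reported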
Example #40
    def getStorageUsage(self):
        """ Fill the current Status of the SE Caches from the DB
    """
        self.storageElementCache = {}

        res = self.storageDB.getSubmittedStagePins()
        if not res['OK']:
            gLogger.fatal(
                "StageRequest.getStorageUsage: Failed to obtain submitted requests from StorageManagementDB.",
                res['Message'])
            return res
        self.storageElementUsage = res['Value']
        if self.storageElementUsage:
            gLogger.info(
                "StageRequest.getStorageUsage: Active stage/pin requests found at the following sites:"
            )
            for storageElement in sortList(self.storageElementUsage.keys()):
                seDict = self.storageElementUsage[storageElement]
                # Convert to GB for printout
                seDict['TotalSize'] = seDict['TotalSize'] / (1000 * 1000 *
                                                             1000.0)
                gLogger.info(
                    "StageRequest.getStorageUsage: %s: %s replicas with a size of %.3f GB."
                    % (storageElement.ljust(15), str(
                        seDict['Replicas']).rjust(6), seDict['TotalSize']))
        if not self.storageElementUsage:
            gLogger.info(
                "StageRequest.getStorageUsage: No active stage/pin requests found."
            )

        return S_OK()
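This example converts bytes to GB with a factor of 1000**3; the execute() example further down divides by 1024**3 (GiB) while still printing "GB". A quick comparison of the two conventions for a hypothetical 5 TB of pinned data:

sizeBytes = 5 * 10**12  # invented figure
print('%.3f GB  (1000**3 bytes)' % (sizeBytes / (1000 * 1000 * 1000.0)))  # 5000.000
print('%.3f GiB (1024**3 bytes)' % (sizeBytes / (1024 * 1024 * 1024.0)))  # ~4656.613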
Example #41
  def getTransformationDirectories( self, transID ):
    """ Get the directories for the supplied transformation from the transformation system """
    directories = []
    if 'TransformationDB' in self.directoryLocations:
      res = self.transClient.getTransformationParameters( transID, ['OutputDirectories'] )
      if not res['OK']:
        gLogger.error( "Failed to obtain transformation directories", res['Message'] )
        return res
      transDirectories = res['Value'].splitlines()
      directories = self.__addDirs( transID, transDirectories, directories )

    if 'StorageUsage' in self.directoryLocations:
      res = self.storageUsageClient.getStorageDirectories( '', '', transID, [] )
      if not res['OK']:
        gLogger.error( "Failed to obtain storage usage directories", res['Message'] )
        return res
      transDirectories = res['Value']
      directories = self.__addDirs( transID, transDirectories, directories )

    if 'MetadataCatalog' in self.directoryLocations:
      res = self.metadataClient.findDirectoriesByMetadata( {self.transfidmeta:transID} )
      if not res['OK']:
        gLogger.error( "Failed to obtain metadata catalog directories", res['Message'] )
        return res
      transDirectories = res['Value']
      directories = self.__addDirs( transID, transDirectories, directories )
    if not directories:
      gLogger.info( "No output directories found" )
    directories = sortList( directories )
    return S_OK( directories )
Example #42
 def __fileRetry(self, prodid, mode):
     id = int(prodid)
     RPC = getRPCClient('Transformation/TransformationManager')
     if mode == "proc":
         res = RPC.getTransformationFilesCount(prodid, "ErrorCount",
                                               {'Status': 'Processed'})
     elif mode == "not":
         res = RPC.getTransformationFilesCount(
             prodid, "ErrorCount",
             {'Status': ['Unused', 'Assigned', 'Failed']})
     elif mode == "all":
         res = RPC.getTransformationFilesCount(prodid, "ErrorCount")
     else:
         # 'res' is undefined in this branch of the original snippet
         return {"success": "false", "error": "Unsupported mode: %s" % mode}
     if not res['OK']:
         c.result = {"success": "false", "error": res["Message"]}
     else:
         resList = []
         total = res['Value'].pop('Total')
         if total == 0:
             c.result = {"success": "false", "error": "No files found"}
         else:
             for status in sortList(res['Value'].keys()):
                 count = res['Value'][status]
                 percent = "%.1f" % ((count * 100.0) / total)
                 resList.append((status, str(count), percent))
             resList.append(('Total', total, '-'))
             c.result = {"success": "true", "result": resList}
     gLogger.info("#######", res)
     return c.result
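The method assembles a per-status table with percentages of the total. A pure-Python sketch of that aggregation with invented counters:

counts = {'Total': 40, 'Unused': 10, 'Assigned': 5, 'Processed': 25}
resList = []
total = counts.pop('Total')
for status in sorted(counts):
    count = counts[status]
    resList.append((status, str(count), "%.1f" % ((count * 100.0) / total)))
resList.append(('Total', total, '-'))
print(resList)
# [('Assigned', '5', '12.5'), ('Processed', '25', '62.5'),
#  ('Unused', '10', '25.0'), ('Total', 40, '-')]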
Example #43
 def __fileRetry(self,prodid,mode):
   id = int(prodid)
   RPC = getRPCClient('Transformation/TransformationManager')
   if mode == "proc":
     res = RPC.getTransformationFilesCount(prodid,"ErrorCount",{'Status':'Processed'})
   elif mode == "not":
     res = RPC.getTransformationFilesCount(prodid,"ErrorCount",{'Status':['Unused','Assigned','Failed']})
   elif mode == "all":
     res = RPC.getTransformationFilesCount(prodid,"ErrorCount")
    else:
      # 'res' is undefined in this branch of the original snippet
      return {"success":"false","error":"Unsupported mode: %s" % mode}
   if not res['OK']:
     c.result = {"success":"false","error":res["Message"]}
   else:
     resList = []
     total = res['Value'].pop('Total')
     if total == 0:
       c.result = {"success":"false","error":"No files found"}
     else:
       for status in sortList(res['Value'].keys()):
         count = res['Value'][status]
         percent = "%.1f" % ((count*100.0)/total)
         resList.append((status,str(count),percent))
       resList.append(('Total',total,'-'))
       c.result = {"success":"true","result":resList}
   gLogger.info("#######",res)
   return c.result
Example #44
 def retrieveRepositoryOutputDataLFNs(self, requestedStates=['Done']):
     """Helper function
     
     Get the list of uploaded output data for a set of jobs in a repository
     
     @param requestedStates: List of states requested for filtering the list
     @type requestedStates: list of strings
     @return: list
     """
     llist = []
     if not self.jobRepo:
         gLogger.warn("No repository is initialized")
         return S_OK()
     jobs = self.jobRepo.readRepository()['Value']
     for jobID in sortList(jobs.keys()):
         jobDict = jobs[jobID]
         if jobDict.has_key('State') and (jobDict['State']
                                          in requestedStates):
             # equivalent to: flag absent, or present and equal to 0
             if not int(jobDict.get('UserOutputData', 0)):
                 params = self.parameters(int(jobID))
                 if params['OK']:
                     if params['Value'].has_key('UploadedOutputData'):
                         lfn = params['Value']['UploadedOutputData']
                         llist.append(lfn)
     return llist
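The nested condition boils down to: the job is in one of the requested states and its 'UserOutputData' flag is absent or zero. A self-contained sketch with invented repository entries:

jobs = {
    '101': {'State': 'Done', 'UserOutputData': '0'},
    '102': {'State': 'Done', 'UserOutputData': '1'},
    '103': {'State': 'Failed'},
}
for jobID in sorted(jobs):
    jobDict = jobs[jobID]
    if jobDict.get('State') in ['Done'] and not int(jobDict.get('UserOutputData', 0)):
        print('would fetch UploadedOutputData for job %s' % jobID)  # only 101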
Example #45
    def getTransformationDirectories(self, transID):
        """ Get the directories for the supplied transformation from the transformation system
    """
        directories = []
        if 'TransformationDB' in self.directoryLocations:
            res = self.transClient.getTransformationParameters(
                transID, ['OutputDirectories'])
            if not res['OK']:
                gLogger.error("Failed to obtain transformation directories",
                              res['Message'])
                return res
            transDirectories = res['Value'].splitlines()
            directories = self._addDirs(transID, transDirectories, directories)

        if 'MetadataCatalog' in self.directoryLocations:
            res = self.fileCatalogClient.findDirectoriesByMetadata(
                {self.transfidmeta: transID})
            if not res['OK']:
                gLogger.error("Failed to obtain metadata catalog directories",
                              res['Message'])
                return res
            transDirectories = res['Value']
            directories = self._addDirs(transID, transDirectories, directories)
        if not directories:
            gLogger.info("No output directories found")
        directories = sortList(directories)
        return S_OK(directories)
Example #46
 def __fileRetry(self, prodid, mode):
     id = int(prodid)
     RPC = getRPCClient("Transformation/TransformationManager")
     if mode == "proc":
         res = RPC.getTransformationFilesCount(prodid, "ErrorCount", {"Status": "Processed"})
     elif mode == "not":
         res = RPC.getTransformationFilesCount(prodid, "ErrorCount", {"Status": ["Unused", "Assigned", "Failed"]})
     elif mode == "all":
         res = RPC.getTransformationFilesCount(prodid, "ErrorCount")
     else:
         # 'res' is undefined in this branch of the original snippet
         return {"success": "false", "error": "Unsupported mode: %s" % mode}
     if not res["OK"]:
         c.result = {"success": "false", "error": res["Message"]}
     else:
         resList = []
         total = res["Value"].pop("Total")
         if total == 0:
             c.result = {"success": "false", "error": "No files found"}
         else:
             for status in sortList(res["Value"].keys()):
                 count = res["Value"][status]
                 percent = "%.1f" % ((count * 100.0) / total)
                 resList.append((status, str(count), percent))
             resList.append(("Total", total, "-"))
             c.result = {"success": "true", "result": resList}
     gLogger.info("#######", res)
     return c.result
Example #47
 def __getFullOutput( self ):
   comm = ['glite-transfer-status', '-s', self.ftsServer, '-l', self.ftsGUID]
   res = executeGridCommand( '', comm, self.gridEnv )
   if not res['OK']:
     return res
   returnCode, output, errStr = res['Value']
   # Returns a non zero status if error
   if not returnCode == 0:
     return S_ERROR( errStr )
   statusExp = re.compile( r"^(\S+)" )
   self.requestStatus = re.search( statusExp, output ).group( 1 )
   output = output.replace( "%s\n" % self.requestStatus, "", 1 )
   toRemove = ["'", "<", ">"]
   for char in toRemove:
     output = output.replace( char, '' )
   regExp = re.compile( r"[ ]+Source:[ ]+(\S+)\n[ ]+Destination:[ ]+(\S+)\n[ ]+State:[ ]+(\S+)\n[ ]+Retries:[ ]+(\d+)\n[ ]+Reason:[ ]+([\S ]+).+?[ ]+Duration:[ ]+(\d+)", re.S )
   fileInfo = re.findall( regExp, output )
   for source, target, status, retries, reason, duration in fileInfo:
     lfn = ''
     for candidate in sortList( self.fileDict.keys() ):
       if re.search( candidate, source ):
         lfn = candidate
     if not lfn:
       continue
     self.__setFileParameter( lfn, 'Source', source )
     self.__setFileParameter( lfn, 'Target', target )
     self.__setFileParameter( lfn, 'Status', status )
     if reason == '(null)':
       reason = ''
     self.__setFileParameter( lfn, 'Reason', reason.replace( "\n", " " ) )
     self.__setFileParameter( lfn, 'Duration', int( duration ) )
   return S_OK()
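The parser above depends entirely on the textual layout of the glite-transfer-status output. A self-contained check of the regular expression against a made-up transfer record (field layout assumed from the pattern itself):

import re

sample = (
    "  Source:      gsiftp://source.example.org/data/file1\n"
    "  Destination: gsiftp://dest.example.org/data/file1\n"
    "  State:       Done\n"
    "  Retries:     0\n"
    "  Reason:      (null)\n"
    "  Duration:    42\n"
)
regExp = re.compile(r"[ ]+Source:[ ]+(\S+)\n[ ]+Destination:[ ]+(\S+)\n"
                    r"[ ]+State:[ ]+(\S+)\n[ ]+Retries:[ ]+(\d+)\n"
                    r"[ ]+Reason:[ ]+([\S ]+).+?[ ]+Duration:[ ]+(\d+)", re.S)
print(re.findall(regExp, sample))
# [('gsiftp://source.example.org/data/file1',
#   'gsiftp://dest.example.org/data/file1', 'Done', '0', '(null)', '42')]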
Example #48
  def getSubmittedFileStatus( self, fileDicts ):
    taskFiles = {}
    for fileDict in fileDicts:
      transID = fileDict['TransformationID']
      taskID = fileDict['TaskID']
      taskName = str( transID ).zfill( 8 ) + '_' + str( taskID ).zfill( 8 )
      if not taskFiles.has_key( taskName ):
        taskFiles[taskName] = {}
      taskFiles[taskName][fileDict['LFN']] = fileDict['Status']

    updateDict = {}
    for taskName in sortList( taskFiles.keys() ):
      lfnDict = taskFiles[taskName]
      res = self.requestClient.getRequestFileStatus( taskName, lfnDict.keys() )
      if not res['OK']:
        self.log.warn( "getSubmittedFileStatus: Failed to get files status for request", res['Message'] )
        continue
      for lfn, newStatus in res['Value'].items():
        if newStatus == lfnDict[lfn]:
          pass
        elif newStatus == 'Done':
          updateDict[lfn] = 'Processed'
        elif newStatus == 'Failed':
          updateDict[lfn] = 'Problematic'
    return S_OK( updateDict )
Example #49
 def _getConfigStorageProtocols(self, storageName):
     """ Protocol specific information is present as sections in the Storage configuration
 """
     result = getSiteForResource('Storage', storageName)
     if not result['OK']:
         return result
     site = result['Value']
     result = self.resourcesHelper.getEligibleNodes('AccessProtocol', {
         'Site': site,
         'Resource': storageName
     })
     if not result['OK']:
         return result
     nodesDict = result['Value']
     protocols = []
     for site in nodesDict:
         for se in nodesDict[site]:
             protocols.extend(nodesDict[site][se])
     sortedProtocols = sortList(protocols)
     protocolDetails = []
     for protocol in sortedProtocols:
         result = self._getConfigStorageProtocolDetails(
             storageName, protocol)
         if not result['OK']:
             return result
         protocolDetails.append(result['Value'])
     self.protocols = self.localProtocols + self.remoteProtocols
     return S_OK(protocolDetails)
Example #50
    def initialize(self):
        """Sets defaults """
        self.transClient = TransformationClient()

        # This sets the default proxy to the one defined under
        # /Operations/Shifter/DataManager;
        # the shifterProxy option in the configuration can be used to change this default.
        self.am_setOption('shifterProxy', 'DataManager')

        self.transformationTypes = sortList(
            self.am_getOption('TransformationTypes',
                              ['MCSimulation', 'Simulation']))
        gLogger.info("Will consider the following transformation types: %s" %
                     str(self.transformationTypes))
        self.maxIterationTasks = self.am_getOption('TasksPerIteration', 50)
        gLogger.info("Will create a maximum of %s tasks per iteration" %
                     self.maxIterationTasks)
        self.maxFailRate = self.am_getOption('MaxFailureRate', 30)
        gLogger.info(
            "Will not submit tasks for transformations with failure rate greater than %s%s"
            % (self.maxFailRate, '%'))
        self.maxWaitingJobs = self.am_getOption('MaxWaitingJobs', 1000)
        gLogger.info(
            "Will not submit tasks for transformations with more than %d waiting jobs"
            % self.maxWaitingJobs)
        return S_OK()
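The option names above come straight from the am_getOption calls; a hypothetical fragment of the agent's section in the DIRAC configuration that would feed them (the agent name and section path are illustrative, not taken from the source):

MCExtensionAgent
{
  TransformationTypes = MCSimulation, Simulation
  TasksPerIteration = 50
  MaxFailureRate = 30
  MaxWaitingJobs = 1000
  shifterProxy = DataManager
}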
Example #51
    def __checkPhysicalFiles(self, replicas, catalogMetadata, ses=[]):
        """ This obtains the physical file metadata and checks the metadata against the catalog entries
    """
        seLfns = {}
        for lfn, replicaDict in replicas.items():
            for se, _url in replicaDict.items():
                if (ses) and (se not in ses):
                    continue
                seLfns.setdefault(se, []).append(lfn)
        gLogger.info('%s %s' %
                     ('Storage Element'.ljust(20), 'Replicas'.rjust(20)))

        for se in sortList(seLfns):
            files = len(seLfns[se])
            gLogger.info('%s %s' % (se.ljust(20), str(files).rjust(20)))

            lfns = seLfns[se]
            sizeMismatch = []
            res = self.__checkPhysicalFileMetadata(lfns, se)
            if not res['OK']:
                gLogger.error('Failed to get physical file metadata.',
                              res['Message'])
                return res
            for lfn, metadata in res['Value'].items():
                if lfn in catalogMetadata:
                    if (metadata['Size'] != catalogMetadata[lfn]['Size']) and (
                            metadata['Size'] != 0):
                        sizeMismatch.append((lfn, 'deprecatedUrl', se,
                                             'CatalogPFNSizeMismatch'))
            if sizeMismatch:
                self.__reportProblematicReplicas(sizeMismatch, se,
                                                 'CatalogPFNSizeMismatch')
        return S_OK()
Example #52
def resolveTransformationProblematics( transID ):
  gLogger.notice("Obtaining problematic files for transformation %d" % transID)
  res = integrityClient.getTransformationProblematics(transID)
  if not res['OK']:
    gLogger.error("Failed to get transformation problematic files", res['Message'])
    return S_ERROR()
  problematicFiles = res['Value']
  if not problematicFiles:
    gLogger.notice("No problematic files found for transformation")
    return S_OK()
  for lfn in sortList(problematicFiles.keys()):
    prognosis = problematicFiles[lfn]['Prognosis']
    problematicDict = problematicFiles[lfn]
    gLogger.notice("Prognosis is %s" % prognosis )
    if not hasattr( integrityClient, methodToCall ):
      gLogger.notice( "DataIntegrityClient hasn't got '%s' member" % methodToCall )
      continue
    fcn = getattr( integrityClient, methodToCall )
    if not callable( fcn ):
      gLogger.notice( "DataIntegrityClient member '%s' isn't a method" % methodToCall )
      continue
    ## NOTE: the result of the resolver call is not checked
    res = fcn( problematicDict )
  gLogger.notice("Problematic files resolved for transformation %d" % transID) 
  return S_OK() 
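The getattr dispatch assumed above (one resolve<Prognosis> handler per prognosis value) can be sketched in isolation with a dummy client:

class DummyIntegrityClient(object):
    def resolveCatalogPFNSizeMismatch(self, problematicDict):
        return {'OK': True}

integrityClient = DummyIntegrityClient()
prognosis = 'CatalogPFNSizeMismatch'  # hypothetical prognosis value
methodToCall = 'resolve%s' % prognosis
fcn = getattr(integrityClient, methodToCall, None)
if callable(fcn):
    print(fcn({'LFN': '/lfn/a'}))  # {'OK': True}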
Example #53
 def updateTaskStatus(self):
   gLogger.info("updateTaskStatus: Updating the Status of tasks")
   # Get the transformations to be updated
   status = self.am_getOption('UpdateTasksStatus',['Active','Completing','Stopped'])
   res = self._selectTransformations(transType=self.transType,status=status,agentType=[])
   if not res['OK']:
     return res
   for transformation in res['Value']:
     transID = transformation['TransformationID']
     # Get the tasks which are in a UPDATE state
     updateStatus = self.am_getOption('TaskUpdateStatus',['Checking','Deleted','Killed','Staging','Stalled','Matched','Rescheduled','Completed','Submitted','Received','Waiting','Running'])
     condDict = {"TransformationID":transID,"ExternalStatus":updateStatus}
     timeStamp = str(datetime.datetime.utcnow() - datetime.timedelta(minutes=10))
     res = self.transClient.getTransformationTasks(condDict=condDict,older=timeStamp, timeStamp='LastUpdateTime')
     if not res['OK']:
       gLogger.error("updateTaskStatus: Failed to get tasks to update for transformation", "%s %s" % (transID,res['Message']))
       continue
     if not res['Value']:
       gLogger.verbose("updateTaskStatus: No tasks found to update for transformation %s" % transID)
       continue
     res = self.getSubmittedTaskStatus(res['Value'])
     if not res['OK']:
       gLogger.error("updateTaskStatus: Failed to get updated task statuses for transformation", "%s %s" % (transID,res['Message']))
       continue
     statusDict = res['Value']
     for status in sortList(statusDict.keys()):
       taskIDs = statusDict[status]
       gLogger.info("updateTaskStatus: Updating %d task(s) from transformation %d to %s" % (len(taskIDs),transID,status))
       res = self.transClient.setTaskStatus(transID,taskIDs,status)
       if not res['OK']:
         gLogger.error("updateTaskStatus: Failed to update task status for transformation", "%s %s" % (transID,res['Message']))
           
   gLogger.info("updateTaskStatus: Transformation task status update complete")  
   return S_OK()
Example #54
    def execute(self):

        # Get the current submitted stage space and the amount of pinned space for each storage element
        res = self.storageDB.getSubmittedStagePins()
        if not res['OK']:
            gLogger.fatal(
                "StageRequest.submitStageRequests: Failed to obtain submitted requests from StorageManagementDB.",
                res['Message'])
            return res
        self.storageElementUsage = res['Value']
        if self.storageElementUsage:
            gLogger.info(
                "StageRequest.execute: Active stage/pin requests found at the following sites:"
            )
            for storageElement in sortList(self.storageElementUsage.keys()):
                seDict = self.storageElementUsage[storageElement]
                # NOTE: converts with 1024**3 (GiB) although the message says GB;
                # the getStorageUsage example above divides by 1000**3 instead
                gLogger.info(
                    "StageRequest.execute: %s: %s replicas with a size of %.3f GB."
                    % (storageElement.ljust(15), str(
                        seDict['Replicas']).rjust(6), seDict['TotalSize'] /
                       (1024 * 1024 * 1024.0)))
        if not self.storageElementUsage:
            gLogger.info(
                "StageRequest.execute: No active stage/pin requests found.")
        res = self.submitStageRequests()
        return res
Example #55
 def extendTransformation(self, transID, maxTasks):
     gLogger.info("Considering extension of transformation %d" % transID)
     # Get the current count of tasks submitted for this transformation
     res = self.transClient.getTransformationTaskStats(transID)
     if not res['OK']:
         if res['Message'] != 'No records found':
             gLogger.error("Failed to get task statistics",
                           "%s %s" % (transID, res['Message']))
             return res
         else:
             statusDict = {}
     else:
         statusDict = res['Value']
     gLogger.verbose("Current task count for transformation %d" % transID)
     for status in sortList(statusDict.keys()):
         statusCount = statusDict[status]
         gLogger.verbose("%s : %s" %
                         (status.ljust(20), str(statusCount).rjust(8)))
     # Determine the number of tasks to be created
     numberOfTasks = self._calculateTaskNumber(maxTasks, statusDict)
     if not numberOfTasks:
         gLogger.info("No tasks required for transformation %d" % transID)
         return S_OK()
     # Extend the transformation by the determined number of tasks
     res = self.transClient.extendTransformation(transID, numberOfTasks)
     if not res['OK']:
         gLogger.error("Failed to extend transformation",
                       "%s %s" % (transID, res['Message']))
         return res
     gLogger.info("Successfully extended transformation %d by %d tasks" %
                  (transID, numberOfTasks))
     return S_OK()
Example #56
 def _getFileGroups( self, fileReplicas ):
   fileGroups = {}
   for lfn, replicas in fileReplicas.items():
     replicaSEs = ','.join( sortList( uniqueElements( replicas.keys() ) ) )
     if not fileGroups.has_key( replicaSEs ):
       fileGroups[replicaSEs] = []
     fileGroups[replicaSEs].append( lfn )
   return fileGroups
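A runnable sketch of the grouping with invented replicas, using the stdlib equivalents of sortList and uniqueElements: files sharing the same set of storage elements land in the same bucket, keyed by the sorted, comma-joined SE names.

fileReplicas = {
    '/lfn/a': {'CERN-USER': 'url1', 'PIC-USER': 'url2'},
    '/lfn/b': {'PIC-USER': 'url3', 'CERN-USER': 'url4'},
    '/lfn/c': {'CERN-USER': 'url5'},
}
fileGroups = {}
for lfn, replicas in fileReplicas.items():
    replicaSEs = ','.join(sorted(set(replicas)))
    fileGroups.setdefault(replicaSEs, []).append(lfn)
print(sorted(fileGroups.items()))
# e.g. [('CERN-USER', ['/lfn/c']), ('CERN-USER,PIC-USER', ['/lfn/a', '/lfn/b'])]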
Example #57
  def do_removeFile( self, args ):
    """Remove file from transformation DB

    usage: removeFile <lfn> [lfn]
    """
    argss = string.split( args )
    if not len( argss ) > 0:
      print "no files supplied"
      return
    res = self.server.removeFile( argss )
    if not res['OK']:
      print "failed to remove any files: %s" % res['Message']
      return
    for lfn in sortList( res['Value']['Failed'].keys() ):
      error = res['Value']['Failed'][lfn]
      print "failed to remove %s: %s" % ( lfn, error )
    for lfn in sortList( res['Value']['Successful'].keys() ):
      print "removed %s" % lfn
Example #58
 def dumpSummary( self, printOutput = False ):
   outStr = ''
   for status in sortList( self.statusSummary.keys() ):
     if self.statusSummary[status]:
       outStr = '%s\t%s : %s\n' % ( outStr, status.ljust( 10 ), str( self.statusSummary[status] ).ljust( 10 ) )
   outStr = outStr.rstrip( '\n' )
   if printOutput:
     print outStr
   return S_OK( outStr )
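A short sketch of the summary formatting with an invented statusSummary; statuses with a zero count are omitted from the output:

statusSummary = {'Done': 12, 'Failed': 3, 'Waiting': 0}
outStr = ''
for status in sorted(statusSummary):
    if statusSummary[status]:
        outStr = '%s\t%s : %s\n' % (outStr, status.ljust(10), str(statusSummary[status]).ljust(10))
print(outStr.rstrip('\n'))  # 'Waiting' is skipped because its count is zero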