Example #1
  def web_getSelectionData( self ):
    '''It returns the possible selection data
    '''
    callback = {
                'name'        : set(),
                'elementType' : set(),
                'status'      : set(),
                'statusType'  : set(),
                'tokenOwner'  : set()
                }

    pub = RPCClient( 'ResourceStatus/Publisher' )

    gLogger.info( self.request.arguments )

    elementStatuses = yield self.threadTask( pub.getElementStatuses, 'Resource', None, None, None, None, None )

    if elementStatuses[ 'OK' ]:

      for elementStatus in elementStatuses[ 'Value' ]:
        callback[ 'status' ].add( elementStatus[ 2 ] )
        callback[ 'name' ].add( elementStatus[ 0 ] )
        callback[ 'elementType' ].add( elementStatus[ 6 ] )
        callback[ 'statusType' ].add( elementStatus[ 1 ] )
        callback[ 'tokenOwner' ].add( elementStatus[ 8 ] )
        
    for key, value in callback.items():

      callback[ key ] = [ [ item ] for item in list( value ) ]
      callback[ key ].sort()
      callback[ key ] = [ [ 'All' ] ] + callback[ key ]

    self.finish( callback )
Example #2
 def export_requestDelegationUpload( self, requestedUploadTime, userGroup ):
   """ Request a delegation. Send a delegation request to client
   """
   credDict = self.getRemoteCredentials()
   userDN = credDict[ 'DN' ]
   userName = credDict[ 'username' ]
   if not userGroup:
     userGroup = credDict[ 'group' ]
   retVal = Registry.getGroupsForUser( credDict[ 'username' ] )
   if not retVal[ 'OK' ]:
     return retVal
   groupsAvailable = retVal[ 'Value' ]
   if userGroup not in groupsAvailable:
     return S_ERROR( "%s is not a valid group for user %s" % ( userGroup, userName ) )
   clientChain = credDict[ 'x509Chain' ]
   clientSecs = clientChain.getIssuerCert()[ 'Value' ].getRemainingSecs()[ 'Value' ]
   requestedUploadTime = min( requestedUploadTime, clientSecs )
   retVal = self.__proxyDB.getRemainingTime( userDN, userGroup )
   if not retVal[ 'OK' ]:
     return retVal
   remainingSecs = retVal[ 'Value' ]
   # If we have a proxy longer than the one uploading it's not needed
   # ten minute margin to compensate just in case
   if remainingSecs >= requestedUploadTime - 600:
     gLogger.info( "Upload request not necessary by %s:%s" % ( userName, userGroup ) )
     return self.__addKnownUserProxiesInfo( S_OK() )
   result = self.__proxyDB.generateDelegationRequest( credDict[ 'x509Chain' ], userDN )
   if result[ 'OK' ]:
     gLogger.info( "Upload request by %s:%s given id %s" % ( userName, userGroup, result['Value']['id'] ) )
   else:
     gLogger.error( "Upload request failed", "by %s:%s : %s" % ( userName, userGroup, result['Message'] ) )
   return result
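All of these handlers use DIRAC's standard return convention: S_OK and S_ERROR build plain dictionaries carrying an 'OK' boolean, plus 'Value' on success or 'Message' on failure. A minimal self-contained sketch of the pattern (the function name is illustrative):

from DIRAC import S_OK, S_ERROR

def safeDivide(a, b):
  # S_ERROR carries the error text in 'Message'; S_OK carries the payload in 'Value'.
  if b == 0:
    return S_ERROR("division by zero")
  return S_OK(a / b)

res = safeDivide(4, 2)
if res['OK']:
  print(res['Value'])
else:
  print(res['Message'])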
Example #3
def jobexec(jobxml, wfParameters):
  jobfile = os.path.abspath(jobxml)
  if not os.path.exists(jobfile):
    gLogger.warn('Path to specified workflow %s does not exist' % (jobfile))
    sys.exit(1)
  workflow = fromXMLFile(jobfile)
  gLogger.debug(workflow)
  code = workflow.createCode()
  gLogger.debug(code)
  jobID = 0
  if 'JOBID' in os.environ:
    jobID = os.environ['JOBID']
    gLogger.info('DIRAC JobID %s is running at site %s' % (jobID, DIRAC.siteName()))

  workflow.addTool('JobReport', JobReport(jobID))
  workflow.addTool('AccountingReport', DataStoreClient())
  workflow.addTool('Request', Request())

  # Propagate the command line parameters to the workflow if any
  for pName, pValue in wfParameters.items():
    workflow.setValue(pName, pValue)

  # Propagate the command line parameters to the workflow module instances of each step
  for stepdefinition in workflow.step_definitions.values():
    for moduleInstance in stepdefinition.module_instances:
      for pName, pValue in wfParameters.items():
        if moduleInstance.parameters.find(pName):
          moduleInstance.parameters.setValue(pName, pValue)

  return workflow.execute()
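A hedged usage sketch for jobexec; the XML path and parameter names are placeholders, and workflow.execute() is assumed here to follow the S_OK/S_ERROR convention:

wfParameters = {'JOB_ID': '12345', 'PRODUCTION_ID': '00001234'}  # hypothetical values
result = jobexec('job.xml', wfParameters)
if not result['OK']:
  gLogger.error('Workflow execution failed', result['Message'])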
Example #4
 def do_resetBucketLength(self, args):
   """
   Set the bucket Length. Will trigger a recalculation of buckets. Can take a while.
     Usage : resetBucketLength <typeName>
     <DIRACRoot>/DIRAC/AccountingSystem/Client/Types/<typeName>
      should exist and inherit the base type
   """
   try:
     argList = args.split()
     if argList:
       typeName = argList[0].strip()
     else:
       gLogger.error("No type name specified")
       return
     # Try to import the type
     try:
       typeModule = __import__("DIRAC.AccountingSystem.Client.Types.%s" % typeName,
                               globals(),
                               locals(), typeName)
       typeClass = getattr(typeModule, typeName)
     except Exception as e:
       gLogger.error("Can't load type %s: %s" % (typeName, str(e)))
       return
     gLogger.info("Loaded type %s" % typeClass.__name__)
     typeDef = typeClass().getDefinition()
     acClient = RPCClient("Accounting/DataStore")
     retVal = acClient.setBucketsLength(typeDef[0], typeDef[3])
     if retVal['OK']:
       gLogger.info("Type registered successfully")
     else:
       gLogger.error("Error: %s" % retVal['Message'])
   except BaseException:
     self.showTraceback()
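The __import__(name, globals(), locals(), typeName) call above is the old-style way to reach a submodule; a sketch of the same dynamic lookup with importlib (the 'Job' type name is illustrative):

import importlib

typeName = 'Job'  # illustrative accounting type name
typeModule = importlib.import_module('DIRAC.AccountingSystem.Client.Types.%s' % typeName)
typeClass = getattr(typeModule, typeName)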
Example #5
  def export_getProxyWithToken( self, userDN, userGroup, requestPem, requiredLifetime, token ):
    """
    Get a proxy for a userDN/userGroup
      - requestPem : PEM encoded request object for delegation
      - requiredLifetime: Argument for length of proxy
      - token : Valid token to get a proxy
      * Properties :
        FullDelegation <- permits full delegation of proxies
        LimitedDelegation <- permits downloading only limited proxies
        PrivateLimitedDelegation <- permits downloading only limited proxies for oneself
    """
    credDict = self.getRemoteCredentials()
    result = self.__proxyDB.useToken( token, credDict[ 'DN' ], credDict[ 'group' ] )
    gLogger.info( "Trying to use token %s by %s:%s" % ( token, credDict[ 'DN' ], credDict[ 'group' ] ) )
    if not result[ 'OK' ]:
      return result
    if not result[ 'Value' ]:
      return S_ERROR( "Proxy token is invalid" )
    self.__proxyDB.logAction( "used token", credDict[ 'DN' ], credDict[ 'group' ], userDN, userGroup )

    result = self.__checkProperties( userDN, userGroup )
    if not result[ 'OK' ]:
      return result
    self.__proxyDB.logAction( "download proxy with token", credDict[ 'DN' ], credDict[ 'group' ], userDN, userGroup )
    return self.__getProxy( userDN, userGroup, requestPem, requiredLifetime, True )
Example #6
 def registerActivities( self, sourceDict, activitiesDict, componentExtraInfo ):
   """
   Register new activities in the database
   """
   acCatalog = self.__createCatalog()
   rrdManager = self.__createRRDManager()
   #Register source
   if not self.__checkSourceDict( sourceDict ):
     return S_ERROR( "Source definition is not valid" )
   sourceId = acCatalog.registerSource( sourceDict )
   #Register activities
   for name in activitiesDict:
     if not self.__checkActivityDict( activitiesDict[ name ] ):
       return S_ERROR( "Definition for activity %s is not valid" % name )
     activitiesDict[ name ][ 'name' ] = name
     if 'bucketLength' not in activitiesDict[ name ]:
       activitiesDict[ name ][ 'bucketLength' ] = 60
     if not self.__checkActivityDict( activitiesDict[ name ] ):
       return S_ERROR( "Activity %s definition is not valid" % name )
     gLogger.info( "Received activity", "%s [%s]" % ( name, str( activitiesDict[ name ] ) ) )
     rrdFile = acCatalog.registerActivity( sourceId, name, activitiesDict[ name ] )
     if not rrdFile:
       return S_ERROR( "Could not register activity %s" % name )
     retVal = rrdManager.create( activitiesDict[ name ][ 'type' ], rrdFile, activitiesDict[ name ][ 'bucketLength' ] )
     if not retVal[ 'OK' ]:
       return retVal
   self.__cmdb_registerComponent( sourceId, sourceDict, componentExtraInfo )
   return S_OK( sourceId )
Example #7
def initializePlottingHandler( serviceInfo ):

  #Get data location
  plottingSection = PathFinder.getServiceSection( "Framework/Plotting" )
  dataPath = gConfig.getValue( "%s/DataLocation" % plottingSection, "data/graphs" )
  dataPath = dataPath.strip()
  if "/" != dataPath[0]:
    dataPath = os.path.realpath( "%s/%s" % ( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), dataPath ) )
  gLogger.info( "Data will be written into %s" % dataPath )
  try:
    os.makedirs( dataPath )
  except OSError:
    # the directory may already exist
    pass
  try:
    testFile = "%s/plot__.test" % dataPath
    fd = open( testFile, "w" )
    fd.close()
    os.unlink( testFile )
  except IOError:
    gLogger.fatal( "Can't write to %s" % dataPath )
    return S_ERROR( "Data location is not writable" )

  gPlotCache.setPlotsLocation( dataPath )
  gMonitor.registerActivity( "plotsDrawn", "Drawn plot images", "Plotting requests", "plots", gMonitor.OP_SUM )
  return S_OK()
Example #8
 def addHost( self, hostname, properties ):
   """
   Add a host to the cs
     - hostname
     - properties is a dict with keys:
       - DN
       - Properties
       - <extra params>
   Returns True/False
   """
   if not self.__initialized[ 'OK' ]:
     return self.__initialized
   for prop in ( "DN", ):
     if prop not in properties:
       gLogger.error( "Missing %s property for host %s" % ( prop, hostname ) )
       return S_OK( False )
   if hostname in self.listHosts()['Value']:
     gLogger.error( "Host %s is already registered" % hostname )
     return S_OK( False )
   self.__csMod.createSection( "%s/Hosts/%s" % ( self.__baseSecurity, hostname ) )
   for prop in properties:
     self.__csMod.setOptionValue( "%s/Hosts/%s/%s" % ( self.__baseSecurity, hostname, prop ), properties[ prop ] )
   gLogger.info( "Registered host %s" % hostname )
   self.__csModified = True
   return S_OK( True )
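A hedged usage sketch, assuming csAPI is an instance of the class defining addHost; the hostname, DN, and property values are placeholders:

result = csAPI.addHost('ce01.example.org',
                       {'DN': '/DC=org/DC=example/CN=ce01.example.org',
                        'Properties': 'TrustedHost'})
if result['OK'] and result['Value']:
  gLogger.info('Host registered and pending commit')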
Example #9
 def __unlinkOldLog(self, filePath):
     try:
         gLogger.info("Unlinking file %s" % filePath)
         os.unlink(filePath)
     except Exception as e:
         gLogger.error("Can't unlink old log file", "%s: %s" % (filePath, str(e)))
         return 1
Example #10
  def export_update( self, params, meta ):
    '''
    This method is a bridge to access :class:`ResourceManagementDB` remotely. It
    adds neither processing nor validation. If you need to know more about this
    method, keep reading the database documentation.

    :Parameters:
      **params** - `dict`
        arguments for the mysql query ( must match table columns ! ).

      **meta** - `dict`
        metadata for the mysql query. It must contain, at least, the `table` key
        with the proper table name.

    :return: S_OK() || S_ERROR()
    '''

    gLogger.info( 'update: %s %s' % ( params, meta ) )
    
    try:
      res = db.update( params, meta )
      gLogger.debug( 'update %s' % res )
    except Exception as e:
      _msg = 'Exception calling db.update: \n %s' % e
      gLogger.exception( _msg )
      res = S_ERROR( _msg )

    return res
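A sketch of calling this method remotely; the service path and the table/column names are assumptions, not taken from the code above:

from DIRAC.Core.DISET.RPCClient import RPCClient

rmService = RPCClient('ResourceStatus/ResourceManagement')
res = rmService.update({'Name': 'CERN-PROD', 'Status': 'Active'},
                       {'table': 'ResourceStatus'})
if not res['OK']:
  gLogger.error(res['Message'])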
Example #11
 def addUser( self, username, properties ):
   """
   Add a user to the cs
     - username
     - properties is a dict with keys:
       - DN
       - groups
       - <extra params>
   Returns True/False
   """
   if not self.__initialized[ 'OK' ]:
     return self.__initialized
   for prop in ( "DN", "Groups" ):
     if prop not in properties:
       gLogger.error( "Missing %s property for user %s" % ( prop, username ) )
       return S_OK( False )
   if username in self.listUsers()['Value']:
     gLogger.error( "User %s is already registered" % username )
     return S_OK( False )
   groups = self.listGroups()['Value']
   for userGroup in properties[ 'Groups' ]:
     if userGroup not in groups:
       gLogger.error( "User %s group %s is not a valid group" % ( username, userGroup ) )
       return S_OK( False )
   self.__csMod.createSection( "%s/Users/%s" % ( self.__baseSecurity, username ) )
   for prop in properties:
     if prop == "Groups":
       continue
     self.__csMod.setOptionValue( "%s/Users/%s/%s" % ( self.__baseSecurity, username, prop ), properties[ prop ] )
   for userGroup in properties[ 'Groups' ]:
     gLogger.info( "Added user %s to group %s" % ( username, userGroup ) )
     self.__addUserToGroup( userGroup, username )
   gLogger.info( "Registered user %s" % username )
   self.__csModified = True
   return S_OK( True )
Example #12
  def _getMissingReplicas( self ):
    """ This recovers Replicas that were not Staged on a previous attempt (the stage request failed or timed out),
        while other Replicas of the same task are already Staged. If left behind they can produce a deadlock.
        All SEs are considered, even if their Cache is full
    """
    # Get Replicas that are in Staged/StageSubmitted 
    gLogger.info( 'StageRequest._getMissingReplicas: Checking Staged Replicas' )

    res = self.__getStagedReplicas()
    if not res['OK']:
      gLogger.fatal( "StageRequest._getMissingReplicas: Failed to get replicas from StorageManagementDB.", res['Message'] )
      return res
    seReplicas = {}

    allReplicaInfo = res['Value']['AllReplicaInfo']
    replicasToStage = []
    for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
      # Consider all SEs
      replicasToStage.extend( seReplicaIDs )

    # Get Replicas from the same Tasks as those selected
    res = self.__addAssociatedReplicas( replicasToStage, seReplicas, allReplicaInfo )
    if not res['OK']:
      gLogger.fatal( "StageRequest._getMissingReplicas: Failed to get associated Replicas.", res['Message'] )

    return res
Example #13
  def export_enforcePolicies(self, granularity, name, useNewRes = True):
    """ Enforce all the policies. If `useNewRes` is False, use cached results only (where available).
    """
    try:
      gLogger.info("ResourceManagementHandler.enforcePolicies: Attempting to enforce policies for %s %s" % (granularity, name))
      try:
        reason = serviceType = resourceType = None 

        res = rsDB.getStuffToCheck(granularity, name = name)[0]
        status = res[1]
        formerStatus = res[2]
        siteType = res[3]
        tokenOwner = res[len(res)-1]
        if granularity == 'Resource':
          resourceType = res[4]
        elif granularity == 'Service':
          serviceType = res[4]
        
        from DIRAC.ResourceStatusSystem.PolicySystem.PEP import PEP
        pep = PEP(VOExtension, granularity, name, status, formerStatus, reason, siteType, 
                  serviceType, resourceType, tokenOwner, useNewRes)
        pep.enforce(rsDBIn = rsDB, rmDBIn = rmDB)
        
      except RSSDBException as x:
        gLogger.error(whoRaised(x))
      except RSSException as x:
        gLogger.error(whoRaised(x))
Example #14
  def export_setStatus(self, element, name, statusType, status, elementType, username, lastCheckTime):

    lastCheckTime = datetime.strptime(lastCheckTime, '%Y-%m-%d %H:%M:%S')

    credentials = self.getRemoteCredentials()
    gLogger.info(credentials)

    elementInDB = rsClient.selectStatusElement(element, 'Status', name=name,
                                               statusType=statusType,
                                               # status = status,
                                               elementType=elementType,
                                               lastCheckTime=lastCheckTime)
    if not elementInDB['OK']:
      return elementInDB
    elif not elementInDB['Value']:
      return S_ERROR('Your selection has been modified. Please refresh.')

    reason = 'Status %s forced by %s ( web )' % (status, username)
    tokenExpiration = datetime.utcnow() + timedelta(days=1)

    newStatus = rsClient.addOrModifyStatusElement(element, 'Status', name=name,
                                                  statusType=statusType,
                                                  status=status,
                                                  elementType=elementType,
                                                  reason=reason,
                                                  tokenOwner=username,
                                                  tokenExpiration=tokenExpiration)
    if not newStatus['OK']:
      return newStatus

    return S_OK(reason)
Example #15
 def __getDataReplicas( self, transID, lfns, active = True ):
   """ Get the replicas for the LFNs and check their statuses 
   """
   startTime = time.time()
   if active:
     res = self.rm.getActiveReplicas( lfns )
   else:
     res = self.rm.getReplicas( lfns )
   if not res['OK']:
     return res
   gLogger.info( "__getDataReplicas: Replica results for %d files obtained in %.2f seconds" % ( len( lfns ), time.time() - startTime ) )
   # Create a dictionary containing all the file replicas
   dataReplicas = {}
   for lfn, replicaDict in res['Value']['Successful'].items():
     ses = replicaDict.keys()
     for se in ses:
       if active and re.search( 'failover', se.lower() ):
         gLogger.warn( "__getDataReplicas: Ignoring failover replica for %s." % lfn )
       else:
          if lfn not in dataReplicas:
            dataReplicas[lfn] = {}
         dataReplicas[lfn][se] = replicaDict[se]
   # Make sure that file missing from the catalog are marked in the transformation DB.
   missingLfns = []
   for lfn, reason in res['Value']['Failed'].items():
     if re.search( "No such file or directory", reason ):
       gLogger.warn( "__getDataReplicas: %s not found in the catalog." % lfn )
       missingLfns.append( lfn )
   if missingLfns:
     res = self.transDB.setFileStatusForTransformation( transID, 'MissingLFC', missingLfns )
     if not res['OK']:
       gLogger.warn( "__getDataReplicas: Failed to update status of missing files: %s." % res['Message'] )
   if not dataReplicas:
     return S_ERROR( "No replicas obtained" )
   return S_OK( dataReplicas )
Example #16
 def export_getSiteWNsInfo(self, siteName):
   """
   Retruns the jobs statistics for hosts of the given site. 
   
   :return: S_OK( [ { 'Host' : 'aaa.bb.ccc'
                                       'Running' : 1
                                       'Done' : 22
                                       'Failed' : 22
                                       'Efficiency' : 50.0
                                       } ] ) / S_ERROR
   """
   
   gLogger.info('getSiteWNsInfo')
     
   queryRes = rmClient.selectWorkNodeCache(site = siteName, 
                                      meta = { 'columns' : [ 'Host', 'Done', 'Failed', 'Efficiency' ] })
   if not queryRes[ 'OK' ]:
     return queryRes
   records = queryRes[ 'Value' ]
   columns = queryRes[ 'Columns' ]
   
   results = []
   for record in records:
     results.append(dict(zip( columns, record )))
   
   return S_OK(results)
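The dict(zip(columns, record)) idiom above converts each flat database row into a keyed dictionary; a standalone illustration with made-up values:

columns = ['Host', 'Done', 'Failed', 'Efficiency']
record = ('wn01.example.org', 22, 22, 50.0)
print(dict(zip(columns, record)))
# {'Host': 'wn01.example.org', 'Done': 22, 'Failed': 22, 'Efficiency': 50.0}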
Example #17
 def export_getTestHistory(self, elementType, element, fromDate, toDate):
   gLogger.info('getTestHistory')
   
   if fromDate > toDate:
     return S_ERROR('The "from" date cannot be later than the "to" date.')
 
   selectElements = []
   if elementType == 'Site':  
     if element.split('.')[ 0 ] == 'CLOUD':
       selectElements.append( element )
     else:
       selectElements += CSHelpers.getSiteComputingElements(element)
     selectElements += CSHelpers.getSiteStorageElements(element)
   else:
     selectElements = [ element ]
       
   queryRes = rmClient.selectSAMResultLog(
                                          elementName = selectElements,
                                          meta = { 'newer' : ['LastCheckTime', fromDate ],
                                                  'older' : [ 'LastCheckTime', toDate ],
                                                  'columns' : [ 'ElementName', 'TestType', 'Status', 'LastCheckTime' ] }
                                          )
   if not queryRes[ 'OK' ]:
     return queryRes
   records = queryRes[ 'Value' ]
   
   testHistory = {}
   for record in records:
     key = record[ 0 ] + '-' + record[ 1 ]
     if key not in testHistory:
       testHistory[ key ] = []
     testHistory[ key ].append(( record[ 3 ], record[ 2 ] ))
   
   return S_OK(testHistory)
Example #18
 def export_getTransformationProblematics( self, transID ):
   """ Get the problematics for a given transformation """
   gLogger.info( "DataIntegrityHandler.getTransformationProblematics: Getting problematics for transformation." )
   res = gDataIntegrityDB.getTransformationProblematics( transID )
   if not res['OK']:
     gLogger.error( "DataIntegrityHandler.getTransformationProblematics: Failed.", res['Message'] )
   return res
Example #19
  def export_getSitesSAMSummary(self, sitesName, vo = None):
    """
    Return the dictionary with SAM summary information for the given sites.
    
    :return: S_OK( { site : { 'CEStatus' : 'OK'
                                                   'SEStatus' : 'Bad'
                                                   } } ) | S_ERROR
    """
    
    gLogger.info('getSitesSAMSummary')
    
    sitesSAMSummary = {}

    vo = vo or 'all'
    queryRes = rmClient.selectSiteSAMStatus(site = sitesName, vO = vo,
                                            meta = { 'newer' : [ 'LastCheckTime', 
                                                                datetime.utcnow().replace(microsecond = 0) - timedelta(hours = 24) ] })
    if not queryRes[ 'OK' ]:
      return queryRes
    records = queryRes[ 'Value' ]
    columns = queryRes[ 'Columns' ]
    
    for record in records:
      recordDict = dict(zip(columns, record))
      siteName = recordDict[ 'Site' ]
      sitesSAMSummary[ siteName ] = { 'CEStatus' : recordDict[ 'CEStatus' ], 'SEStatus' : recordDict[ 'SEStatus' ] }
      
    return S_OK(sitesSAMSummary)
Example #20
 def execute(self):
   """ This is called by the Agent Reactor
   """
   res = self.ovc.getSites()
   if not res['OK']:
     return res
   sitedict = {}
   sites = res['Value']
   gLogger.info("Will update info for sites %s" % sites)
   for site in sites:
     attribdict = {"Site" : site, "ApplicationStatus": 'Getting overlay files'}
     res = self.jobmon.getCurrentJobCounters(attribdict)
     if not res['OK']:
       continue
      if 'Running' in res['Value']:
       sitedict[site] = res['Value']['Running']
     else:
       sitedict[site] = 0
   gLogger.info("Setting new values %s" % sitedict)    
   res = self.ovc.setJobsAtSites(sitedict)
   if not res['OK']:
     gLogger.error(res['Message'])
     return res
   
   return S_OK()
Example #21
 def export_changeProblematicPrognosis( self, fileID, newPrognosis ):
   """ Change the prognosis for the supplied file """
   gLogger.info( "DataIntegrityHandler.changeProblematicPrognosis: Changing problematic prognosis." )
   res = gDataIntegrityDB.changeProblematicPrognosis( fileID, newPrognosis )
   if not res['OK']:
     gLogger.error( "DataIntegrityHandler.changeProblematicPrognosis: Failed to update.", res['Message'] )
   return res
Example #22
 def execute(self):
     self.log.info("execute")
     #x = PingCommand()
     #x.execute()
     x = DMStestCommand().execute()
     gLogger.info(x)
     return S_OK()
Example #23
 def execute(self):
   """ The main agent execution method
   """
   self.proxyDB.purgeLogs()
   gLogger.info( "Purging expired requests" )
   retVal = self.proxyDB.purgeExpiredRequests()
   if retVal[ 'OK' ]:
     gLogger.info( " purged %s requests" % retVal[ 'Value' ] )
   gLogger.info( "Purging expired proxies" )
   retVal = self.proxyDB.purgeExpiredProxies()
   if retVal[ 'OK' ]:
     gLogger.info( " purged %s proxies" % retVal[ 'Value' ] )
   retVal = self.proxyDB.getCredentialsAboutToExpire( self.am_getOption( "MinimumLifeTime" , 3600 ) )
   if not retVal[ 'OK' ]:
     return retVal
   data = retVal[ 'Value' ]
   gLogger.info( "Renewing %s proxies..." % len( data ) )
   for record in data:
     userDN = record[0]
     userGroup = record[1]
     self.__threadPool.generateJobAndQueueIt( self.__renewProxyForCredentials,
                                              args = ( userDN, userGroup ),
                                              oExceptionCallback = self.__treatRenewalCallback )
   self.__threadPool.processAllResults()
   return S_OK()
Example #24
  def setFileProblematic( self, lfn, reason, sourceComponent = '' ):
    """ This method updates the status of the file in the FileCatalog and the IntegrityDB

        lfn - the lfn of the file
        reason - this is given to the integrity DB and should reflect the problem observed with the file

        sourceComponent is the component issuing the request.
    """
    if isinstance( lfn, list ):
      lfns = lfn
    elif isinstance( lfn, str ):
      lfns = [lfn]
    else:
      errStr = "DataIntegrityClient.setFileProblematic: Supplied file info must be list or a single LFN."
      gLogger.error( errStr )
      return S_ERROR( errStr )
    gLogger.info( "DataIntegrityClient.setFileProblematic: Attempting to update %s files." % len( lfns ) )
    successful = {}
    failed = {}
    fileMetadata = {}
    for lfn in lfns:
      fileMetadata[lfn] = {'Prognosis':reason, 'LFN':lfn, 'PFN':'', 'SE':''}
    res = self.insertProblematic( sourceComponent, fileMetadata )
    if not res['OK']:
      gLogger.error( "DataIntegrityClient.setReplicaProblematic: Failed to insert problematics to integrity DB" )
    return res
Example #25
  def resolveLFNZeroReplicas( self, problematicDict ):
    """ This takes the problematic dictionary returned by the integrity DB and resolves the LFNZeroReplicas prognosis
    """
    lfn = problematicDict['LFN']
    fileID = problematicDict['FileID']

    res = self.rm.getCatalogReplicas( lfn, allStatus = True, singleFile = True )
    if res['OK'] and res['Value']:
      gLogger.info( "LFNZeroReplicas file (%d) found to have replicas" % fileID )
    else:
      gLogger.info( "LFNZeroReplicas file (%d) does not have replicas. Checking storage..." % fileID )
      pfnsFound = False
      for storageElementName in sortList( gConfig.getValue( 'Resources/StorageElementGroups/Tier1_MC_M-DST', [] ) ):
        res = self.__getStoragePathExists( [lfn], storageElementName )
        if lfn in res['Value']:
          gLogger.info( "LFNZeroReplicas file (%d) found storage file at %s" % ( fileID, storageElementName ) )
          pfn = res['Value'][lfn]
          self.__reportProblematicReplicas( [( lfn, pfn, storageElementName, 'PFNNotRegistered' )], storageElementName, 'PFNNotRegistered' )
          pfnsFound = True
      if not pfnsFound:
        gLogger.info( "LFNZeroReplicas file (%d) did not have storage files. Removing..." % fileID )
        res = self.rm.removeCatalogFile( lfn, singleFile = True )
        if not res['OK']:
          gLogger.error( res['Message'] )
          # Increment the number of retries for this file
          self.server.incrementProblematicRetry( fileID )
          return res
        gLogger.info( "LFNZeroReplicas file (%d) removed from catalog" % fileID )
    # If we get here the problem is solved so we can update the integrityDB
    return self.__updateCompletedFiles( 'LFNZeroReplicas', fileID )
Example #26
  def __getStoragePathExists( self, lfnPaths, storageElement ):
    gLogger.info( 'Determining the existence of %d files at %s' % ( len( lfnPaths ), storageElement ) )

    res = self.rm.getPfnForLfn( lfnPaths, storageElement )
    if not res['OK']:
      gLogger.error( "Failed to get PFNs for LFNs", res['Message'] )
      return res
    for lfnPath, error in res['Value']['Failed'].items():
      gLogger.error( 'Failed to obtain PFN from LFN', '%s %s' % ( lfnPath, error ) )
    if res['Value']['Failed']:
      return S_ERROR( 'Failed to obtain PFNs from LFNs' )
    lfnPfns = res['Value']['Successful']
    pfnLfns = {}
    for lfn, pfn in lfnPfns.items():
      pfnLfns[pfn] = lfn
    res = self.rm.getStorageFileExists( pfnLfns.keys(), storageElement )
    if not res['OK']:
      gLogger.error( "Failed to obtain existance of paths", res['Message'] )
      return res
    for lfnPath, error in res['Value']['Failed'].items():
      gLogger.error( 'Failed to determine existance of path', '%s %s' % ( lfnPath, error ) )
    if res['Value']['Failed']:
      return S_ERROR( 'Failed to determine existance of paths' )
    pathExists = res['Value']['Successful']
    resDict = {}
    for pfn, exists in pathExists.items():
      if exists:
        resDict[pfnLfns[pfn]] = pfn
    return S_OK( resDict )
Example #27
  def __getCatalogMetadata( self, lfns ):
    """ Obtain the file metadata from the catalog while checking they exist
    """
    if not lfns:
      return S_OK( {} )
    gLogger.info( 'Obtaining the catalog metadata for %s files' % len( lfns ) )

    missingCatalogFiles = []
    zeroSizeFiles = []
    res = self.rm.getCatalogFileMetadata( lfns )
    if not res['OK']:
      gLogger.error( 'Failed to get catalog metadata', res['Message'] )
      return res
    allMetadata = res['Value']['Successful']
    for lfn, error in res['Value']['Failed'].items():
      if re.search( 'No such file or directory', error ):
        missingCatalogFiles.append( lfn )
    if missingCatalogFiles:
      self.__reportProblematicFiles( missingCatalogFiles, 'LFNCatalogMissing' )
    for lfn, metadata in allMetadata.items():
      if metadata['Size'] == 0:
        zeroSizeFiles.append( lfn )
    if zeroSizeFiles:
      self.__reportProblematicFiles( zeroSizeFiles, 'LFNZeroSize' )
    gLogger.info( 'Obtaining the catalog metadata complete' )
    return S_OK( allMetadata )
Example #28
 def __generateReleaseNotes( self ):
   result = self.__loadReleaseNotesFile()
   if not result[ 'OK' ]:
     return result
   releaseData = result[ 'Value' ]
   if not releaseData:
     gLogger.info( "release.notes not found. Trying to find releasenotes.rst" )
     for rstFileName in ( "releasenotes.rst", "releasehistory.rst" ):
       result = self.__compileReleaseNotes( rstFileName )
       if result[ 'OK' ]:
         gLogger.notice( "Compiled %s file!" % rstFileName )
       else:
         gLogger.warn( result[ 'Message' ] )
     return S_OK()
   gLogger.info( "Loaded release.notes" )
   for rstFileName, singleVersion in ( ( "releasenotes.rst", True ),
                                       ( "releasehistory.rst", False ) ):
     result = self.__generateRSTFile( releaseData, rstFileName, self.params.version,
                                      singleVersion )
     if not result[ 'OK' ]:
       gLogger.error( "Could not generate %s: %s" % ( rstFileName, result[ 'Message' ] ) )
       continue
     result = self.__compileReleaseNotes( rstFileName )
     if not result[ 'OK' ]:
       gLogger.error( "Could not compile %s: %s" % ( rstFileName, result[ 'Message' ] ) )
       continue
     gLogger.notice( "Compiled %s file!" % rstFileName )
   return S_OK()
Example #29
def checkLockAge(lockname):
  """ Check if there is a lock, and in that case deal with it, potentially remove it after n minutes
  """
  overwrite = False
  count = 0
  while True:
    if not os.path.exists(lockname):
      break
    count += 1
    gLogger.warn("Will wait one minute before proceeding")
    res = wasteCPUCycles(60)
    if not res['OK']:
      continue
    last_touch = time.time()
    try:
      stat = os.stat(lockname)
      last_touch = stat.st_atime
    except OSError as x:
      gLogger.warn("File not available: %s %s, assume removed" % (OSError, str(x))) 
      break
    loc_time = time.time()
    if loc_time - last_touch > 30 * 60:  # the lock is too old to still be valid (30 minutes)
      gLogger.info("File is %s seconds old" % str(loc_time-last_touch))
      overwrite = True
      res = clearLock(lockname)
      if res['OK']:
        break
    if count > 60: #We have been waiting for 60 minutes, something is wrong, kill it
      gLogger.error("Seems file stat is wrong, assume buggy, will fail installation")
      #overwrite = True
      res = clearLock(lockname)
      return S_ERROR("Buggy lock, removed: %s" % res['OK'])
      
  return S_OK(overwrite)
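A hedged caller sketch: wait out (or clear) a stale lock before taking it; the lock path is a placeholder and clearLock is the helper used above:

lockname = '/tmp/install.lock'  # hypothetical path
res = checkLockAge(lockname)
if not res['OK']:
  gLogger.error('Could not acquire lock', res['Message'])
else:
  if res['Value']:
    gLogger.warn('A stale lock was overwritten')
  open(lockname, 'w').close()  # create our own lock
  # ... do the installation work, then clearLock(lockname)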
Example #30
 def do_regenerateBuckets( self, args ):
   """
   Regenerate buckets for type. Can take a while.
     Usage : regenerateBuckets <typeName>
     <DIRACRoot>/DIRAC/AccountingSystem/Client/Types/<typeName>
      should exist and inherit the base type
   """
   try:
     argList = args.split()
     if argList:
       typeName = argList[0].strip()
     else:
       gLogger.error( "No type name specified" )
       return
     #Try to import the type
     try:
       typeModule = __import__( "DIRAC.AccountingSystem.Client.Types.%s" % typeName,
                                 globals(),
                                 locals(), typeName )
       typeClass  = getattr( typeModule, typeName )
     except Exception as e:
       gLogger.error( "Can't load type %s: %s" % ( typeName, str(e) ) )
       return
     gLogger.info( "Loaded type %s"  % typeClass.__name__ )
     typeDef = typeClass().getDefinition()
     acClient = RPCClient( "Accounting/DataStore" )
     retVal = acClient.regenerateBuckets( typeDef[0] )
     if retVal[ 'OK' ]:
       gLogger.info( "Buckets recalculated!" )
     else:
       gLogger.error( "Error: %s" % retVal[ 'Message' ] )
   except BaseException:
     self.showTraceback()
Example #31
def generateProxy(params):
  """ Generate proxy

      :param params: parameters

      :return: S_OK()/S_ERROR()
  """
  if params.checkClock:
    result = getClockDeviation()
    if result['OK']:
      deviation = result['Value']
      if deviation > 600:
        gLogger.error("Your host clock seems to be off by more than TEN MINUTES! Thats really bad.")
        gLogger.error("We're cowardly refusing to generate a proxy. Please fix your system time")
        sys.exit(1)
      elif deviation > 180:
        gLogger.error("Your host clock seems to be off by more than THREE minutes! That's bad.")
        gLogger.notice("We'll generate the proxy but please fix your system time")
      elif deviation > 60:
        gLogger.error("Your host clock seems to be off by more than a minute! That's not good.")
        gLogger.notice("We'll generate the proxy but please fix your system time")

  certLoc = params.certLoc
  keyLoc = params.keyLoc
  if not certLoc or not keyLoc:
    cakLoc = Locations.getCertificateAndKeyLocation()
    if not cakLoc:
      return S_ERROR("Can't find user certificate and key")
    if not certLoc:
      certLoc = cakLoc[0]
    if not keyLoc:
      keyLoc = cakLoc[1]
  params.certLoc = certLoc
  params.keyLoc = keyLoc

  # Load password
  testChain = X509Chain()
  retVal = testChain.loadChainFromFile(params.certLoc)
  if not retVal['OK']:
    return S_ERROR("Cannot load certificate %s: %s" % (params.certLoc, retVal['Message']))
  timeLeft = int(testChain.getRemainingSecs()['Value'] / 86400)
  if timeLeft < 30:
    gLogger.notice("\nYour certificate will expire in %d days. Please renew it!\n" % timeLeft)

  # First try reading the key from the file
  retVal = testChain.loadKeyFromFile(params.keyLoc, password=params.userPasswd)
  if not retVal['OK']:
    passwdPrompt = "Enter Certificate password:"
    if params.stdinPasswd:
      userPasswd = sys.stdin.readline().strip("\n")
    else:
      userPasswd = getpass.getpass(passwdPrompt)
    params.userPasswd = userPasswd

  # Find location
  proxyLoc = params.proxyLoc
  if not proxyLoc:
    proxyLoc = Locations.getDefaultProxyLocation()

  chain = X509Chain()
  # Load user cert and key
  retVal = chain.loadChainFromFile(certLoc)
  if not retVal['OK']:
    gLogger.warn(retVal['Message'])
    return S_ERROR("Can't load %s" % certLoc)
  retVal = chain.loadKeyFromFile(keyLoc, password=params.userPasswd)
  if not retVal['OK']:
    gLogger.warn(retVal['Message'])
    if 'bad decrypt' in retVal['Message'] or 'bad pass phrase' in retVal['Message']:
      return S_ERROR("Bad passphrase")
    return S_ERROR("Can't load %s" % keyLoc)

  if params.checkWithCS:
    retVal = chain.generateProxyToFile(proxyLoc,
                                       params.proxyLifeTime,
                                       strength=params.proxyStrength,
                                       limited=params.limitedProxy,
                                       rfc=params.rfc)

    gLogger.info("Contacting CS...")
    retVal = Script.enableCS()
    if not retVal['OK']:
      gLogger.warn(retVal['Message'])
      if 'Unauthorized query' in retVal['Message']:
        # add hint for users
        return S_ERROR("Can't contact DIRAC CS: %s (User possibly not registered with dirac server) "
                       % retVal['Message'])
      return S_ERROR("Can't contact DIRAC CS: %s" % retVal['Message'])
    userDN = chain.getCertInChain(-1)['Value'].getSubjectDN()['Value']

    if not params.diracGroup:
      result = Registry.findDefaultGroupForDN(userDN)
      if not result['OK']:
        gLogger.warn("Could not get a default group for DN %s: %s" % (userDN, result['Message']))
      else:
        params.diracGroup = result['Value']
        gLogger.info("Default discovered group is %s" % params.diracGroup)
    gLogger.info("Checking DN %s" % userDN)
    retVal = Registry.getUsernameForDN(userDN)
    if not retVal['OK']:
      gLogger.warn(retVal['Message'])
      return S_ERROR("DN %s is not registered" % userDN)
    username = retVal['Value']
    gLogger.info("Username is %s" % username)
    retVal = Registry.getGroupsForUser(username)
    if not retVal['OK']:
      gLogger.warn(retVal['Message'])
      return S_ERROR("User %s has no groups defined" % username)
    groups = retVal['Value']
    if params.diracGroup not in groups:
      return S_ERROR("Requested group %s is not valid for DN %s" % (params.diracGroup, userDN))
    gLogger.info("Creating proxy for %s@%s (%s)" % (username, params.diracGroup, userDN))
  if params.summary:
    h = int(params.proxyLifeTime / 3600)
    m = int(params.proxyLifeTime / 60) - h * 60
    gLogger.notice("Proxy lifetime will be %02d:%02d" % (h, m))
    gLogger.notice("User cert is %s" % certLoc)
    gLogger.notice("User key  is %s" % keyLoc)
    gLogger.notice("Proxy will be written to %s" % proxyLoc)
    if params.diracGroup:
      gLogger.notice("DIRAC Group will be set to %s" % params.diracGroup)
    else:
      gLogger.notice("No DIRAC Group will be set")
    gLogger.notice("Proxy strength will be %s" % params.proxyStrength)
    if params.limitedProxy:
      gLogger.notice("Proxy will be limited")
  retVal = chain.generateProxyToFile(proxyLoc,
                                     params.proxyLifeTime,
                                     params.diracGroup,
                                     strength=params.proxyStrength,
                                     limited=params.limitedProxy,
                                     rfc=params.rfc)
  if not retVal['OK']:
    gLogger.warn(retVal['Message'])
    return S_ERROR("Couldn't generate proxy: %s" % retVal['Message'])
  return S_OK(proxyLoc)
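generateProxy() only reads attributes from its params argument, so any object with the right fields will do; a hypothetical holder (field names inferred from the code above, defaults illustrative):

class ProxyParams(object):
  checkClock = True
  certLoc = None        # autodetected via Locations if None
  keyLoc = None
  userPasswd = ''
  stdinPasswd = False
  proxyLoc = None       # defaults to Locations.getDefaultProxyLocation()
  proxyLifeTime = 12 * 3600
  proxyStrength = 1024
  limitedProxy = False
  rfc = True
  diracGroup = None
  checkWithCS = True
  summary = True

result = generateProxy(ProxyParams())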
Example #32
    def executeRequest(self):
        ################################################
        # Get a request from request DB
        gMonitor.addMark("Iteration", 1)
        res = self.RequestDBClient.getRequest('transfer')
        if not res['OK']:
            gLogger.info(
                "TransferAgent.execute: Failed to get request from database.")
            return S_OK()
        elif not res['Value']:
            gLogger.info(
                "TransferAgent.execute: No requests to be executed found.")
            return S_OK()
        requestString = res['Value']['RequestString']
        requestName = res['Value']['RequestName']
        sourceServer = res['Value']['Server']
        try:
            jobID = int(res['Value']['JobID'])
        except (KeyError, ValueError, TypeError):
            jobID = 0
        gLogger.info("TransferAgent.execute: Obtained request %s" %
                     requestName)

        result = self.RequestDBClient.getCurrentExecutionOrder(
            requestName, sourceServer)
        if result['OK']:
            currentOrder = result['Value']
        else:
            return S_OK('Can not get the request execution order')

        oRequest = RequestContainer(request=requestString)

        ################################################
        # Find the number of sub-requests from the request
        res = oRequest.getNumSubRequests('transfer')
        if not res['OK']:
            errStr = "TransferAgent.execute: Failed to obtain number of transfer subrequests."
            gLogger.error(errStr, res['Message'])
            return S_OK()
        gLogger.info("TransferAgent.execute: Found %s sub requests." %
                     res['Value'])

        ################################################
        # For all the sub-requests in the request
        modified = False
        for ind in range(res['Value']):
            gMonitor.addMark("Execute", 1)
            gLogger.info("TransferAgent.execute: Processing sub-request %s." %
                         ind)
            subRequestAttributes = oRequest.getSubRequestAttributes(
                ind, 'transfer')['Value']
            if subRequestAttributes['ExecutionOrder']:
                subExecutionOrder = int(subRequestAttributes['ExecutionOrder'])
            else:
                subExecutionOrder = 0
            subStatus = subRequestAttributes['Status']
            if subStatus == 'Waiting' and subExecutionOrder <= currentOrder:
                subRequestFiles = oRequest.getSubRequestFiles(
                    ind, 'transfer')['Value']
                operation = subRequestAttributes['Operation']

                subRequestError = ''
                ################################################
                #  If the sub-request is a put and register operation
                if operation == 'putAndRegister' or operation == 'putAndRegisterAndRemove':
                    gLogger.info(
                        "TransferAgent.execute: Attempting to execute %s sub-request."
                        % operation)
                    diracSE = str(subRequestAttributes['TargetSE'])
                    catalog = ''
                    if 'Catalogue' in subRequestAttributes:
                        catalog = subRequestAttributes['Catalogue']
                    for subRequestFile in subRequestFiles:
                        if subRequestFile['Status'] == 'Waiting':
                            gMonitor.addMark("Put and register", 1)
                            lfn = str(subRequestFile['LFN'])
                            file = subRequestFile['PFN']
                            guid = subRequestFile['GUID']
                            addler = subRequestFile['Addler']
                            res = self.ReplicaManager.putAndRegister(
                                lfn,
                                file,
                                diracSE,
                                guid=guid,
                                checksum=addler,
                                catalog=catalog)
                            if res['OK']:
                                if lfn in res['Value']['Successful']:
                                    if 'put' not in res['Value']['Successful'][lfn]:
                                        gMonitor.addMark("Put failed", 1)
                                        self.DataLog.addFileRecord(
                                            lfn, 'PutFail', diracSE, '',
                                            'TransferAgent')
                                        gLogger.info(
                                            "TransferAgent.execute: Failed to put %s to %s."
                                            % (lfn, diracSE))
                                        subRequestError = "Put operation failed for %s to %s" % (
                                            lfn, diracSE)
                                        oRequest.setSubRequestFileAttributeValue(
                                            ind, 'transfer', lfn, 'Error',
                                            'Put failed')
                                    elif 'register' not in res['Value']['Successful'][lfn]:
                                        gMonitor.addMark("Put successful", 1)
                                        gMonitor.addMark(
                                            "File registration failed", 1)
                                        self.DataLog.addFileRecord(
                                            lfn, 'Put', diracSE, '',
                                            'TransferAgent')
                                        self.DataLog.addFileRecord(
                                            lfn, 'RegisterFail', diracSE, '',
                                            'TransferAgent')
                                        gLogger.info(
                                            "TransferAgent.execute: Successfully put %s to %s in %s seconds."
                                            % (lfn, diracSE, res['Value']
                                               ['Successful'][lfn]['put']))
                                        gLogger.info(
                                            "TransferAgent.execute: Failed to register %s to %s."
                                            % (lfn, diracSE))
                                        oRequest.setSubRequestFileAttributeValue(
                                            ind, 'transfer', lfn, 'Status',
                                            'Done')
                                        oRequest.setSubRequestFileAttributeValue(
                                            ind, 'transfer', lfn, 'Error',
                                            'Registration failed')
                                        subRequestError = "Registration failed for %s to %s" % (
                                            lfn, diracSE)
                                        fileDict = res['Value']['Failed'][lfn][
                                            'register']
                                        registerRequestDict = {
                                            'Attributes': {
                                                'TargetSE':
                                                fileDict['TargetSE'],
                                                'Operation': 'registerFile'
                                            },
                                            'Files': [{
                                                'LFN':
                                                fileDict['LFN'],
                                                'PFN':
                                                fileDict['PFN'],
                                                'Size':
                                                fileDict['Size'],
                                                'Addler':
                                                fileDict['Addler'],
                                                'GUID':
                                                fileDict['GUID']
                                            }]
                                        }
                                        gLogger.info(
                                            "TransferAgent.execute: Setting registration request for failed file."
                                        )
                                        oRequest.addSubRequest(
                                            registerRequestDict, 'register')
                                        modified = True
                                    else:
                                        gMonitor.addMark("Put successful", 1)
                                        gMonitor.addMark(
                                            "File registration successful", 1)
                                        self.DataLog.addFileRecord(
                                            lfn, 'Put', diracSE, '',
                                            'TransferAgent')
                                        self.DataLog.addFileRecord(
                                            lfn, 'Register', diracSE, '',
                                            'TransferAgent')
                                        gLogger.info(
                                            "TransferAgent.execute: Successfully put %s to %s in %s seconds."
                                            % (lfn, diracSE, res['Value']
                                               ['Successful'][lfn]['put']))
                                        gLogger.info(
                                            "TransferAgent.execute: Successfully registered %s to %s in %s seconds."
                                            %
                                            (lfn, diracSE, res['Value']
                                             ['Successful'][lfn]['register']))
                                        oRequest.setSubRequestFileAttributeValue(
                                            ind, 'transfer', lfn, 'Status',
                                            'Done')
                                        modified = True
                                else:
                                    gMonitor.addMark("Put failed", 1)
                                    self.DataLog.addFileRecord(
                                        lfn, 'PutFail', diracSE, '',
                                        'TransferAgent')
                                    errStr = "TransferAgent.execute: Failed to put and register file."
                                    gLogger.error(
                                        errStr, "%s %s %s" %
                                        (lfn, diracSE,
                                         res['Value']['Failed'][lfn]))
                                    oRequest.setSubRequestFileAttributeValue(
                                        ind, 'transfer', lfn, 'Error',
                                        'Complete file failure')
                                    subRequestError = "Failed to put and register file"
                            else:
                                gMonitor.addMark("Put failed", 1)
                                self.DataLog.addFileRecord(
                                    lfn, 'PutFail', diracSE, '',
                                    'TransferAgent')
                                errStr = "TransferAgent.execute: Completely failed to put and register file."
                                gLogger.error(errStr, res['Message'])
                                oRequest.setSubRequestFileAttributeValue(
                                    ind, 'transfer', lfn, 'Error',
                                    'RM call failure')
                                subRequestError = operation + " RM call file"
                        else:
                            gLogger.info(
                                "TransferAgent.execute: File already completed."
                            )

                ################################################
                #  If the sub-request is a put operation
                elif operation == 'put':
                    gLogger.info(
                        "TransferAgent.execute: Attempting to execute %s sub-request."
                        % operation)
                    diracSE = subRequestAttributes['TargetSE']
                    for subRequestFile in subRequestFiles:
                        if subRequestFile['Status'] == 'Waiting':
                            gMonitor.addMark("Put", 1)
                            lfn = subRequestFile['LFN']
                            file = subRequestFile['PFN']
                            res = self.ReplicaManager.put(lfn, file, diracSE)
                            if res['OK']:
                                if lfn in res['Value']['Successful']:
                                    gMonitor.addMark("Put successful", 1)
                                    self.DataLog.addFileRecord(
                                        lfn, 'Put', diracSE, '',
                                        'TransferAgent')
                                    gLogger.info(
                                        "TransferAgent.execute: Successfully put %s to %s in %s seconds."
                                        % (lfn, diracSE,
                                           res['Value']['Successful'][lfn]))
                                    oRequest.setSubRequestFileAttributeValue(
                                        ind, 'transfer', lfn, 'Status', 'Done')
                                    modified = True
                                else:
                                    gMonitor.addMark("Put failed", 1)
                                    self.DataLog.addFileRecord(
                                        lfn, 'PutFail', diracSE, '',
                                        'TransferAgent')
                                    errStr = "TransferAgent.execute: Failed to put file."
                                    gLogger.error(
                                        errStr, "%s %s %s" %
                                        (lfn, diracSE,
                                         res['Value']['Failed'][lfn]))
                                    subRequestError = "Put operation failed for %s to %s" % (
                                        lfn, diracSE)
                                    oRequest.setSubRequestFileAttributeValue(
                                        ind, 'transfer', lfn, 'Error',
                                        'Put failed')
                            else:
                                gMonitor.addMark("Put failed", 1)
                                self.DataLog.addFileRecord(
                                    lfn, 'PutFail', diracSE, '',
                                    'TransferAgent')
                                errStr = "TransferAgent.execute: Completely failed to put file."
                                gLogger.error(errStr, res['Message'])
                                subRequestError = "Put RM call failed for %s to %s" % (
                                    lfn, diracSE)
                                oRequest.setSubRequestFileAttributeValue(
                                    ind, 'transfer', lfn, 'Error',
                                    'Put RM call failed')
                        else:
                            gLogger.info(
                                "TransferAgent.execute: File already completed."
                            )

                ################################################
                #  If the sub-request is a replicate and register operation
                elif operation == 'replicateAndRegister' or operation == 'replicateAndRegisterAndRemove':
                    gLogger.info(
                        "TransferAgent.execute: Attempting to execute %s sub-request."
                        % operation)
                    targetSE = subRequestAttributes['TargetSE']
                    sourceSE = subRequestAttributes['SourceSE']
                    if sourceSE == "None":
                        sourceSE = ''
                    for subRequestFile in subRequestFiles:
                        if subRequestFile['Status'] == 'Waiting':
                            gMonitor.addMark("Replicate and register", 1)
                            lfn = subRequestFile['LFN']
                            res = self.ReplicaManager.replicateAndRegister(
                                lfn, targetSE, sourceSE=sourceSE)
                            if res['OK']:
                                if lfn in res['Value']['Successful']:
                                    if 'replicate' not in res['Value']['Successful'][lfn]:
                                        gLogger.info(
                                            "TransferAgent.execute: Failed to replicate %s to %s."
                                            % (lfn, targetSE))
                                        gMonitor.addMark(
                                            "Replication failed", 1)
                                        oRequest.setSubRequestFileAttributeValue(
                                            ind, 'transfer', lfn, "Error",
                                            "Replication failed")
                                        subRequestError = "Replication failed for %s to %s" % (
                                            lfn, targetSE)
                                    elif 'register' not in res['Value']['Successful'][lfn]:
                                        gMonitor.addMark(
                                            "Replication successful", 1)
                                        gMonitor.addMark(
                                            "Replica registration failed", 1)
                                        gLogger.info(
                                            "TransferAgent.execute: Successfully replicated %s to %s in %s seconds."
                                            %
                                            (lfn, targetSE, res['Value']
                                             ['Successful'][lfn]['replicate']))
                                        gLogger.info(
                                            "TransferAgent.execute: Failed to register %s to %s."
                                            % (lfn, targetSE))
                                        oRequest.setSubRequestFileAttributeValue(
                                            ind, 'transfer', lfn, 'Status',
                                            'Done')
                                        oRequest.setSubRequestFileAttributeValue(
                                            ind, 'transfer', lfn, 'Error',
                                            'Registration failed')
                                        subRequestError = "Registration failed for %s to %s" % (
                                            lfn, targetSE)
                                        fileDict = res['Value']['Failed'][lfn][
                                            'register']
                                        registerRequestDict = {
                                            'Attributes': {
                                                'TargetSE':
                                                fileDict['TargetSE'],
                                                'Operation': 'registerReplica'
                                            },
                                            'Files': [{
                                                'LFN': fileDict['LFN'],
                                                'PFN': fileDict['PFN']
                                            }]
                                        }
                                        gLogger.info(
                                            "TransferAgent.execute: Setting registration request for failed replica."
                                        )
                                        oRequest.addSubRequest(
                                            registerRequestDict, 'register')
                                        modified = True
                                    else:
                                        gMonitor.addMark(
                                            "Replication successful", 1)
                                        gMonitor.addMark(
                                            "Replica registration successful",
                                            1)
                                        gLogger.info(
                                            "TransferAgent.execute: Successfully replicated %s to %s in %s seconds."
                                            %
                                            (lfn, targetSE, res['Value']
                                             ['Successful'][lfn]['replicate']))
                                        gLogger.info(
                                            "TransferAgent.execute: Successfully registered %s to %s in %s seconds."
                                            %
                                            (lfn, targetSE, res['Value']
                                             ['Successful'][lfn]['register']))
                                        oRequest.setSubRequestFileAttributeValue(
                                            ind, 'transfer', lfn, 'Status',
                                            'Done')
                                        modified = True
                                else:
                                    gMonitor.addMark("Replication failed", 1)
                                    errStr = "TransferAgent.execute: Failed to replicate and register file."
                                    gLogger.error(
                                        errStr, "%s %s %s" %
                                        (lfn, targetSE,
                                         res['Value']['Failed'][lfn]))

                            else:
                                gMonitor.addMark("Replication failed", 1)
                                errStr = "TransferAgent.execute: Completely failed to replicate and register file."
                                gLogger.error(errStr, res['Message'])
                                oRequest.setSubRequestFileAttributeValue(
                                    ind, 'transfer', lfn, 'Error',
                                    'RM call failure')
                                subRequestError = operation + " RM call failed"
                        else:
                            gLogger.info(
                                "TransferAgent.execute: File already completed."
                            )

                ################################################
                #  If the sub-request is a replicate operation
                elif operation == 'replicate':
                    gLogger.info(
                        "TransferAgent.execute: Attempting to execute %s sub-request."
                        % operation)
                    targetSE = subRequestAttributes['TargetSE']
                    sourceSE = subRequestAttributes['SourceSE']
                    for subRequestFile in subRequestFiles:
                        if subRequestFile['Status'] == 'Waiting':
                            gMonitor.addMark("Replicate", 1)
                            lfn = subRequestFile['LFN']
                            res = self.ReplicaManager.replicate(
                                lfn, targetSE, sourceSE=sourceSE)
                            if res['OK']:
                                if res['Value']['Successful'].has_key(lfn):
                                    gMonitor.addMark("Replication successful",
                                                     1)
                                    gLogger.info(
                                        "TransferAgent.execute: Successfully replicated %s to %s in %s seconds."
                                        % (lfn, targetSE,
                                           res['Value']['Successful'][lfn]))
                                    oRequest.setSubRequestFileAttributeValue(
                                        ind, 'transfer', lfn, 'Status', 'Done')
                                    modified = True
                                else:
                                    gMonitor.addMark("Replication failed", 1)
                                    errStr = "TransferAgent.execute: Failed to replicate file."
                                    gLogger.error(
                                        errStr, "%s %s %s" %
                                        (lfn, targetSE,
                                         res['Value']['Failed'][lfn]))
                                    subRequestError = "Replicate operation failed for %s to %s" % (
                                        lfn, targetSE)
                                    oRequest.setSubRequestFileAttributeValue(
                                        ind, 'transfer', lfn, 'Error',
                                        'Replication failed')
                            else:
                                gMonitor.addMark("Replication failed", 1)
                                errStr = "TransferAgent.execute: Completely failed to replicate file."
                                gLogger.error(errStr, res['Message'])
                                subRequestError = "Replicate RM call failed for %s to %s" % (
                                    lfn, targetSE)
                                oRequest.setSubRequestFileAttributeValue(
                                    ind, 'transfer', lfn, 'Error',
                                    'Replicate RM call failed')
                        else:
                            gLogger.info(
                                "TransferAgent.execute: File already completed."
                            )

                ################################################
                #  If the sub-request is a get operation
                elif operation == 'get':
                    gLogger.info(
                        "TransferAgent.execute: Attempting to execute %s sub-request."
                        % operation)
                    sourceSE = subRequestAttributes['TargetSE']
                    for subRequestFile in subRequestFiles:
                        if subRequestFile['Status'] == 'Waiting':
                            lfn = str(subRequestFile['LFN'])
                            pfn = str(subRequestFile['PFN'])
                            got = False
                            if sourceSE and pfn:
                                res = self.ReplicaManager.getStorageFile(
                                    pfn, sourceSE)
                                if res['OK'] and res['Value'][
                                        'Successful'].has_key(pfn):
                                    got = True
                            else:
                                res = self.ReplicaManager.getFile(lfn)
                                if res['OK'] and res['Value'][
                                        'Successful'].has_key(lfn):
                                    got = True
                            if got:
                                gLogger.info(
                                    "TransferAgent.execute: Successfully got %s."
                                    % lfn)
                                oRequest.setSubRequestFileAttributeValue(
                                    ind, 'transfer', lfn, 'Status', 'Done')
                                modified = True
                            else:
                                errStr = "TransferAgent.execute: Failed to get file."
                                gLogger.error(errStr, lfn)
                        else:
                            gLogger.info(
                                "TransferAgent.execute: File already completed."
                            )

                ################################################
                #  If the sub-request is none of the above types
                else:
                    gLogger.error(
                        "TransferAgent.execute: Operation not supported.",
                        operation)

                if subRequestError:
                    oRequest.setSubRequestAttributeValue(
                        ind, 'transfer', 'Error', subRequestError)

                ################################################
                #  Determine whether there are any active files
                if oRequest.isSubRequestEmpty(ind, 'transfer')['Value']:
                    oRequest.setSubRequestStatus(ind, 'transfer', 'Done')
                    gMonitor.addMark("Done", 1)

            ################################################
            #  If the sub-request is already in terminal state
            else:
                gLogger.info(
                    "TransferAgent.execute: Sub-request %s has status '%s' and will not be executed."
                    % (ind, subRequestAttributes['Status']))

        ################################################
        #  Generate the new request string after operation
        requestString = oRequest.toXML()['Value']
        res = self.RequestDBClient.updateRequest(requestName, requestString,
                                                 sourceServer)

        if modified and jobID:
            result = self.finalizeRequest(requestName, jobID, sourceServer)
        return S_OK()
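All of these snippets rely on DIRAC's return-value convention: every call returns a dict with an 'OK' flag, carrying 'Value' on success or 'Message' on failure, and bulk operations additionally nest 'Successful'/'Failed' maps inside 'Value'. A minimal sketch of how a caller unpacks this, assuming only a ReplicaManager-like object (the wrapper itself is illustrative, not part of the original code):

from DIRAC import S_OK, S_ERROR, gLogger

def replicateOne(replicaManager, lfn, targetSE):
    # Illustrative helper showing the S_OK/S_ERROR unpacking pattern
    res = replicaManager.replicateAndRegister(lfn, targetSE)
    if not res['OK']:
        # Total failure: no per-file detail, only 'Message'
        gLogger.error("replicateAndRegister call failed", res['Message'])
        return res
    if lfn in res['Value']['Failed']:
        # Partial failure: the per-file reason lives in the 'Failed' map
        return S_ERROR("%s: %s" % (lfn, res['Value']['Failed'][lfn]))
    # Per-file success detail lives in the 'Successful' map
    return S_OK(res['Value']['Successful'][lfn])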
Ejemplo n.º 33
0
 def flush(self, allData=False):
     gLogger.info('Flushing monitoring')
     for mc in self.__mcList:
         mc.flush(allData)
Ejemplo n.º 34
0
 def __init__(self):
     self.__mcList = []
     gLogger.info("Using MonitoringClient in IOLoop mode")
     # Here we don't need to use IOLoop.current(): tornado attaches the periodic callback to the current IOLoop itself
     # The callback fires every 5 minutes (300000 ms)
     tornado.ioloop.PeriodicCallback(self.flush, 300000).start()
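For context, tornado's PeriodicCallback takes a callback and an interval in milliseconds, and on start() attaches itself to whichever IOLoop is current. A minimal standalone sketch with a stand-in flush function (not DIRAC code):

import tornado.ioloop

def flushAll():
    # Stand-in for MonitoringClient.flush(); fires every 5 seconds here
    print 'flushing'

# 5000 ms interval; attaches to the IOLoop that is current when start() is called
tornado.ioloop.PeriodicCallback(flushAll, 5000).start()
tornado.ioloop.IOLoop.current().start()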
Ejemplo n.º 35
0
 def updateWaitingIntegrity(self):
     """ Get 'WaitingIntegrity' transformations, update to 'ValidatedOutput'
 """
     gLogger.info(
         "Looking for transformations in the WaitingIntegrity status to update"
     )
     res = self.transClient.getTransformations(
         {'Status': 'WaitingIntegrity'})
     if not res['OK']:
         gLogger.error("Failed to get WaitingIntegrity transformations",
                       res['Message'])
         return res
     transDicts = res['Value']
     if not transDicts:
         gLogger.info("No transformations found in WaitingIntegrity status")
         return S_OK()
     gLogger.info("Found %s transformations in WaitingIntegrity status" %
                  len(transDicts))
     for transDict in transDicts:
         transID = transDict['TransformationID']
         gLogger.info("-" * 40)
         res = self.integrityClient.getTransformationProblematics(
             int(transID))
         if not res['OK']:
             gLogger.error(
                 "Failed to determine waiting problematics for transformation",
                 res['Message'])
         elif not res['Value']:
             res = self.transClient.setTransformationParameter(
                 transID, 'Status', 'ValidatedOutput')
             if not res['OK']:
                 gLogger.error(
                     "Failed to update status of transformation %s to ValidatedOutput"
                     % (transID))
             else:
                 gLogger.info(
                     "Updated status of transformation %s to ValidatedOutput"
                     % (transID))
         else:
             gLogger.info(
                 "%d problematic files for transformation %s were found" %
                 (len(res['Value']), transID))
     return S_OK()
Ejemplo n.º 36
0
 def export_updateTaskStatus( self, sourceID, status, successful = [], failed = [] ):
   """ An example to show the usage of the callbacks. """
   gLogger.info( "updateTaskStatus: Received callback information for ID %s" % sourceID )
   gLogger.info( "updateTaskStatus: Status = '%s'" % status )
   if successful:
     gLogger.info( "updateTaskStatus: %s files successfully staged" % len( successful ) )
     for lfn, time in successful:
       gLogger.info( "updateTaskStatus: %s %s" % ( lfn.ljust( 100 ), time.ljust( 10 ) ) )
   if failed:
     gLogger.info( "updateTaskStatus: %s files failed to stage" % len( successful ) )
     for lfn, time in failed:
       gLogger.info( "updateTaskStatus: %s %s" % ( lfn.ljust( 100 ), time.ljust( 10 ) ) )
   return S_OK()
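Methods prefixed export_ are remotely callable through DISET, with the prefix dropped on the wire. Assuming this handler is served by a hypothetical 'StorageManagement/CallbackService' endpoint, a client call might look as follows (the service path and argument values are illustrative):

from DIRAC.Core.DISET.RPCClient import RPCClient

stager = RPCClient( 'StorageManagement/CallbackService' )  # hypothetical service path
res = stager.updateTaskStatus( 1234,                             # sourceID
                               'Done',                           # status
                               [ ( '/lhcb/data/file1', '12.3' ) ],  # successful (lfn, time) pairs
                               [] )                              # failed
if not res['OK']:
  print res['Message']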
Ejemplo n.º 37
0
    def checkTransformationIntegrity(self, prodID):
        """ This method contains the real work
    """
        gLogger.info("-" * 40)
        gLogger.info("Checking the integrity of production %s" % prodID)
        gLogger.info("-" * 40)

        res = self.getTransformationDirectories(prodID)
        if not res['OK']:
            return res
        directories = res['Value']

        ######################################################
        #
        # This check performs BK->Catalog->SE
        #
        res = self.integrityClient.productionToCatalog(prodID)
        if not res['OK']:
            gLogger.error(res['Message'])
            return res
        bk2catalogMetadata = res['Value']['CatalogMetadata']
        bk2catalogReplicas = res['Value']['CatalogReplicas']
        res = self.integrityClient.checkPhysicalFiles(bk2catalogReplicas,
                                                      bk2catalogMetadata)
        if not res['OK']:
            gLogger.error(res['Message'])
            return res

        if not directories:
            return S_OK()

        ######################################################
        #
        # This check performs Catalog->BK and Catalog->SE for possible output directories
        #
        res = self.fileCatalog.exists(directories)
        if not res['OK']:
            gLogger.error(res['Message'])
            return res
        for directory, error in res['Value']['Failed'].items():
            gLogger.error('Failed to determine existence of directory',
                          '%s %s' % (directory, error))
        if res['Value']['Failed']:
            return S_ERROR("Failed to determine the existence of directories")
        directoryExists = res['Value']['Successful']
        for directory in sorted(directoryExists.keys()):
            if not directoryExists[directory]:
                continue
            iRes = self.integrityClient.catalogDirectoryToBK(directory)
            if not iRes['OK']:
                gLogger.error(iRes['Message'])
                return iRes
            catalogDirMetadata = iRes['Value']['CatalogMetadata']
            catalogDirReplicas = iRes['Value']['CatalogReplicas']
            catalogMetadata = {}
            catalogReplicas = {}
            for lfn in catalogDirMetadata.keys():
                if lfn not in bk2catalogMetadata.keys():
                    catalogMetadata[lfn] = catalogDirMetadata[lfn]
                    if lfn in catalogDirReplicas:
                        catalogReplicas[lfn] = catalogDirReplicas[lfn]
            if not catalogMetadata:
                continue
            res = self.integrityClient.checkPhysicalFiles(
                catalogReplicas, catalogMetadata)
            if not res['OK']:
                gLogger.error(res['Message'])
                return res

        return S_OK()
Ejemplo n.º 38
0
    def sendMail(self, sendDict=None, title=None, body=None, fromAddress=None):
        """
    Sending an email using sendDict: { e-mail : name } as addressbook
    title and body is the e-mail's Subject and Body
    fromAddress is an email address in behalf of whom the message is sent
    Return success/failure JSON structure
    """

        if not sendDict:
            result = "sendDict argument is missing"
            gLogger.debug(result)
            return {"success": "false", "error": result}

        if not title:
            result = "title argument is missing"
            gLogger.debug(result)
            return {"success": "false", "error": result}

        if not body:
            result = "body argument is missing"
            gLogger.debug(result)
            return {"success": "false", "error": result}

        if not fromAddress:
            result = "fromAddress argument is missing"
            gLogger.debug(result)
            return {"success": "false", "error": result}

        sentSuccess = list()
        sentFailed = list()
        gLogger.debug("Initializing Notification client")
        ntc = NotificationClient(
            lambda x, timeout: RPCClient(x, timeout=timeout, static=True))

        for email, name in sendDict.iteritems():
            result = ntc.sendMail(email, title, body, fromAddress, False)
            if not result["OK"]:
                error = name + ": " + result["Message"]
                sentFailed.append(error)
                gLogger.error("Sent failure: ", error)
            else:
                gLogger.info("Successfully sent to %s" % name)
                sentSuccess.append(name)

        success = ", ".join(sentSuccess)
        failure = "\n".join(sentFailed)

        if success and failure:
            result = "Successfully sent e-mail to: "
            result = result + success + "\n\nFailed to send e-mail to:\n" + failure
            gLogger.debug(result)
            return {"success": "true", "result": result}
        elif success:
            result = "Successfully sent e-mail to: %s" % success
            gLogger.debug(result)
            return {"success": "true", "result": result}
        elif failure:
            result = "Failed to send e-mail to:\n%s" % failure
            gLogger.debug(result)
            return {"success": "false", "error": result}

        result = "No messages were sent due technical failure"
        gLogger.debug(result)
        return {"success": "false", "error": result}
Ejemplo n.º 39
0
 def msgFromClient(self, cliTrid, msgObj):
     gLogger.info("Message %s to %s service" % (msgObj.getName(), self.__byClient[cliTrid]["srvName"]))
     result = self.__byClient[cliTrid]["srvEnd"].sendMessage(msgObj)
     return result
Ejemplo n.º 40
0
    def moveFilesToDerivedTransformation(self, transDict, resetUnused=True):
        """ move files input to a transformation, to the derived one
    """
        prod = transDict['TransformationID']
        parentProd = int(transDict.get('InheritedFrom', 0))
        movedFiles = {}
        if not parentProd:
            gLogger.warn(
                "[None] [%d] .moveFilesToDerivedTransformation: Transformation was not derived..."
                % prod)
            return S_OK((parentProd, movedFiles))
        # get the lfns in status Unused/MaxReset of the parent production
        res = self.getTransformationFiles(condDict={
            'TransformationID': parentProd,
            'Status': ['Unused', 'MaxReset']
        })
        if not res['OK']:
            gLogger.error(
                "[None] [%d] .moveFilesToDerivedTransformation: Error getting Unused files from transformation %s:"
                % (prod, parentProd), res['Message'])
            return res
        parentFiles = res['Value']
        lfns = [lfnDict['LFN'] for lfnDict in parentFiles]
        if not lfns:
            gLogger.info(
                "[None] [%d] .moveFilesToDerivedTransformation: No files found to be moved from transformation %d"
                % (prod, parentProd))
            return S_OK((parentProd, movedFiles))
        # get the lfns of the derived production that were Unused/MaxReset in the parent one
        res = self.getTransformationFiles(condDict={
            'TransformationID': prod,
            'LFN': lfns
        })
        if not res['OK']:
            gLogger.error(
                "[None] [%d] .moveFilesToDerivedTransformation: Error getting files from derived transformation"
                % prod, res['Message'])
            return res
        derivedFiles = res['Value']
        derivedStatusDict = dict([(derivedDict['LFN'], derivedDict['Status'])
                                  for derivedDict in derivedFiles])
        newStatusFiles = {}
        parentStatusFiles = {}
        badStatusFiles = {}
        for parentDict in parentFiles:
            lfn = parentDict['LFN']
            derivedStatus = derivedStatusDict.get(lfn)
            if derivedStatus:
                parentStatus = parentDict['Status']
                # By default move to the parent status (which is Unused or MaxReset)
                status = parentStatus
                moveStatus = parentStatus
                # For MaxReset, set Unused if requested
                if parentStatus == 'MaxReset':
                    if resetUnused:
                        status = 'Unused'
                        moveStatus = 'Unused from MaxReset'
                    else:
                        status = 'MaxReset-inherited'
                if derivedStatus.endswith('-inherited'):
                    # This is the general case
                    newStatusFiles.setdefault((status, parentStatus),
                                              []).append(lfn)
                    movedFiles[moveStatus] = movedFiles.setdefault(
                        moveStatus, 0) + 1
                else:
                    badStatusFiles[derivedStatus] = badStatusFiles.setdefault(
                        derivedStatus, 0) + 1
                if parentStatus == 'Unused':
                    # If the file was Unused, set it NotProcessed in parent
                    parentStatusFiles.setdefault('NotProcessed',
                                                 []).append(lfn)
                else:
                    parentStatusFiles.setdefault('Moved', []).append(lfn)

        for status, count in badStatusFiles.items():
            gLogger.warn(
                '[None] [%d] .moveFilesToDerivedTransformation: Files found in an unexpected status in derived transformation'
                % prod, '%s: %d' % (status, count))
        # Set the status in the parent transformation first
        for status, lfnList in parentStatusFiles.items():
            for lfnChunk in breakListIntoChunks(lfnList, 5000):
                res = self.setFileStatusForTransformation(
                    parentProd, status, lfnChunk)
                if not res['OK']:
                    gLogger.error(
                        "[None] [%d] .moveFilesToDerivedTransformation: Error setting status %s for %d files in transformation %d "
                        % (prod, status, len(lfnList), parentProd),
                        res['Message'])

        # Set the status in the new transformation
        for (status, oldStatus), lfnList in newStatusFiles.items():
            for lfnChunk in breakListIntoChunks(lfnList, 5000):
                res = self.setFileStatusForTransformation(
                    prod, status, lfnChunk)
                if not res['OK']:
                    gLogger.error(
                        "[None] [%d] .moveFilesToDerivedTransformation: Error setting status %s for %d files; resetting them %s in transformation %d"
                        % (prod, status, len(lfnChunk), oldStatus, parentProd),
                        res['Message'])
                    res = self.setFileStatusForTransformation(
                        parentProd, oldStatus, lfnChunk)
                    if not res['OK']:
                        gLogger.error(
                            "[None] [%d] .moveFilesToDerivedTransformation: Error setting status %s for %d files in transformation %d"
                            % (prod, oldStatus, len(lfnChunk), parentProd),
                            res['Message'])
                else:
                    gLogger.info(
                        "[None] [%d] .moveFilesToDerivedTransformation: Successfully moved %d files from %s to %s"
                        % (prod, len(lfnChunk), oldStatus, status))

        # If files were Assigned or Unused at the time of derivation, try and update them as jobs may have run since then
        res = self.getTransformationFiles(
            condDict={
                'TransformationID': prod,
                'Status': ['Assigned-inherited', 'Unused-inherited']
            })
        if res['OK']:
            assignedFiles = res['Value']
            if assignedFiles:
                lfns = [lfnDict['LFN'] for lfnDict in assignedFiles]
                res = self.getTransformationFiles(condDict={
                    'TransformationID': parentProd,
                    'LFN': lfns
                })
                if res['OK']:
                    parentFiles = res['Value']
                    processedLfns = [
                        lfnDict['LFN'] for lfnDict in parentFiles
                        if lfnDict['Status'] == 'Processed'
                    ]
                    if processedLfns:
                        res = self.setFileStatusForTransformation(
                            prod, 'Processed-inherited', processedLfns)
                        if res['OK']:
                            gLogger.info(
                                "[None] [%d] .moveFilesToDerivedTransformation: set %d files to status %s"
                                % (prod, len(processedLfns),
                                   'Processed-inherited'))
        if not res['OK']:
            gLogger.error(
                "[None] [%d] .moveFilesToDerivedTransformation: Error setting status for Assigned derived files"
                % prod, res['Message'])

        return S_OK((parentProd, movedFiles))
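A sketch of how this method might be driven, assuming transClient is an instance of the client class that defines it; transDict mirrors a record returned by getTransformations, and the result is a (parentProd, movedFiles) tuple where movedFiles counts files per move status:

transDict = {'TransformationID': 4567, 'InheritedFrom': 4500}  # illustrative IDs
res = transClient.moveFilesToDerivedTransformation(transDict, resetUnused=True)
if res['OK']:
    parentProd, movedFiles = res['Value']
    for moveStatus, count in movedFiles.items():
        print '%d files moved as %s from transformation %s' % (count, moveStatus, parentProd)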
Ejemplo n.º 41
0
 def conn_connected(self, trid, identity, kwargs):
     gLogger.info("[CML] CONN %s" % trid)
     gOptimizationMind.startOptimizer(trid)
     return S_OK()
Ejemplo n.º 42
0
    def web_hostAction(self):
        """
    Restart all DIRAC components on a given host
    """

        if not "host" in self.request.arguments:
            self.finish({"success": "false", "error": "No hostname defined"})
            return

        if not "action" in self.request.arguments:
            self.finish({"success": "false", "error": "No action defined"})
            return

        action = str(self.request.arguments["action"][0])
        hosts = self.request.arguments["host"][0].split(",")
        version = self.request.arguments["version"][0]

        userData = self.getSessionData()

        DN = str(userData["user"]["DN"])
        group = str(userData["user"]["group"])

        actionSuccess = list()
        actionFailed = list()

        for i in hosts:
            client = SystemAdministratorClient(str(i),
                                               None,
                                               delegatedDN=DN,
                                               delegatedGroup=group)
            if action == "restart":
                result = yield self.threadTask(client.restartComponent,
                                               str("*"), str("*"))
            elif action == "revert":
                result = yield self.threadTask(client.revertSoftware)
            elif action == "update":
                result = yield self.threadTask(client.updateSoftware,
                                               version,
                                               '',
                                               '',
                                               timeout=300)
            else:
                error = i + ": Action %s is not defined" % action
                actionFailed.append(error)
                continue

            gLogger.always(result)

            if not result["OK"]:
                if result["Message"].find("Unexpected EOF") > 0:
                    msg = "Signal 'Unexpected EOF' received: %s. Most likely DIRAC components" % result[
                        'Message']
                    msg = i + ": " + msg + " were successfully restarted."
                    actionSuccess.append(msg)
                    continue
                error = i + ": " + result["Message"]
                actionFailed.append(error)
                gLogger.error(error)
            else:
                gLogger.info(result["Value"])
                actionSuccess.append(i)

        self.finish(self.aftermath(actionSuccess, actionFailed, action,
                                   "Host"))
Ejemplo n.º 43
0
        resC = resourceStatus.setStorageElementStatus(se, 'CheckAccess',
                                                      'Active', reason,
                                                      userName)
        if not resC['OK']:
            gLogger.error("Failed to update %s check access to Active" % se)
        else:
            gLogger.notice("Successfully updated %s check access to Active" %
                           se)
            checkAllowed.append(se)

    if not (resR['OK'] or resW['OK'] or resC['OK']):
        DIRAC.exit(-1)

if not (writeAllowed or readAllowed or checkAllowed):
    gLogger.info("No storage elements were allowed")
    DIRAC.exit(-1)

if mute:
    gLogger.notice('Email is muted by script switch')
    DIRAC.exit(0)

subject = '%s storage elements allowed for use' % len(writeAllowed +
                                                      readAllowed +
                                                      checkAllowed)
addressPath = 'EMail/Production'
address = Operations().getValue(addressPath, '')

body = ''
if read:
    body = "%s\n\nThe following storage elements were allowed for reading:" % body
Ejemplo n.º 44
0
 def infoMsg(self, msg, dynMsg=""):
     gLogger.info("[%s] %s" % (self.__currentMethod, msg), dynMsg)
Ejemplo n.º 45
0
    def execute(self):
        """ The main agent execution method """

        # This allows dynamic changing of the throughput timescale
        self.throughputTimescale = self.am_getOption('ThroughputTimescale',
                                                     3600)
        ######################################################################################
        #
        #  Obtain information on the current state of the channel queues
        #

        res = self.TransferDB.getChannelQueues()
        if not res['OK']:
            errStr = "ReplicationScheduler._execute: Failed to get channel queues from TransferDB."
            gLogger.error(errStr, res['Message'])
            return S_OK()
        if not res['Value']:
            gLogger.info(
                "ReplicationScheduler._execute: No active channels found for replication."
            )
            return S_OK()
        channels = res['Value']

        res = self.TransferDB.getChannelObservedThroughput(
            self.throughputTimescale)
        if not res['OK']:
            errStr = "ReplicationScheduler._execute: Failed to get observed throughput from TransferDB."
            gLogger.error(errStr, res['Message'])
            return S_OK()
        if not res['Value']:
            gLogger.info(
                "ReplicationScheduler._execute: No active channels found for replication."
            )
            return S_OK()
        bandwidths = res['Value']

        self.strategyHandler = StrategyHandler(bandwidths, channels,
                                               self.section)

        processedRequests = []
        requestsPresent = True
        while requestsPresent:

            ######################################################################################
            #
            #  The first step is to obtain a transfer request from the RequestDB which should be scheduled.
            #

            gLogger.info(
                "ReplicationScheduler._execute: Contacting RequestDB for suitable requests."
            )
            res = self.RequestDB.getRequest('transfer')
            if not res['OK']:
                gLogger.error(
                    "ReplicationScheduler._execute: Failed to get a request list from RequestDB.",
                    res['Message'])
                continue
            if not res['Value']:
                gLogger.info(
                    "ReplicationScheduler._execute: No requests found in RequestDB."
                )
                requestsPresent = False
                return S_OK()
            requestString = res['Value']['RequestString']
            requestName = res['Value']['RequestName']
            gLogger.info(
                "ReplicationScheduler._execute: Obtained Request %s from RequestDB."
                % (requestName))

            ######################################################################################
            #
            #  The request must then be parsed to obtain the sub-requests, their attributes and files.
            #

            logStr = 'ReplicationScheduler._execute: Parsing Request %s.' % (
                requestName)
            gLogger.info(logStr)
            oRequest = RequestContainer(requestString)
            res = oRequest.getAttribute('RequestID')
            if not res['OK']:
                gLogger.error(
                    'ReplicationScheduler._execute: Failed to get requestID.',
                    res['Message'])
                return S_ERROR(
                    'ReplicationScheduler._execute: Failed to get number of sub-requests.'
                )
            requestID = res['Value']
            if requestID in processedRequests:
                # Break the loop once we have iterated once over all requests
                res = self.RequestDB.updateRequest(requestName, requestString)
                if not res['OK']:
                    gLogger.error("Failed to update request",
                                  "%s %s" % (requestName, res['Message']))
                return S_OK()

            processedRequests.append(requestID)

            res = oRequest.getNumSubRequests('transfer')
            if not res['OK']:
                gLogger.error(
                    'ReplicationScheduler._execute: Failed to get number of sub-requests.',
                    res['Message'])
                return S_ERROR(
                    'ReplicationScheduler._execute: Failed to get number of sub-requests.'
                )
            numberRequests = res['Value']
            gLogger.info(
                "ReplicationScheduler._execute: '%s' found with %s sub-requests."
                % (requestName, numberRequests))

            ######################################################################################
            #
            #  The important request attributes are the source and target SEs.
            #

            for ind in range(numberRequests):
                gLogger.info(
                    "ReplicationScheduler._execute: Treating sub-request %s from '%s'."
                    % (ind, requestName))
                attributes = oRequest.getSubRequestAttributes(
                    ind, 'transfer')['Value']
                if attributes['Status'] != 'Waiting':
                    #  If the sub-request is already in terminal state
                    gLogger.info(
                        "ReplicationScheduler._execute: Sub-request %s has status '%s' and will not be executed."
                        % (ind, attributes['Status']))
                    continue

                sourceSE = attributes['SourceSE']
                targetSE = attributes['TargetSE']
                """ This section should go in the transfer request class """
                if type(targetSE) in types.StringTypes:
                    if re.search(',', targetSE):
                        targetSEs = targetSE.split(',')
                    else:
                        targetSEs = [targetSE]
                """----------------------------------------------------- """
                operation = attributes['Operation']
                reqRepStrategy = None
                if operation in self.strategyHandler.getSupportedStrategies():
                    reqRepStrategy = operation

                ######################################################################################
                #
                # Then obtain the file attribute of interest are the  LFN and FileID
                #

                res = oRequest.getSubRequestFiles(ind, 'transfer')
                if not res['OK']:
                    gLogger.error(
                        'ReplicationScheduler._execute: Failed to obtain sub-request files.',
                        res['Message'])
                    continue
                files = res['Value']
                gLogger.info(
                    "ReplicationScheduler._execute: Sub-request %s found with %s files."
                    % (ind, len(files)))
                filesDict = {}
                for subFile in files:
                    lfn = subFile['LFN']
                    if subFile['Status'] != 'Waiting':
                        gLogger.debug(
                            "ReplicationScheduler._execute: %s will not be scheduled because it is %s."
                            % (lfn, subFile['Status']))
                    else:
                        fileID = subFile['FileID']
                        filesDict[lfn] = fileID
                if not filesDict:
                    gLogger.info(
                        "ReplicationScheduler._execute: No Waiting files found for request"
                    )
                    continue
                notSched = len(files) - len(filesDict)
                if notSched:
                    gLogger.info(
                        "ReplicationScheduler._execute: %d files found not Waiting"
                        % notSched)

                ######################################################################################
                #
                #  Now obtain replica information for the files associated to the sub-request.
                #

                lfns = filesDict.keys()
                gLogger.info(
                    "ReplicationScheduler._execute: Obtaining replica information for %d sub-request files."
                    % len(lfns))
                res = self.rm.getCatalogReplicas(lfns)
                if not res['OK']:
                    gLogger.error(
                        "ReplicationScheduler._execute: Failed to get replica information.",
                        res['Message'])
                    continue
                for lfn, failure in res['Value']['Failed'].items():
                    gLogger.error(
                        "ReplicationScheduler._execute: Failed to get replicas.",
                        '%s: %s' % (lfn, failure))
                replicas = res['Value']['Successful']
                if not replicas.keys():
                    gLogger.error(
                        "ReplicationScheduler._execute: Failed to get replica information for all files."
                    )
                    continue

                ######################################################################################
                #
                #  Now obtain the file sizes for the files associated to the sub-request.
                #

                lfns = replicas.keys()
                gLogger.info(
                    "ReplicationScheduler._execute: Obtaining file sizes for %d sub-request files."
                    % len(lfns))
                res = self.rm.getCatalogFileMetadata(lfns)
                if not res['OK']:
                    gLogger.error(
                        "ReplicationScheduler._execute: Failed to get file size information.",
                        res['Message'])
                    continue
                for lfn, failure in res['Value']['Failed'].items():
                    gLogger.error(
                        'ReplicationScheduler._execute: Failed to get file size.',
                        '%s: %s' % (lfn, failure))
                metadata = res['Value']['Successful']
                if not metadata.keys():
                    gLogger.error(
                        "ReplicationScheduler._execute: Failed to get metadata for all files."
                    )
                    continue

                ######################################################################################
                #
                # For each LFN determine the replication tree
                #

                for lfn in sortList(metadata.keys()):
                    fileSize = metadata[lfn]['Size']
                    lfnReps = replicas[lfn]
                    fileID = filesDict[lfn]

                    targets = []
                    for targetSE in targetSEs:
                        if targetSE in lfnReps.keys():
                            gLogger.debug(
                                "ReplicationScheduler.execute: %s already present at %s."
                                % (lfn, targetSE))
                        else:
                            targets.append(targetSE)
                    if not targets:
                        gLogger.info(
                            "ReplicationScheduler.execute: %s present at all targets."
                            % lfn)
                        oRequest.setSubRequestFileAttributeValue(
                            ind, 'transfer', lfn, 'Status', 'Done')
                        continue
                    if not lfnReps:
                        gLogger.error(
                            "ReplicationScheduler.execute: The file has no replicas.",
                            lfn)
                        continue
                    res = self.strategyHandler.determineReplicationTree(
                        sourceSE,
                        targets,
                        lfnReps,
                        fileSize,
                        strategy=reqRepStrategy)
                    if not res['OK']:
                        gLogger.error(
                            "ReplicationScheduler.execute: Failed to determine replication tree.",
                            res['Message'])
                        continue
                    tree = res['Value']

                    ######################################################################################
                    #
                    # For each item in the replication tree obtain the source and target SURLS
                    #

                    for channelID, hopDict in tree.items():
                        gLogger.info(
                            "ReplicationScheduler.execute: processing for channel %d %s"
                            % (channelID, str(hopDict)))
                        hopSourceSE = hopDict['SourceSE']
                        hopDestSE = hopDict['DestSE']
                        hopAncestor = hopDict['Ancestor']

                        # Get the sourceSURL
                        if hopAncestor:
                            status = 'Waiting%s' % (hopAncestor)
                            res = self.obtainLFNSURL(hopSourceSE, lfn)
                            if not res['OK']:
                                errStr = res['Message']
                                gLogger.error(errStr)
                                return S_ERROR(errStr)
                            sourceSURL = res['Value']
                        else:
                            status = 'Waiting'
                            res = self.resolvePFNSURL(hopSourceSE,
                                                      lfnReps[hopSourceSE])
                            if not res['OK']:
                                sourceSURL = lfnReps[hopSourceSE]
                            else:
                                sourceSURL = res['Value']

                        # Get the targetSURL
                        res = self.obtainLFNSURL(hopDestSE, lfn)
                        if not res['OK']:
                            errStr = res['Message']
                            gLogger.error(errStr)
                            return S_ERROR(errStr)
                        targetSURL = res['Value']

                        ######################################################################################
                        #
                        # For each item in the replication tree add the file to the channel
                        #
                        res = self.TransferDB.addFileToChannel(
                            channelID,
                            fileID,
                            hopSourceSE,
                            sourceSURL,
                            hopDestSE,
                            targetSURL,
                            fileSize,
                            fileStatus=status)
                        if not res['OK']:
                            errStr = res['Message']
                            gLogger.error(
                                "ReplicationScheduler._execute: Failed to add File to Channel.",
                                "%s %s" % (fileID, channelID))
                            return S_ERROR(errStr)
                        res = self.TransferDB.addFileRegistration(
                            channelID, fileID, lfn, targetSURL, hopDestSE)
                        if not res['OK']:
                            errStr = res['Message']
                            gLogger.error(
                                "ReplicationScheduler._execute: Failed to add File registration.",
                                "%s %s" % (fileID, channelID))
                            result = self.TransferDB.removeFileFromChannel(
                                channelID, fileID)
                            if not result['OK']:
                                errStr += result['Message']
                                gLogger.error(
                                    "ReplicationScheduler._execute: Failed to remove File.",
                                    "%s %s" % (fileID, channelID))
                            return S_ERROR(errStr)
                        oRequest.setSubRequestFileAttributeValue(
                            ind, 'transfer', lfn, 'Status', 'Scheduled')
                    res = self.TransferDB.addReplicationTree(fileID, tree)

                if oRequest.isSubRequestEmpty(ind, 'transfer')['Value']:
                    oRequest.setSubRequestStatus(ind, 'transfer', 'Scheduled')

            ################################################
            #  Generate the new request string after operation
            requestString = oRequest.toXML()['Value']
            res = self.RequestDB.updateRequest(requestName, requestString)
            if not res['OK']:
                gLogger.error(
                    "ReplicationScheduler._execute: Failed to update request",
                    "%s %s" % (requestName, res['Message']))
Ejemplo n.º 46
0
    def _getInfo(self, requestParams):

        gLogger.info(requestParams)

        if not requestParams['name']:
            gLogger.warn('No name given')
            self.finish({
                'success': 'false',
                'error': 'We need a Site Name to generate an Overview'
            })
            return

        elementName = requestParams['name'][0]

        pub = RPCClient('ResourceStatus/Publisher')

        elementStatuses = pub.getElementStatuses('Site', str(elementName),
                                                 None, 'all', None, None)

        if not elementStatuses['OK']:
            gLogger.error(elementStatuses['Message'])
            self.finish({
                'success': 'false',
                'error': 'Error getting ElementStatus information'
            })
            return

        if not elementStatuses['Value']:
            gLogger.error('element "%s" not found' % elementName)
            self.finish({
                'success': 'false',
                'error': 'element "%s" not found' % elementName
            })
            return

        elementStatus = [
            dict(zip(elementStatuses['Columns'], element))
            for element in elementStatuses['Value']
        ][0]
        elementStatus['DateEffective'] = str(elementStatus['DateEffective'])
        elementStatus['LastCheckTime'] = str(elementStatus['LastCheckTime'])
        elementStatus['TokenExpiration'] = str(
            elementStatus['TokenExpiration'])

        gocdb_name = getGOCSiteName(elementName)
        if not gocdb_name['OK']:
            gLogger.error(gocdb_name['Message'])
            elementStatus['GOCDB'] = ""
            gocdb_name = ''
        else:
            gocdb_name = gocdb_name['Value']
            elementStatus[
                'GOCDB'] = '<a href="https://goc.egi.eu/portal/index.php?Page_Type=Submit_Search&SearchString=%s" target="_blank">%s</a>' % (
                    gocdb_name, gocdb_name)

        dirac_names = getDIRACSiteName(gocdb_name)
        if not dirac_names['OK']:
            gLogger.error(dirac_names['Message'])
            dirac_names = []
        else:
            elementStatus['GOCDB'] += "("
            for i in dirac_names['Value']:
                elementStatus['GOCDB'] += "%s " % i
            elementStatus['GOCDB'] += ")"

        elementStatus[
            "GGUS"] = '<a href="https://ggus.eu/ws/ticket_search.php?show_columns_check[]=REQUEST_ID&show_columns_check[]=TICKET_TYPE&show_columns_check[]=AFFECTED_VO&'
        elementStatus[
            "GGUS"] += 'show_columns_check[]=AFFECTED_SITE&show_columns_check[]=PRIORITY&show_columns_check[]=RESPONSIBLE_UNIT&show_columns_check[]=STATUS&show_columns_check[]=DATE_OF_CREATION&'

        elementStatus[
            "GGUS"] += 'show_columns_check[]=LAST_UPDATE&show_columns_check[]=TYPE_OF_PROBLEM&show_columns_check[]=SUBJECT&ticket=&supportunit=all&su_hierarchy=all&vo=all&user=&keyword=&involvedsupporter=&assignto=&'
        elementStatus[
            "GGUS"] += 'affectedsite=%s&specattrib=0&status=open&priority=all&typeofproblem=all&ticketcategory=&mouarea=&technology_provider=&date_type=creation+date&radiotf=1&timeframe=any&from_date=&to_date=&' % gocdb_name
        elementStatus[
            "GGUS"] += 'untouched_date=&orderticketsby=GHD_INT_REQUEST_ID&orderhow=descending" target="_blank"> %s tickets</a>' % gocdb_name

        convertName = {
            'CERN-PROD': 'CERN',
            'INFN-T1': 'CNAF',
            'FZK-LCG2': 'GridKa',
            'IN2P3-CC': 'IN2P3',
            'NIKHEF-ELPROD': 'NIKHEF',
            'pic': 'PIC',
            'RAL-LCG2': 'RAL',
            'SARA-MATRIX': 'SARA'
        }

        elog = convertName.get(gocdb_name, "")

        elementStatus[
            'Elog'] = '<a href="https://lblogbook.cern.ch/Operations/?Site=^' + elog + '%24&mode=summary" target="_blank">' + elog + '</a>'

        return {
            'success': 'true',
            'result': elementStatus,
            'total': len(elementStatus)
        }
Ejemplo n.º 47
0
    def newAlarm(self, alarmDef):
        """Create a new alarm record"""
        followers = []
        for field in self.__newAlarmMandatoryFields:
            if field not in alarmDef:
                return S_ERROR("Oops. Missing %s" % field)
            result = self.__checkAlarmField(field, alarmDef[field])
            if not result["OK"]:
                return result
            if field == "assignee":
                followers = result["Value"]
        author = alarmDef["author"]
        if author not in followers:
            followers.append(author)

        sqlFieldsName = []
        sqlFieldsValue = []
        for field in self.__newAlarmMandatoryFields:
            if field == "notifications":
                notifications = {}
                for nType in self.__validAlarmNotifications:
                    if nType in alarmDef[field]:
                        notifications[nType] = 1
                    else:
                        notifications[nType] = 0
                val = DEncode.encode(notifications)
            else:
                val = alarmDef[field]
            # Add to the list of fields to add
            sqlFieldsName.append(field)
            result = self._escapeString(val)
            if result["OK"]:
                sqlFieldsValue.append(result["Value"])
            else:
                return S_ERROR("Failed to escape value %s" % val)
        sqlFieldsName.extend(["CreationTime", "ModTime"])
        sqlFieldsValue.extend(["UTC_TIMESTAMP()", "UTC_TIMESTAMP()"])

        # Get the defined alarmkey and generate a random one if not defined
        if "alarmKey" in alarmDef:
            result = self._escapeString(alarmDef["alarmKey"])
            if result["OK"]:
                alarmKey = result["Value"]
            else:
                return S_ERROR("Failed to escape value %s for key AlarmKey" % val)
            gLogger.info("Checking there are no alarms with key %s" % alarmKey)
            result = self._query("SELECT AlarmId FROM `ntf_Alarms` WHERE AlarmKey=%s" % alarmKey)
            if not result["OK"]:
                return result
            if result["Value"]:
                return S_ERROR("Oops, alarm with id %s has the same alarm key!" % result["Value"][0][0])
        else:
            alarmKey = str(time.time())[-31:]
        sqlFieldsName.append("AlarmKey")
        sqlFieldsValue.append(alarmKey)

        sqlInsert = "INSERT INTO `ntf_Alarms` (%s) VALUES (%s)" % (",".join(sqlFieldsName), ",".join(sqlFieldsValue))

        result = self._update(sqlInsert)
        if not result["OK"]:
            return result
        alarmId = result["lastRowId"]
        for follower in followers:
            result = self.modifyFollowerForAlarm(alarmId, follower, notifications)
            if not result["OK"]:
                varMsg = "\nFollower: %s\nAlarm: %s\nError: %s" % (follower, alarmId, result["Message"])
                self.log.error("Couldn't set follower for alarm", varMsg)
        self.__notifyAlarm(alarmId)
        return S_OK(alarmId)
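A hypothetical use of newAlarm; the exact contents of __newAlarmMandatoryFields are not shown above, so all keys besides author, assignee, notifications and alarmKey (which the method demonstrably reads) are assumptions:

alarmDef = {
    'author': 'atsareg',                     # read explicitly by newAlarm
    'assignee': 'production_team',           # validated; yields the follower list
    'notifications': ['Web', 'Mail'],        # matched against __validAlarmNotifications
    'subject': 'SE space nearly full',       # assumed mandatory field
    'status': 'Open',                        # assumed mandatory field
    'alarmKey': 'se-space-CERN-USER',        # optional; a random key is generated if absent
}
result = notificationDB.newAlarm(alarmDef)   # notificationDB: instance of this DB class
if result['OK']:
    print 'Created alarm', result['Value']   # the new AlarmId
else:
    print result['Message']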
Ejemplo n.º 48
0
 def __verifyPfns(self, pfnSizes, storageElements):
     gLogger.info('Checking %s storage files exist in the catalog' %
                  len(pfnSizes))
     pfnsToRemove = []
     incorrectlyRegistered = []
     allDone = True
     # First get all the PFNs as they should be registered in the catalog
     for pfns in breakListIntoChunks(sortList(pfnSizes.keys()), 100):
         res = self.replicaManager.getPfnForProtocol(pfns,
                                                     storageElements[0],
                                                     withPort=False)
         if not res['OK']:
             allDone = False
             continue
         for pfn, error in res['Value']['Failed'].items():
             gLogger.error(
                 'Failed to obtain registered PFN for physical file',
                 '%s %s' % (pfn, error))
         if res['Value']['Failed']:
             allDone = False
         catalogStoragePfns = res['Value']['Successful']
         # Determine whether these PFNs are registered and if so obtain the LFN
         res = self.replicaManager.getCatalogLFNForPFN(
             catalogStoragePfns.values())
         if not res['OK']:
             allDone = False
             continue
         for surl in sortList(res['Value']['Failed'].keys()):
             if res['Value']['Failed'][surl] == 'No such file or directory':
                 #pfnsToRemove.append(surl)
                 print(surl)
             else:
                 gLogger.error(
                     'Failed to get LFN for PFN',
                     '%s %s' % (surl, res['Value']['Failed'][surl]))
         existingLFNs = res['Value']['Successful'].values()
         if existingLFNs:
             res = self.replicaManager.getCatalogReplicas(existingLFNs)
             if not res['OK']:
                 allDone = False
                 continue
             for lfn, error in res['Value']['Failed'].items():
                 gLogger.error(
                     'Failed to obtain registered replicas for LFN',
                     '%s %s' % (lfn, error))
             if res['Value']['Failed']:
                 allDone = False
             for lfn, replicas in res['Value']['Successful'].items():
                 match = False
                 for storageElement in storageElements:
                     if storageElement in replicas.keys():
                         match = True
                 if not match:
                     pass  #incorrectlyRegistered.append(lfn)
                     #print lfn
     gLogger.info("Verification of PFNs complete")
     if incorrectlyRegistered:
         gLogger.info("Found %d files incorrectly registered" %
                      len(incorrectlyRegistered))
     if pfnsToRemove:
         gLogger.info("Found %d files to be removed" % len(pfnsToRemove))
     resDict = {
         'Remove': pfnsToRemove,
         'ReRegister': incorrectlyRegistered,
         'AllDone': allDone
     }
     return S_OK(resDict)
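Both this example and the next batch their inputs with breakListIntoChunks. A tiny stand-in, shown only to make the 100-item batching pattern concrete (the real helper lives in DIRAC.Core.Utilities.List):

# Stand-in for DIRAC.Core.Utilities.List.breakListIntoChunks, illustrating the
# batching above (assumption: the real helper returns lists of at most `size`
# items, which is what the 100-PFN batches rely on).
def breakListIntoChunks(items, size):
    return [items[i:i + size] for i in range(0, len(items), size)]

assert breakListIntoChunks(list(range(7)), 3) == [[0, 1, 2], [3, 4, 5], [6]]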
Ejemplo n.º 49
0
  import DIRAC
  from DIRAC import gLogger

  args = Script.getPositionalArgs()

  requestName = None
  targetSEs = None
  if len( args ) < 3:
    Script.showHelp()
    DIRAC.exit( 1 )

  requestName = args[0]
  lfnList = getLFNList( args[1] )
  targetSEs = list( set( [ se for targetSE in args[2:] for se in targetSE.split( ',' ) ] ) )

  gLogger.info( "Will create request '%s' with 'ReplicateAndRegister' "\
                "operation using %s lfns and %s target SEs" % ( requestName, len( lfnList ), len( targetSEs ) ) )

  from DIRAC.RequestManagementSystem.Client.Request import Request
  from DIRAC.RequestManagementSystem.Client.Operation import Operation
  from DIRAC.RequestManagementSystem.Client.File import File
  from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
  from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
  from DIRAC.Core.Utilities.List import breakListIntoChunks

  lfnChunks = breakListIntoChunks( lfnList, 100 )
  multiRequests = len( lfnChunks ) > 1

  error = 0
  count = 0
  reqClient = ReqClient()
  fc = FileCatalog()
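The script is cut off here; a hedged sketch of how the chunk loop plausibly continues, following the standard DIRAC RequestManagementSystem client pattern rather than anything shown in this source:

  # Hedged continuation sketch: one ReplicateAndRegister operation per chunk.
  for chunk in lfnChunks:
    request = Request()
    request.RequestName = requestName if not multiRequests else "%s_%d" % ( requestName, count )
    op = Operation()
    op.Type = "ReplicateAndRegister"
    op.TargetSE = ",".join( targetSEs )
    for lfn in chunk:
      opFile = File()
      opFile.LFN = lfn
      op.addFile( opFile )
    request.addOperation( op )
    putRequest = reqClient.putRequest( request )
    if not putRequest["OK"]:
      gLogger.error( "unable to put request '%s': %s" % ( request.RequestName, putRequest["Message"] ) )
      error = -1
      continue
    count += 1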
Ejemplo n.º 50
0
    def transfer_fromClient(self, fileId, token, fileSize, fileHelper):
        """
        Receive a file as a sandbox
        """

        if self.__maxUploadBytes and fileSize > self.__maxUploadBytes:
            fileHelper.markAsTransferred()
            return S_ERROR(
                "Sandbox is too big. Please upload it to a grid storage element"
            )

        if isinstance(fileId, (list, tuple)):
            if len(fileId) > 1:
                assignTo = fileId[1]
                fileId = fileId[0]
            else:
                return S_ERROR(
                    "File identifier tuple has to have length greater than 1")
        else:
            assignTo = {}

        extPos = fileId.find(".tar")
        if extPos > -1:
            extension = fileId[extPos + 1:]
            aHash = fileId[:extPos]
        else:
            extension = ""
            aHash = fileId
        gLogger.info("Upload requested", f"for {aHash} [{extension}]")

        credDict = self.getRemoteCredentials()
        sbPath = self.__getSandboxPath("%s.%s" % (aHash, extension))
        # Generate the location
        result = self.__generateLocation(sbPath)
        if not result["OK"]:
            return result
        seName, sePFN = result["Value"]

        result = self.sandboxDB.getSandboxId(seName, sePFN,
                                             credDict["username"],
                                             credDict["group"])
        if result["OK"]:
            gLogger.info("Sandbox already exists. Skipping upload")
            fileHelper.markAsTransferred()
            sbURL = "SB:%s|%s" % (seName, sePFN)
            assignTo = {key: [(sbURL, assignTo[key])] for key in assignTo}
            result = self.export_assignSandboxesToEntities(assignTo)
            if not result["OK"]:
                return result
            return S_OK(sbURL)

        if self.__useLocalStorage:
            hdPath = self.__sbToHDPath(sbPath)
        else:
            hdPath = False
        # Write to local file
        result = self.__networkToFile(fileHelper, hdPath)
        if not result["OK"]:
            gLogger.error("Error while receiving sandbox file",
                          result["Message"])
            return result
        hdPath = result["Value"]
        gLogger.info("Wrote sandbox to file", hdPath)
        # Check hash!
        if fileHelper.getHash() != aHash:
            self.__secureUnlinkFile(hdPath)
            gLogger.error(
                "Hashes don't match! Client-defined hash differs from the hash of the received data!"
            )
            return S_ERROR("Hashes don't match!")
        # If using remote storage, copy there!
        if not self.__useLocalStorage:
            gLogger.info("Uploading sandbox to external storage")
            result = self.__copyToExternalSE(hdPath, sbPath)
            self.__secureUnlinkFile(hdPath)
            if not result["OK"]:
                return result
            sbPath = result["Value"][1]
        # Register!
        gLogger.info("Registering sandbox in the DB with",
                     f"SB:{self.__seNameToUse}|{sbPath}")
        result = self.sandboxDB.registerAndGetSandbox(
            credDict["username"],
            credDict["DN"],
            credDict["group"],
            self.__seNameToUse,
            sbPath,
            fileHelper.getTransferedBytes(),
        )
        if not result["OK"]:
            self.__secureUnlinkFile(hdPath)
            return result

        sbURL = f"SB:{self.__seNameToUse}|{sbPath}"
        assignTo = {key: [(sbURL, assignTo[key])] for key in assignTo}
        result = self.export_assignSandboxesToEntities(assignTo)
        if not result["OK"]:
            return result
        return S_OK(sbURL)
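On the client side the fileId carries a content hash plus the archive extension; a sketch of that naming, assuming an md5 content hash (the actual hash choice is not shown in this source):

import hashlib

# Hypothetical client-side helper: build the "<hash>.<extension>" fileId that
# transfer_fromClient() splits at ".tar". md5-of-content is an assumption here.
def makeSandboxFileId(tarballBytes, extension="tar.bz2"):
    return "%s.%s" % (hashlib.md5(tarballBytes).hexdigest(), extension)

fileId = makeSandboxFileId(b"sandbox payload")
extPos = fileId.find(".tar")                 # mirrors the server-side parsing
aHash, extension = fileId[:extPos], fileId[extPos + 1:]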
Ejemplo n.º 51
0
                request = RequestContainer()
                result = request.addSubRequest(
                    {
                        'Attributes': {
                            'Operation': 'removePhysicalFile',
                            'TargetSE': SEName,
                            'ExecutionOrder': 1
                        }
                    }, 'removal')
                index = result['Value']
                fileDict = {'PFN': SEPFN, 'Status': 'Waiting'}
                request.setSubRequestFiles(index, 'removal', [fileDict])
                return RequestClient().setRequest(
                    "RemoteSBDeletion:%s|%s:%s" % (SEName, SEPFN, time.time()),
                    request.toXML()['Value'])
            except Exception as e:
                gLogger.exception("Exception while setting deletion request")
                return S_ERROR("Cannot set deletion request: %s" % str(e))
        else:
            gLogger.info("Deleting external Sandbox")
            try:
                rm = ReplicaManager()
                return rm.removeStorageFile(SEPFN, SEName)
            except Exception as e:
                gLogger.exception(
                    "RM raised an exception while trying to delete a remote sandbox"
                )
                return S_ERROR(
                    "RM raised an exception while trying to delete a remote sandbox"
                )
Ejemplo n.º 52
0
    def export_submitJob(self, jobDesc):
        """ Submit a single job to DIRAC WMS
    """

        if self.peerUsesLimitedProxy:
            return S_ERROR("Can't submit using a limited proxy! (bad boy!)")

        # Check job submission permission
        result = self.jobPolicy.getJobPolicy()
        if not result['OK']:
            return S_ERROR('Failed to get job policies')
        policyDict = result['Value']
        if not policyDict[RIGHT_SUBMIT]:
            return S_ERROR('Job submission not authorized')

        #jobDesc is JDL for now
        jobDesc = jobDesc.strip()
        if jobDesc[0] != "[":
            jobDesc = "[%s" % jobDesc
        if jobDesc[-1] != "]":
            jobDesc = "%s]" % jobDesc

        # Check if the job is a parametric one
        jobClassAd = ClassAd(jobDesc)
        parametricJob = False
        if jobClassAd.lookupAttribute('Parameters'):
            parametricJob = True
            if jobClassAd.isAttributeList('Parameters'):
                parameterList = jobClassAd.getListFromExpression('Parameters')
            else:
                pStep = 0
                pFactor = 1
                pStart = 1
                nParameters = jobClassAd.getAttributeInt('Parameters')
                if not nParameters:
                    value = jobClassAd.get_expression('Parameters')
                    return S_ERROR(
                        'Illegal value for Parameters JDL field: %s' % value)

                if jobClassAd.lookupAttribute('ParameterStart'):
                    value = jobClassAd.get_expression(
                        'ParameterStart').replace('"', '')
                    try:
                        pStart = int(value)
                    except ValueError:
                        try:
                            pStart = float(value)
                        except ValueError:
                            return S_ERROR(
                                'Illegal value for ParameterStart JDL field: %s'
                                % value)

                if jobClassAd.lookupAttribute('ParameterStep'):
                    pStep = jobClassAd.getAttributeInt('ParameterStep')
                    if not pStep:
                        pStep = jobClassAd.getAttributeFloat('ParameterStep')
                        if not pStep:
                            value = jobClassAd.get_expression('ParameterStep')
                            return S_ERROR(
                                'Illegal value for ParameterStep JDL field: %s'
                                % value)
                if jobClassAd.lookupAttribute('ParameterFactor'):
                    pFactor = jobClassAd.getAttributeInt('ParameterFactor')
                    if not pFactor:
                        pFactor = jobClassAd.getAttributeFloat(
                            'ParameterFactor')
                        if not pFactor:
                            value = jobClassAd.get_expression(
                                'ParameterFactor')
                            return S_ERROR(
                                'Illegal value for ParameterFactor JDL field: %s'
                                % value)

                parameterList = list()
                parameterList.append(pStart)
                for i in range(nParameters - 1):
                    parameterList.append(parameterList[i] * pFactor + pStep)

            if len(parameterList) > self.maxParametricJobs:
                return S_ERROR(
                    'The number of parametric jobs exceeded the limit of %d' %
                    self.maxParametricJobs)

            jobDescList = []
            nParam = len(parameterList) - 1
            for n, p in enumerate(parameterList):
                newJobDesc = jobDesc.replace('%s', str(p)).replace(
                    '%n',
                    str(n).zfill(len(str(nParam))))
                newClassAd = ClassAd(newJobDesc)
                for attr in ['Parameters', 'ParameterStep', 'ParameterFactor']:
                    newClassAd.deleteAttribute(attr)
                if isinstance(p, str) and p.startswith('{'):
                    newClassAd.insertAttributeInt('Parameter', str(p))
                else:
                    newClassAd.insertAttributeString('Parameter', str(p))
                newClassAd.insertAttributeInt('ParameterNumber', n)
                newJDL = newClassAd.asJDL()
                jobDescList.append(newJDL)
        else:
            jobDescList = [jobDesc]

        jobIDList = []
        for jobDescription in jobDescList:
            result = gJobDB.insertNewJobIntoDB(jobDescription, self.owner,
                                               self.ownerDN, self.ownerGroup,
                                               self.diracSetup)
            if not result['OK']:
                return result

            jobID = result['JobID']
            gLogger.info('Job %s added to the JobDB for %s/%s' %
                         (jobID, self.ownerDN, self.ownerGroup))

            gJobLoggingDB.addLoggingRecord(jobID,
                                           result['Status'],
                                           result['MinorStatus'],
                                           source='JobManager')

            jobIDList.append(jobID)

        #Set persistency flag
        retVal = gProxyManager.getUserPersistence(self.ownerDN,
                                                  self.ownerGroup)
        if 'Value' not in retVal or not retVal['Value']:
            gProxyManager.setPersistency(self.ownerDN, self.ownerGroup, True)

        if parametricJob:
            result = S_OK(jobIDList)
        else:
            result = S_OK(jobIDList[0])

        result['JobID'] = result['Value']
        result['requireProxyUpload'] = self.__checkIfProxyUploadIsRequired()
        self.__sendNewJobsToMind(jobIDList)
        return result
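The arithmetic behind the parameter list above is p[0] = ParameterStart and p[i+1] = p[i] * ParameterFactor + ParameterStep; a standalone illustration:

# Standalone illustration of the parametric expansion rule used above.
def expandParameters(pStart, nParameters, pFactor=1, pStep=0):
    params = [pStart]
    for i in range(nParameters - 1):
        params.append(params[i] * pFactor + pStep)
    return params

print(expandParameters(1, 4, pFactor=2))   # [1, 2, 4, 8]   (geometric)
print(expandParameters(0, 4, pStep=5))     # [0, 5, 10, 15] (arithmetic)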
Ejemplo n.º 53
0
def test_DataIntegrityDB():
    """Some test cases"""
    source = "Test"
    prognosis = "TestError"
    prodID = 1234
    timestamp = int(time.time())
    lfn = "/Test/%08d/File1/%d" % (prodID, timestamp)
    pfn = "File1/%d" % (timestamp)
    fileMetadata1 = {
        lfn: {
            "Prognosis": prognosis,
            "PFN": pfn,
            "SE": "Test-SE"
        }
    }
    fileOut1 = {
        "LFN": lfn,
        "PFN": pfn,
        "Prognosis": prognosis,
        "GUID": None,
        "SE": "Test-SE",
        "Size": None
    }
    newStatus = "Solved"
    newPrognosis = "AnotherError"

    diDB = DataIntegrityDB()

    # Clean up the database if required
    result = diDB.getTransformationProblematics(1234)
    assert result["OK"], result["Message"]
    for fileID in result["Value"].values():
        result = diDB.removeProblematic(fileID)
        assert result["OK"], result["Message"]
    result = diDB.getProblematicsSummary()
    assert result["OK"], result["Message"]
    assert result["Value"] == {}

    # Run the actual test
    result = diDB.insertProblematic(source, fileMetadata1)
    assert result["OK"], result["Message"]
    assert result["Value"] == {"Successful": {lfn: True}, "Failed": {}}

    result = diDB.insertProblematic(source, fileMetadata1)
    assert result["OK"], result["Message"]
    assert result["Value"] == {
        "Successful": {
            lfn: "Already exists"
        },
        "Failed": {}
    }

    result = diDB.getProblematicsSummary()
    assert result["OK"], result["Message"]
    assert result["Value"] == {"TestError": {"New": 1}}

    result = diDB.getDistinctPrognosis()
    assert result["OK"], result["Message"]
    assert result["Value"] == ["TestError"]

    result = diDB.getProblematic()
    assert result["OK"], result["Message"]
    fileOut1["FileID"] = result["Value"]["FileID"]
    assert result["Value"] == fileOut1

    result = diDB.incrementProblematicRetry(result["Value"]["FileID"])
    assert result["OK"], result["Message"]
    assert result["Value"] == 1

    result = diDB.getProblematic()
    assert result["OK"], result["Message"]
    assert result["Value"] == fileOut1

    result = diDB.getPrognosisProblematics(prognosis)
    assert result["OK"], result["Message"]
    assert result["Value"] == [fileOut1]

    result = diDB.getTransformationProblematics(prodID)
    assert result["OK"], result["Message"]
    assert result["Value"][lfn] == fileOut1["FileID"]

    result = diDB.setProblematicStatus(fileOut1["FileID"], newStatus)
    assert result["OK"], result["Message"]
    assert result["Value"] == 1

    result = diDB.changeProblematicPrognosis(fileOut1["FileID"], newPrognosis)
    assert result["OK"], result["Message"]
    assert result["Value"] == 1

    result = diDB.getPrognosisProblematics(prognosis)
    assert result["OK"], result["Message"]
    assert result["Value"] == []

    result = diDB.removeProblematic(fileOut1["FileID"])
    assert result["OK"], result["Message"]
    assert result["Value"] == 1

    result = diDB.getProblematicsSummary()
    assert result["OK"], result["Message"]
    assert result["Value"] == {}

    gLogger.info("\n OK\n")
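The assert-style checks above are pytest-compatible as written; a minimal entry point for running the module directly, assuming a configured DataIntegrityDB backend is reachable:

if __name__ == "__main__":
    test_DataIntegrityDB()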
Ejemplo n.º 54
0

  def __loadReleaseNotesFile( self ):
    if not self.params.relNotes:
      relNotes = os.path.join( self.params.destination, self.params.name, "release.notes" )
    else:
      relNotes = self.params.relNotes
    if not os.path.isfile( relNotes ):
      return S_OK( "" )
    try:
      with open( relNotes, "r" ) as fd:
        releaseContents = fd.readlines()
    except Exception as excp:
      return S_ERROR( "Could not open %s: %s" % ( relNotes, excp ) )
    gLogger.info( "Loaded %s" % relNotes )
    relData = []
    version = False
    feature = False
    lastKey = False
    for rawLine in releaseContents:
      line = rawLine.strip()
      if not line:
        continue
      if line[0] == "[" and line[-1] == "]":
        version = line[1:-1].strip()
        relData.append( ( version, { 'comment' : [], 'features' : [] } ) )
        feature = False
        lastKey = False
        continue
      if line[0] == "*":
Ejemplo n.º 55
0
    def bulk_index(self,
                   indexprefix,
                   doc_type,
                   data,
                   mapping=None,
                   period=None):
        """
    :param str indexPrefix: it is the index name.
    :param str doc_type: the type of the document
    :param dict data: contains a list of dictionary
    :paran dict mapping: the mapping used by elasticsearch
    :param str period: We can specify, which kind of indexes will be created.
                       Currently only daily and monthly indexes are supported.
    """
        gLogger.info("%d records will be insert to %s" % (len(data), doc_type))
        if mapping is None:
            mapping = {}

        indexName = generateFullIndexName(indexprefix, period)
        gLogger.debug("inserting datat to %s index" % indexName)
        if not self.exists(indexName):
            retVal = self.createIndex(indexprefix, mapping, period)
            if not retVal['OK']:
                return retVal
        docs = []
        for row in data:
            body = {'_index': indexName, '_type': doc_type, '_source': {}}
            body['_source'] = row

            if 'timestamp' not in row:
                gLogger.warn(
                    "timestamp is not given! The current UTC time will be used.")

            # if the timestamp is not provided, we use the current utc time.
            timestamp = row.get('timestamp', int(Time.toEpoch()))
            try:
                if isinstance(timestamp, datetime):
                    body['_source']['timestamp'] = int(
                        timestamp.strftime('%s')) * 1000
                elif isinstance(timestamp, basestring):
                    timeobj = datetime.strptime(timestamp,
                                                '%Y-%m-%d %H:%M:%S.%f')
                    body['_source']['timestamp'] = int(
                        timeobj.strftime('%s')) * 1000
                else:  # we assume the timestamp is a unix epoch time (integer).
                    body['_source']['timestamp'] = timestamp * 1000
            except (TypeError, ValueError) as e:
                # in case we are not able to convert the timestamp to epoch time....
                gLogger.error("Wrong timestamp", e)
                body['_source']['timestamp'] = int(Time.toEpoch()) * 1000
            docs += [body]
        try:
            res = bulk(self.__client, docs, chunk_size=self.__chunk_size)
        except BulkIndexError as e:
            return S_ERROR(e)

        if res[0] == len(docs):
            # we have inserted all documents...
            return S_OK(len(docs))
        return S_ERROR(res)
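A hypothetical call illustrating the expected shape of data: flat dicts, optionally carrying a 'timestamp' as a datetime, a '%Y-%m-%d %H:%M:%S.%f' string, or epoch seconds. Index and field names below are made up for illustration:

# db: an instance of the class above (hypothetical setup).
records = [
    {"site": "LCG.CERN.ch", "jobs": 42, "timestamp": 1514764800},
    {"site": "LCG.CNAF.it", "jobs": 7},  # no timestamp: insertion time is used
]
result = db.bulk_index("lhcb_wmshistory", "WMSHistory", records, period="month")
if not result["OK"]:
    gLogger.error("bulk insertion failed", result["Message"])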
Ejemplo n.º 56
0
if months:
    totalDays += 30 * months
if days:
    totalDays += days

res = getProxyInfo(False, False)
if not res['OK']:
    gLogger.error("Failed to get client proxy information.", res['Message'])
    DIRAC.exit(2)
proxyInfo = res['Value']
username = proxyInfo['username']
userBase = '/%s/user/%s/%s' % (vo, username[0], username)
if not baseDir:
    baseDir = userBase

gLogger.info('Will search for files in %s' % baseDir)
activeDirs = [baseDir]

allFiles = []
emptyDirs = []
while len(activeDirs) > 0:
    currentDir = activeDirs[0]
    res = rm.getCatalogListDirectory(currentDir, verbose)
    activeDirs.remove(currentDir)
    if not res['OK']:
        gLogger.error("Error retrieving directory contents",
                      "%s %s" % (currentDir, res['Message']))
    elif currentDir in res['Value']['Failed']:
        gLogger.error(
            "Error retrieving directory contents",
            "%s %s" % (currentDir, res['Value']['Failed'][currentDir]))
Ejemplo n.º 57
0
def initializeGASWServiceHandler(serviceInfo):

    gLogger.info("================================")
    gLogger.info("= Initializing GASW Service 0.1")
    gLogger.info("================================")
    return S_OK()
Ejemplo n.º 58
0
            finalCatList.append(possibleCat)
        except Exception as x:
            gLogger.debug('Ignoring non-POOL catalogue file %s' % possibleCat)

    #Create POOL catalog with final list of catalog files and extract GUIDs
    generated = []
    pfnGUIDs = {}
    gLogger.debug('Final list of catalog files is: %s' % ', '.join(finalCatList))
    catalog = PoolXMLCatalog(finalCatList)
    for fname in fileNames:
        guid = str(catalog.getGuidByPfn(fname))
        if not guid:
            guid = makeGuid(fname)
            generated.append(fname)

        pfnGUIDs[fname] = guid

    if not generated:
        gLogger.info('Found GUIDs from POOL XML Catalogue for all files: %s' %
                     ', '.join(fileNames))
    else:
        gLogger.info(
            'GUIDs not found in the POOL XML Catalogue (so were generated) for: %s'
            % ', '.join(generated))

    result = S_OK(pfnGUIDs)
    result['directory'] = directory
    result['generated'] = generated
    return result
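makeGuid above fills in a GUID whenever the catalogue lookup comes back empty; a stand-in for that "look up, else generate" pattern (uuid4 is purely illustrative here, DIRAC's helper may derive GUIDs from file content instead):

import uuid

def getOrMakeGuid(catalogGuids, fname):
    """Return the catalogued GUID for fname, generating one if absent."""
    guid = catalogGuids.get(fname)
    if guid:
        return guid, False                       # found in the catalogue
    return str(uuid.uuid4()).upper(), True       # generated fallback

guid, generated = getOrMakeGuid({"a.dst": "0EA73E8D-89AB-4CDE-9F01-23456789ABCD"}, "b.dst")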
Ejemplo n.º 59
0
 def exec_deserializeTask(cls, taskStub):
   gLogger.info("DESERIALIZE %s" % taskStub)
   return S_OK(DEncode.decode(taskStub)[0])
Ejemplo n.º 60
0
 def initialize(self):
     gLogger.info("Initializing Service")