Пример #1
0
  def _getConfigStorageName( self, storageName, referenceType ):
    """
      Resolve the real storage name as defined in the configuration service.
      If the storage section is a reference to another SE, the reference
      is followed recursively (via the 'Alias' option).

      'storageName' is the storage section to check in the CS
    """
    configPath = '%s/%s' % ( self.rootConfigPath, storageName )
    res = gConfig.getOptions( configPath )
    if not res['OK']:
      errStr = "StorageFactory._getConfigStorageName: Failed to get storage options"
      gLogger.error( errStr, res['Message'] )
      return S_ERROR( errStr )
    if not res['Value']:
      errStr = "StorageFactory._getConfigStorageName: Supplied storage doesn't exist."
      gLogger.error( errStr, configPath )
      return S_ERROR( errStr )
    # Not a reference: the supplied name is already resolved
    if referenceType not in res['Value']:
      return S_OK( storageName )
    # Follow the reference: the option value names another storage section
    referenceName = gConfig.getValue( cfgPath( self.rootConfigPath, storageName, referenceType ) )
    result = self._getConfigStorageName( referenceName, 'Alias' )
    if not result['OK']:
      return result
    return S_OK( result['Value'] )
Пример #2
0
 def export_requestDelegationUpload( self, requestedUploadTime, userGroup ):
   """ Request a delegation. Send a delegation request to client

   :param requestedUploadTime: requested proxy lifetime (seconds) for the upload
   :param userGroup: group to upload for; falls back to the caller's group
   :return: S_OK with the delegation request data, or S_ERROR
   """
   credDict = self.getRemoteCredentials()
   userDN = credDict[ 'DN' ]
   userName = credDict[ 'username' ]
   # Default to the group the client is currently acting under
   if not userGroup:
     userGroup = credDict[ 'group' ]
   # The requested group must be one of the user's registered groups
   retVal = Registry.getGroupsForUser( credDict[ 'username' ] )
   if not retVal[ 'OK' ]:
     return retVal
   groupsAvailable = retVal[ 'Value' ]
   if userGroup not in groupsAvailable:
     return S_ERROR( "%s is not a valid group for user %s" % ( userGroup, userName ) )
   clientChain = credDict[ 'x509Chain' ]
   # Cap the request at the remaining lifetime of the client's own certificate
   clientSecs = clientChain.getIssuerCert()[ 'Value' ].getRemainingSecs()[ 'Value' ]
   requestedUploadTime = min( requestedUploadTime, clientSecs )
   retVal = self.__proxyDB.getRemainingTime( userDN, userGroup )
   if not retVal[ 'OK' ]:
     return retVal
   remainingSecs = retVal[ 'Value' ]
   # If we have a proxy longer than the one uploading it's not needed
   # ten minute margin to compensate just in case
   if remainingSecs >= requestedUploadTime - 600:
     gLogger.info( "Upload request not necessary by %s:%s" % ( userName, userGroup ) )
     return self.__addKnownUserProxiesInfo( S_OK() )
   result = self.__proxyDB.generateDelegationRequest( credDict[ 'x509Chain' ], userDN )
   if result[ 'OK' ]:
     gLogger.info( "Upload request by %s:%s given id %s" % ( userName, userGroup, result['Value']['id'] ) )
   else:
     gLogger.error( "Upload request failed", "by %s:%s : %s" % ( userName, userGroup, result['Message'] ) )
   return result
Пример #3
0
  def transfer_toClient( self, fileID, token, fileHelper ):
    """ Send a file to a client.

    fileID is the local file name in the SE.
    token is used for access rights confirmation.
    """
    conn, error = self.__irodsClient( "r" )
    if not conn:
      return S_ERROR( error )

    file_path = IRODS_HOME + self.__resolveFileID( fileID )
    gLogger.debug( "file_path to read: %s" % file_path )

    fd = iRodsOpen( conn, file_path, "r" )
    if fd:
      result = fileHelper.FileToNetwork( fd )
      fd.close()
      rcDisconnect( conn )
      if result[ "OK" ]:
        return result
      gLogger.error( "Failed to get file " + fileID )
      return S_ERROR( "Failed to get file " + fileID )

    # Could not open the remote file: release the connection before failing
    rcDisconnect( conn )
    gLogger.error( "Failed to get file object" )
    return S_ERROR( "Failed to get file object" )
Пример #4
0
  def _getConfigStorageOptions( self, storageName ):
    """ Get the options associated to the StorageElement as defined in the CS
    """
    storageConfigPath = cfgPath( self.rootConfigPath, storageName )
    res = gConfig.getOptions( storageConfigPath )
    if not res['OK']:
      errStr = "StorageFactory._getStorageOptions: Failed to get storage options."
      gLogger.error( errStr, "%s: %s" % ( storageName, res['Message'] ) )
      return S_ERROR( errStr )

    # Access-control options are provided by the resource status service below
    skippedOptions = ( 'ReadAccess', 'WriteAccess', 'CheckAccess', 'RemoveAccess' )
    optionsDict = {}
    for option in res['Value']:
      if option in skippedOptions:
        continue
      optionConfigPath = cfgPath( storageConfigPath, option )
      # VO is list-valued; every other option is read as a plain string
      defaultValue = [] if option == 'VO' else ''
      optionsDict[option] = gConfig.getValue( optionConfigPath, defaultValue )

    res = self.resourceStatus.getStorageElementStatus( storageName )
    if not res[ 'OK' ]:
      errStr = "StorageFactory._getStorageOptions: Failed to get storage status"
      gLogger.error( errStr, "%s: %s" % ( storageName, res['Message'] ) )
      return S_ERROR( errStr )

    # For safety, we did not add the ${statusType}Access keys
    # this requires modifications in the StorageElement class

    # We add the dictionary with the statusTypes and values
    # { 'statusType1' : 'status1', 'statusType2' : 'status2' ... }
    optionsDict.update( res[ 'Value' ][ storageName ] )
    return S_OK( optionsDict )
Пример #5
0
def initializeStorageElementProxyHandler( serviceInfo ):
  """ handler initialisation """

  global BASE_PATH, HTTP_FLAG, HTTP_PORT, HTTP_PATH
  # Renamed local (was 'cfgPath') to avoid shadowing the module-level helper
  serviceSection = serviceInfo['serviceSectionPath']

  BASE_PATH = gConfig.getValue( "%s/BasePath" % serviceSection, BASE_PATH )
  if not BASE_PATH:
    gLogger.error( 'Failed to get the base path' )
    return S_ERROR( 'Failed to get the base path' )

  gLogger.info('The base path obtained is %s. Checking its existence...' % BASE_PATH)
  if not os.path.exists(BASE_PATH):
    gLogger.info('%s did not exist. Creating....' % BASE_PATH)
    os.makedirs(BASE_PATH)

  HTTP_FLAG = gConfig.getValue( "%s/HttpAccess" % serviceSection, False )
  if HTTP_FLAG:
    # Optional HTTP access: prepare the cache directory and start the server thread
    HTTP_PATH = gConfig.getValue( "%s/HttpCache" % serviceSection, '%s/httpCache' % BASE_PATH )
    if not os.path.exists( HTTP_PATH ):
      gLogger.info('Creating HTTP cache directory %s' % (HTTP_PATH) )
      os.makedirs( HTTP_PATH )
    HTTP_PORT = gConfig.getValue( "%s/HttpPort" % serviceSection, 9180 )
    gLogger.info('Creating HTTP server thread, port:%d, path:%s' % ( HTTP_PORT, HTTP_PATH ) )
    HttpThread( HTTP_PORT, HTTP_PATH )

  return S_OK()
Пример #6
0
 def __unlinkOldLog(self, filePath):
     """ Remove an old log file; on failure log the error and return 1. """
     try:
         gLogger.info("Unlinking file %s" % filePath)
         os.unlink(filePath)
     except Exception as e:
         gLogger.error("Can't unlink old log file", "%s: %s" % (filePath, str(e)))
         return 1
Пример #7
0
 def setProxyStrength( self, arg ):
   """ Parse and store the requested proxy key strength (in bits).

   :param arg: command-line value; anything int() accepts
   :return: S_OK, or S_ERROR when the value is not a number
   """
   try:
     self.proxyStrength = int( arg )
   except ( ValueError, TypeError ):
     # Narrowed from a bare except: only conversion failures are expected here
     gLogger.error( "Can't parse bits! Is it a number?", '%s' % arg )
     return S_ERROR( "Can't parse strength argument" )
   return S_OK()
Пример #8
0
 def unassignEntities( self, entitiesDict, requesterName, requesterGroup ):
   """
   Unassign jobs to sandboxes
   entitiesDict = { 'setup' : [ 'entityId', 'entityId' ] }

   :param entitiesDict: mapping of setup name to the entity ids to unassign
   :param requesterName: requester used to filter which entities may be touched
   :param requesterGroup: group used together with requesterName for filtering
   :return: S_OK( number of setups whose mappings were deleted )
   """
   updated = 0
   for entitySetup in entitiesDict:
     entitiesIds = entitiesDict[ entitySetup ]
     if not entitiesIds:
       continue
     # _escapeString is assumed to always succeed ('Value' accessed directly)
     escapedSetup = self._escapeString( entitySetup )[ 'Value' ]
     # Only entities the requester is allowed to act on survive the filter
     result = self.__filterEntitiesByRequester( entitiesIds, escapedSetup, requesterName, requesterGroup )
     if not result[ 'OK' ]:
       gLogger.error( "Cannot filter entities: %s" % result[ 'Message' ] )
       continue
     ids = result[ 'Value' ]
     if not ids:
       # NOTE(review): this early return discards the count of setups already
       # processed in previous iterations -- confirm whether 'continue' was meant
       return S_OK( 0 )
     sqlCond = [ "EntitySetup = %s" % escapedSetup ]
     sqlCond.append( "EntityId in ( %s )" % ", ".join ( [ "'%s'" % str( eid ) for eid in ids ] ) )
     sqlCmd = "DELETE FROM `sb_EntityMapping` WHERE %s" % " AND ".join( sqlCond )
     result = self._update( sqlCmd )
     if not result[ 'OK' ]:
       gLogger.error( "Cannot unassign entities: %s" % result[ 'Message' ] )
     else:
       updated += 1
   return S_OK( updated )
Пример #9
0
def getComputingElements():
  """
    Gets all computing elements from /Resources/Sites/<>/<>/CE
  """
  _basePath = 'Resources/Sites'
  ces = []

  result = gConfig.getSections(_basePath)
  if not result['OK']:
    return result

  for domainName in result['Value']:
    sitesRes = gConfig.getSections('%s/%s' % (_basePath, domainName))
    if not sitesRes['OK']:
      return sitesRes

    for site in sitesRes['Value']:
      ceRes = gConfig.getSections('%s/%s/%s/CEs' % (_basePath, domainName, site))
      if not ceRes['OK']:
        # A site without a readable CEs section is not fatal: log and move on
        gLogger.error(ceRes['Message'])
        continue
      ces.extend(ceRes['Value'])

  # Remove duplicated ( just in case )
  return S_OK(list(set(ces)))
Пример #10
0
 def addHost( self, hostname, properties ):
   """
   Add a host to the cs
     - hostname
     - properties is a dict with keys:
       - DN
       - Properties
       - <extra params>
   Returns True/False
   """
   if not self.__initialized[ 'OK' ]:
     return self.__initialized
   # A host entry is unusable without its DN
   for requiredProp in ( "DN", ):
     if requiredProp not in properties:
       gLogger.error( "Missing %s property for host %s" % ( requiredProp, hostname ) )
       return S_OK( False )
   if hostname in self.listHosts()['Value']:
     gLogger.error( "Host %s is already registered" % hostname )
     return S_OK( False )
   self.__csMod.createSection( "%s/Hosts/%s" % ( self.__baseSecurity, hostname ) )
   for prop, propValue in properties.items():
     self.__csMod.setOptionValue( "%s/Hosts/%s/%s" % ( self.__baseSecurity, hostname, prop ), propValue )
   gLogger.info( "Registered host %s" % hostname )
   self.__csModified = True
   return S_OK( True )
Пример #11
0
 def modifyHost( self, hostname, properties, createIfNonExistant = False ):
   """
   Modify a registered host
     - hostname
     - properties is a dict with keys:
       - DN
       - Properties
       - <extra params>
   Returns True/False
   """
   if not self.__initialized[ 'OK' ]:
     return self.__initialized
   hostData = self.describeHosts( [ hostname ] )['Value']
   if hostname not in hostData:
     # Unknown host: optionally fall back to registration
     if createIfNonExistant:
       gLogger.info( "Registering host %s" % hostname )
       return self.addHost( hostname, properties )
     gLogger.error( "Host %s is not registered" % hostname )
     return S_OK( False )
   modifiedHost = False
   for prop in properties:
     propPath = "%s/Hosts/%s/%s" % ( self.__baseSecurity, hostname, prop )
     prevVal = self.__csMod.getValue( propPath )
     # Only write options that are new or actually changed
     if not prevVal or prevVal != properties[ prop ]:
       gLogger.info( "Setting %s property for host %s to %s" % ( prop, hostname, properties[ prop ] ) )
       self.__csMod.setOptionValue( propPath, properties[ prop ] )
       modifiedHost = True
   if modifiedHost:
     gLogger.info( "Modified host %s" % hostname )
     self.__csModified = True
   else:
     gLogger.info( "Nothing to modify for host %s" % hostname )
   return S_OK( True )
Пример #12
0
  def export_enforcePolicies(self, granularity, name, useNewRes = True):
    """ Enforce all the policies. If `useNewRes` is False, use cached results only (where available).

    :param granularity: element type ('Resource', 'Service', ...)
    :param name: element name
    :param useNewRes: when False, policies may rely on cached results
    """
    # NOTE(review): the outer 'try' below has no matching except/finally in this
    # chunk -- the block appears truncated in this view
    try:
      gLogger.info("ResourceManagementHandler.enforcePolicies: Attempting to enforce policies for %s %s" % (granularity, name))
      try:
        reason = serviceType = resourceType = None 

        # getStuffToCheck returns rows; fixed positions carry the status fields,
        # with the token owner always in the last position
        res = rsDB.getStuffToCheck(granularity, name = name)[0]
        status = res[1]
        formerStatus = res[2]
        siteType = res[3]
        tokenOwner = res[len(res)-1]
        # Position 4 holds the resource OR service type, depending on granularity
        if granularity == 'Resource':
          resourceType = res[4]
        elif granularity == 'Service':
          serviceType = res[4]
        
        from DIRAC.ResourceStatusSystem.PolicySystem.PEP import PEP
        pep = PEP(VOExtension, granularity, name, status, formerStatus, reason, siteType, 
                  serviceType, resourceType, tokenOwner, useNewRes)
        pep.enforce(rsDBIn = rsDB, rmDBIn = rmDB)
        
      except RSSDBException, x:
        gLogger.error(whoRaised(x))
      except RSSException, x:
        gLogger.error(whoRaised(x))
Пример #13
0
 def modifyGroup( self, groupname, properties, createIfNonExistant = False ):
   """
   Modify a registered group
     - groupname
     - properties is a dict with keys:
       - Users
       - Properties
       - <extra params>
   Returns True/False
   """
   if not self.__initialized[ 'OK' ]:
     return self.__initialized
   groupData = self.describeGroups( [ groupname ] )['Value']
   if groupname not in groupData:
     # Unknown group: optionally fall back to registration
     if createIfNonExistant:
       gLogger.info( "Registering group %s" % groupname )
       return self.addGroup( groupname, properties )
     gLogger.error( "Group %s is not registered" % groupname )
     return S_OK( False )
   modifiedGroup = False
   for prop in properties:
     propPath = "%s/Groups/%s/%s" % ( self.__baseSecurity, groupname, prop )
     prevVal = self.__csMod.getValue( propPath )
     # Only write options that are new or actually changed
     if not prevVal or prevVal != properties[ prop ]:
       gLogger.info( "Setting %s property for group %s to %s" % ( prop, groupname, properties[ prop ] ) )
       self.__csMod.setOptionValue( propPath, properties[ prop ] )
       modifiedGroup = True
   if modifiedGroup:
     gLogger.info( "Modified group %s" % groupname )
     self.__csModified = True
   else:
     gLogger.info( "Nothing to modify for group %s" % groupname )
   return S_OK( True )
Пример #14
0
 def __checkIfProxyUploadIsRequired( self ):
   """ Return True when a proxy upload is needed for the job owner. """
   result = gProxyManager.userHasProxy( self.ownerDN, self.ownerGroup, validSeconds = 18000 )
   if result[ 'OK' ]:
     # Upload is required only when no sufficiently long proxy was found
     return result[ 'Value' ] == False
   gLogger.error( "Can't check if the user has proxy uploaded:", result[ 'Message' ] )
   # When in doubt, ask for an upload
   return True
Пример #15
0
    def run(self):
        """ The main watchdog execution method.

        Runs initialize() once, then loops calling execute() every
        pollingTime seconds until execute() fails, reports "Ended",
        or an exception escapes the loop.
        """
        result = self.initialize()
        if not result['OK']:
            # Fixed typo in the log message ("wtchdog" -> "watchdog")
            gLogger.always('Can not start watchdog for the following reason')
            gLogger.always(result['Message'])
            return result

        try:
            while True:
                gLogger.debug('Starting agent loop # %d' % self.count)
                start_cycle_time = time.time()
                result = self.execute()
                exec_cycle_time = time.time() - start_cycle_time
                if not result['OK']:
                    gLogger.error("Watchdog error during execution",
                                  result['Message'])
                    break
                elif result['Value'] == "Ended":
                    break
                self.count += 1
                # Sleep the remainder of the polling period, if any
                if exec_cycle_time < self.pollingTime:
                    time.sleep(self.pollingTime - exec_cycle_time)
            return S_OK()
        except Exception:
            # py2.6+/py3-compatible except form (was 'except Exception, x')
            gLogger.exception()
            return S_ERROR('Exception')
Пример #16
0
 def export_getTransformationProblematics( transID ):
   """ Get the problematics for a given transformation """
   gLogger.info( "DataIntegrityHandler.getTransformationProblematics: Getting problematics for transformation." )
   res = gDataIntegrityDB.getTransformationProblematics( transID )
   if res['OK']:
     return res
   # Log the failure but still hand the error result back to the client
   gLogger.error( "DataIntegrityHandler.getTransformationProblematics: Failed.", res['Message'] )
   return res
Пример #17
0
  def getMailDict(self , names=None):
    """
    Convert list of usernames to dict like { e-mail : full name }
    Argument is a list. Return value is a dict
    """
    resultDict = dict()
    if not names:
      return resultDict

    for user in names:
      email = gConfig.getValue("/Registry/Users/%s/Email" % user , "")
      gLogger.debug("/Registry/Users/%s/Email - '%s'" % (user , email))
      # BUGFIX: the stripped value was assigned to a misspelled variable
      # ('emil'), so the address was never actually trimmed of whitespace
      email = email.strip()

      if not email:
        gLogger.error("Can't find value for option /Registry/Users/%s/Email" % user)
        continue

      fname = gConfig.getValue("/Registry/Users/%s/FullName" % user , "")
      gLogger.debug("/Registry/Users/%s/FullName - '%s'" % (user , fname))
      fname = fname.strip()

      if not fname:
        # Fall back to the username when no FullName is configured
        fname = user
        gLogger.debug("FullName is absent, name to be used: %s" % fname)

      resultDict[ email ] = fname

    return resultDict
Пример #18
0
 def execute(self):
   """ This is called by the Agent Reactor

   Collects, per site, the number of jobs currently in ApplicationStatus
   'Getting overlay files' and pushes the counters to the overlay service.
   """
   res = self.ovc.getSites()
   if not res['OK']:
     return res
   sites = res['Value']
   sitedict = {}
   gLogger.info("Will update info for sites %s" % sites)
   for site in sites:
     attribdict = {"Site" : site, "ApplicationStatus": 'Getting overlay files'}
     res = self.jobmon.getCurrentJobCounters(attribdict)
     if not res['OK']:
       # Best effort: skip sites whose counters cannot be obtained
       continue
     # dict.get replaces the deprecated has_key idiom
     sitedict[site] = res['Value'].get('Running', 0)
   gLogger.info("Setting new values %s" % sitedict)
   res = self.ovc.setJobsAtSites(sitedict)
   if not res['OK']:
     gLogger.error(res['Message'])
     return res
   return S_OK()
Пример #19
0
 def export_changeProblematicPrognosis( fileID, newPrognosis ):
   """ Change the prognosis for the supplied file """
   gLogger.info( "DataIntegrityHandler.changeProblematicPrognosis: Changing problematic prognosis." )
   res = gDataIntegrityDB.changeProblematicPrognosis( fileID, newPrognosis )
   if res['OK']:
     return res
   # Log the failure but still hand the error result back to the client
   gLogger.error( "DataIntegrityHandler.changeProblematicPrognosis: Failed to update.", res['Message'] )
   return res
Пример #20
0
 def getDirectoryMetadata( self, path ):
   """ Get the metadata for the directory
   """
   res = self.__checkArgumentFormat( path )
   if not res['OK']:
     return res
   urls = res['Value']
   gLogger.debug( "RFIOStorage.getDirectoryMetadata: Attempting to get metadata for %s directories." % len( urls ) )
   res = self.isDirectory( urls )
   if not res['OK']:
     return res
   failed = res['Value']['Failed']
   # Only genuine directories are passed on to the metadata lookup
   directories = []
   for url, isDir in res['Value']['Successful'].items():
     if isDir:
       directories.append( url )
     else:
       errStr = "RFIOStorage.getDirectoryMetadata: Directory does not exist."
       gLogger.error( errStr, url )
       failed[url] = errStr
   res = self.__getPathMetadata( directories )
   if not res['OK']:
     return res
   failed.update( res['Value']['Failed'] )
   successful = res['Value']['Successful']
   return S_OK( {'Failed':failed, 'Successful':successful} )
 def do_deleteType( self, args ):
   """
   Delete a registered accounting type.
     Usage : deleteType <typeName>
     WARN! It will delete all data associated to that type! VERY DANGEROUS!
     If you screw it, you'll discover a new dimension of pain and doom! :)
   """
   try:
     argList = args.split()
     if not argList:
       gLogger.error( "No type name specified" )
       return
     typeName = argList[0].strip()
     # Interactive confirmation: anything but an explicit yes aborts
     choice = raw_input( "Are you completely sure you want to delete type %s and all it's data? yes/no [no]: " % typeName )
     if choice.lower() not in ( "yes", "y" ):
       print( "Delete aborted" )
       return
     acClient = RPCClient( "Accounting/DataStore" )
     retVal = acClient.deleteType( typeName )
     if not retVal[ 'OK' ]:
       gLogger.error( "Error: %s" % retVal[ 'Message' ] )
       return
     print( "Hope you meant it, because it's done" )
   except:
     # Deliberate catch-all: a CLI command should never crash the shell
     self.showTraceback()
Пример #22
0
 def isFile( self, path ):
   """Check if the given path exists and it is a file
   """
   res = self.__checkArgumentFormat( path )
   if not res['OK']:
     return res
   urls = res['Value']
   gLogger.debug( "RFIOStorage.isFile: Determining whether %s paths are files." % len( urls ) )
   # One nsls call for all paths; -d avoids listing directory contents
   comm = "nsls -ld"
   for url in urls:
     comm = " %s %s" % ( comm, url )
   res = shellCall( self.timeout, comm )
   if not res['OK']:
     return res
   returncode, stdout, stderr = res['Value']
   if returncode not in ( 0, 1 ):
     errStr = "RFIOStorage.isFile: Completely failed to determine whether path is file."
     gLogger.error( errStr, "%s %s" % ( self.name, stderr ) )
     return S_ERROR( errStr )
   successful = {}
   failed = {}
   for line in stdout.splitlines():
     # First column of the long listing: a leading 'd' marks a directory
     permissions, _subdirs, _owner, _group, _size, _month, _date, _timeYear, pfn = line.split()
     successful[pfn] = permissions[0] != 'd'
   for line in stderr.splitlines():
     pfn, error = line.split( ': ' )
     failed[pfn.strip()] = error
   return S_OK( {'Failed':failed, 'Successful':successful} )
Пример #23
0
  def getDirectory( self, path, localPath = False ):
    """ Get locally a directory from the physical storage together with all its files and subdirectories.
    """
    res = self.__checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']

    successful = {}
    failed = {}
    gLogger.debug( "RFIOStorage.getDirectory: Attempting to get local copies of %s directories." % len( urls ) )
    # Destination root: explicit localPath or the current working directory
    baseDir = localPath if localPath else os.getcwd()
    for src_directory in urls:
      dest_dir = "%s/%s" % ( baseDir, os.path.basename( src_directory ) )
      res = self.__getDir( src_directory, dest_dir )
      if not res['OK']:
        gLogger.error( "RFIOStorage.getDirectory: Completely failed to get local copy of directory.", src_directory )
        failed[src_directory] = {'Files':0, 'Size':0}
        continue
      dirInfo = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
      if res['Value']['AllGot']:
        gLogger.debug( "RFIOStorage.getDirectory: Successfully got local copy of %s" % src_directory )
        successful[src_directory] = dirInfo
      else:
        gLogger.error( "RFIOStorage.getDirectory: Failed to get entire directory.", src_directory )
        failed[src_directory] = dirInfo
    return S_OK( {'Failed':failed, 'Successful':successful} )
Пример #24
0
 def prestageFile( self, path ):
   """ Issue prestage request for file
   """
   res = self.__checkArgumentFormat( path )
   if not res['OK']:
     return res
   urls = res['Value']
   # The user tag lets the matching prestage status be queried later
   userTag = '%s-%s' % ( self.spaceToken, time.time() )
   comm = "stager_get -S %s -U %s " % ( self.spaceToken, userTag )
   for url in urls:
     comm = "%s -M %s" % ( comm, url )
   res = shellCall( 100, comm )
   if not res['OK']:
     errStr = "RFIOStorage.prestageFile: Completely failed to issue stage requests."
     gLogger.error( errStr, res['Message'] )
     return S_ERROR( errStr )
   returncode, stdout, stderr = res['Value']
   if returncode not in ( 0, 1 ):
     errStr = "RFIOStorage.prestageFile: Got unexpected return code from stager_get."
     gLogger.error( errStr, stderr )
     return S_ERROR( errStr )
   successful = {}
   failed = {}
   for line in stdout.splitlines():
     if re.search( 'SUBREQUEST_READY', line ):
       pfn, _status = line.split()
       successful[pfn] = userTag
     elif re.search( 'SUBREQUEST_FAILED', line ):
       pfn, _status, err = line.split( ' ', 2 )
       failed[pfn] = err
   return S_OK( {'Failed':failed, 'Successful':successful} )
Пример #25
0
 def exists( self, path ):
   """ Check if the given path exists. The 'path' variable can be a string or a list of strings.
   """
   res = self.__checkArgumentFormat( path )
   if not res['OK']:
     return res
   urls = res['Value']
   gLogger.debug( "RFIOStorage.exists: Determining the existance of %s files." % len( urls ) )
   comm = "nsls -d"
   for url in urls:
     comm = " %s %s" % ( comm, url )
   res = shellCall( self.timeout, comm )
   if not res['OK']:
     errStr = "RFIOStorage.exists: Completely failed to determine the existance files."
     gLogger.error( errStr, "%s %s" % ( self.name, res['Message'] ) )
     return S_ERROR( errStr )
   returncode, stdout, stderr = res['Value']
   if returncode not in ( 0, 1 ):
     errStr = "RFIOStorage.exists: Completely failed to determine the existance files."
     gLogger.error( errStr, "%s %s" % ( self.name, stderr ) )
     return S_ERROR( errStr )
   successful = {}
   # Paths listed on stdout exist; those reported on stderr do not
   for line in stdout.splitlines():
     successful[line.strip()] = True
   for line in stderr.splitlines():
     pfn, _ = line.split( ': ' )
     successful[pfn.strip()] = False
   return S_OK( {'Failed':{}, 'Successful':successful} )
Пример #26
0
  def __logResult( methodName, result ):
    '''
      Log the error message of a failed result object; do nothing on success.
    '''
    if result[ 'OK' ]:
      return
    gLogger.error( '%s%s' % ( methodName, result[ 'Message' ] ) )
Пример #27
0
    def _getDirectoryContent(directory):
      """ Inner function: recursively scan a directory, returns list of LFNs
      """
      gLogger.debug("Examining %s" % directory)

      res = self.fileCatalog.listDirectory(directory)
      if not res['OK']:
        gLogger.error('Failed to get directory contents', res['Message'])
        return res
      if directory in res['Value']['Failed']:
        gLogger.error('Failed to get directory content', '%s %s' %
                      (directory, res['Value']['Failed'][directory]))
        return S_ERROR('Failed to get directory content')
      if directory not in res['Value']['Successful']:
        return S_ERROR('Directory not existing?')

      dirData = res['Value']['Successful'][directory]

      # Files found directly in this directory
      gLogger.debug("Files in %s: %d" % (directory, len(dirData['Files'])))
      filesInDirectory = dict(dirData['Files'])

      # Recurse into each subdirectory, aborting on the first failure
      for subDir in dirData['SubDirs']:
        subDirContent = _getDirectoryContent(subDir)
        if not subDirContent['OK']:
          return subDirContent
        filesInDirectory.update(subDirContent['Value'])

      return S_OK(filesInDirectory)
Пример #28
0
  def putDirectory( self, path ):
    """ Put a local directory to the physical storage together with all its files and subdirectories.
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']

    successful = {}
    failed = {}
    gLogger.debug( "RFIOStorage.putDirectory: Attemping to put %s directories to remote storage." % len( urls ) )
    for destDir, sourceDir in urls.items():
      res = self.__putDir( sourceDir, destDir )
      if not res['OK']:
        gLogger.error( "RFIOStorage.putDirectory: Completely failed to put directory to remote storage.", destDir )
        failed[destDir] = {'Files':0, 'Size':0}
        continue
      transferInfo = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
      if res['Value']['AllPut']:
        gLogger.debug( "RFIOStorage.putDirectory: Successfully put directory to remote storage: %s" % destDir )
        successful[destDir] = transferInfo
      else:
        gLogger.error( "RFIOStorage.putDirectory: Failed to put entire directory to remote storage.", destDir )
        failed[destDir] = transferInfo
    return S_OK( {'Failed':failed, 'Successful':successful} )
 def __generateReleaseNotes( self ):
   """ Build releasenotes.rst / releasehistory.rst from release.notes and compile them. """
   result = self.__loadReleaseNotesFile()
   if not result[ 'OK' ]:
     return result
   releaseData = result[ 'Value' ]
   if not releaseData:
     # No release.notes: fall back to compiling pre-existing rst files
     gLogger.info( "release.notes not found. Trying to find releasenotes.rst" )
     for rstFileName in ( "releasenotes.rst", "releasehistory.rst" ):
       result = self.__compileReleaseNotes( rstFileName )
       if not result[ 'OK' ]:
         gLogger.warn( result[ 'Message' ] )
       else:
         gLogger.notice( "Compiled %s file!" % rstFileName )
     return S_OK()
   gLogger.info( "Loaded release.notes" )
   # releasenotes.rst covers only this version; releasehistory.rst covers all
   for rstFileName, singleVersion in ( ( "releasenotes.rst", True ),
                                       ( "releasehistory.rst", False ) ):
     result = self.__generateRSTFile( releaseData, rstFileName, self.params.version,
                                      singleVersion )
     if not result[ 'OK' ]:
       gLogger.error( "Could not generate %s: %s" % ( rstFileName, result[ 'Message' ] ) )
       continue
     result = self.__compileReleaseNotes( rstFileName )
     if not result[ 'OK' ]:
       gLogger.error( "Could not compile %s: %s" % ( rstFileName, result[ 'Message' ] ) )
       continue
     gLogger.notice( "Compiled %s file!" % rstFileName )
   return S_OK()
Пример #30
0
 def removeDirectory( self, path, recursive = False ):
   """Remove a directory on the physical storage together with all its files and
      subdirectories.
   """
   res = self.__checkArgumentFormat( path )
   if not res['OK']:
     return res
   urls = res['Value']
   gLogger.debug( "RFIOStorage.removeDirectory: Attempting to remove %s directories." % len( urls ) )
   successful = {}
   failed = {}
   for url in urls:
     res = shellCall( 100, "nsrm -r %s" % url )
     if not res['OK']:
       errStr = "RFIOStorage.removeDirectory: Completely failed to remove directory."
       gLogger.error( errStr, "%s %s" % ( url, res['Message'] ) )
       failed[url] = res['Message']
       continue
     returncode, _stdout, stderr = res['Value']
     # nsrm exit codes 0 and 1 are both treated as success
     if returncode in ( 0, 1 ):
       successful[url] = {'FilesRemoved':0, 'SizeRemoved':0}
     else:
       failed[url] = stderr
   return S_OK( {'Failed':failed, 'Successful':successful} )
Пример #31
0
    def _processInThread(self, clientTransport):
        """
        Handle one RPC, FileTransfer or Connection in the current thread.
        Connection may be opened via ServiceReactor.__acceptIncomingConnection

        - Do the SSL/TLS handshake (if dips is used) and extract credentials
        - Get the action called by the client
        - Check if the client is authorized to perform the action
          - If not, the connection is closed
        - Instantiate the RequestHandler (RequestHandler contains all callable methods)

        (The following is not directly in this method but describes what
        happens at #Execute the action)
        - Notify the client we're ready to execute the action (via _processProposal)
          and call RequestHandler._rh_executeAction()
        - Receive arguments/file/something else (depending on action) in the RequestHandler
        - Execute the action asked by the client

        :param clientTransport: object describing the opened connection
                                (SSLTransport or PlainTransport)

        :return: S_OK with "closeTransport", a boolean indicating whether the
                 connection has to be closed, e.g. after RPC closeTransport=True
        """
        self.__maxFD = max(self.__maxFD, clientTransport.oSocket.fileno())
        self._lockManager.lockGlobal()
        try:
            monReport = self.__startReportToMonitoring()
        except Exception:
            # Monitoring is best-effort: never let it break request handling
            monReport = False
        try:
            #Handshake
            try:
                result = clientTransport.handshake()
                if not result['OK']:
                    clientTransport.close()
                    return
            except:
                # NOTE(review): bare except deliberately swallows handshake
                # crashes so one bad client cannot take the service down
                return
            #Add to the transport pool
            trid = self._transportPool.add(clientTransport)
            if not trid:
                return
            #Receive and check proposal
            result = self._receiveAndCheckProposal(trid)
            if not result['OK']:
                self._transportPool.sendAndClose(trid, result)
                return
            proposalTuple = result['Value']
            #Instantiate handler
            result = self._instantiateHandler(trid, proposalTuple)
            if not result['OK']:
                self._transportPool.sendAndClose(trid, result)
                return
            handlerObj = result['Value']
            #Execute the action
            result = self._processProposal(trid, proposalTuple, handlerObj)
            #Close the connection if required
            if result['closeTransport'] or not result['OK']:
                if not result['OK']:
                    gLogger.error("Error processing proposal",
                                  result['Message'])
                self._transportPool.close(trid)
            return result
        finally:
            # Always release the global lock and close the monitoring report
            self._lockManager.unlockGlobal()
            if monReport:
                self.__endReportToMonitoring(*monReport)
Пример #32
0
                requests += [
                    reqID.strip() for line in lines
                    for reqID in line.split(',')
                ]
                gLogger.notice("Found %d requests in file" % len(requests))
            else:
                requests.append(arg)
            allR = True
    else:
        res = reqClient.getRequestIDsForJobs(jobs)
        if not res['OK']:
            gLogger.fatal("Error getting request for jobs", res['Message'])
            DIRAC.exit(2)
        if res['Value']['Failed']:
            gLogger.error(
                "No request found for jobs %s" %
                ','.join(sorted(str(job) for job in res['Value']['Failed'])))
        requests = sorted(res['Value']['Successful'].values())
        if requests:
            allR = True
        else:
            DIRAC.exit(0)

    if status and not requests:
        allR = allR or status != 'Failed'
        res = reqClient.getRequestIDsList([status],
                                          limit=maxRequests,
                                          since=since,
                                          until=until)

        if not res['OK']:
Пример #33
0
def main():
    """Create one or more 'ReplicateAndRegister' requests in the RMS ReqDB.

    CLI contract: <requestName> <LFN or file-with-LFNs> <targetSE> [<targetSE> ...]
    LFNs are replicated to every target SE; the list is split in chunks of 100
    LFNs, one request per chunk.  Exits the process via DIRAC.exit: 0 on full
    success, -1 if any chunk failed.
    """
    catalog = None
    Script.registerSwitch("C:", "Catalog=", "Catalog to use")
    # Registering arguments will automatically add their description to the help menu
    Script.registerArgument(" requestName:  a request name")
    Script.registerArgument(" LFNs:         single LFN or file with LFNs")
    Script.registerArgument(["targetSE:     target SE"])
    Script.parseCommandLine()
    for switch in Script.getUnprocessedSwitches():
        if switch[0] == "C" or switch[0].lower() == "catalog":
            catalog = switch[1]

    args = Script.getPositionalArgs()

    requestName = None
    targetSEs = None
    # Need at least request name, LFN(s) and one target SE
    if len(args) < 3:
        Script.showHelp(exitCode=1)

    requestName = args[0]
    lfnList = getLFNList(args[1])
    # Each positional SE argument may itself be a comma-separated list; deduplicate
    targetSEs = list(
        set([se for targetSE in args[2:] for se in targetSE.split(",")]))

    gLogger.info("Will create request '%s' with 'ReplicateAndRegister' "
                 "operation using %s lfns and %s target SEs" %
                 (requestName, len(lfnList), len(targetSEs)))

    from DIRAC.RequestManagementSystem.Client.Request import Request
    from DIRAC.RequestManagementSystem.Client.Operation import Operation
    from DIRAC.RequestManagementSystem.Client.File import File
    from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
    from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
    from DIRAC.Core.Utilities.List import breakListIntoChunks

    lfnChunks = breakListIntoChunks(lfnList, 100)
    # With more than one chunk, request names get a _<num> suffix
    multiRequests = len(lfnChunks) > 1

    error = 0
    count = 0
    reqClient = ReqClient()
    fc = FileCatalog()
    requestIDs = []
    for lfnChunk in lfnChunks:
        # Size/checksum metadata is needed to build the request files
        metaDatas = fc.getFileMetadata(lfnChunk)
        if not metaDatas["OK"]:
            gLogger.error("unable to read metadata for lfns: %s" %
                          metaDatas["Message"])
            error = -1
            continue
        metaDatas = metaDatas["Value"]
        for failedLFN, reason in metaDatas["Failed"].items():
            gLogger.error("skipping %s: %s" % (failedLFN, reason))
        lfnChunk = set(metaDatas["Successful"])

        if not lfnChunk:
            gLogger.error("LFN list is empty!!!")
            error = -1
            continue

        if len(lfnChunk) > Operation.MAX_FILES:
            gLogger.error(
                "too many LFNs, max number of files per operation is %s" %
                Operation.MAX_FILES)
            error = -1
            continue

        count += 1
        request = Request()
        request.RequestName = requestName if not multiRequests else "%s_%d" % (
            requestName, count)

        replicateAndRegister = Operation()
        replicateAndRegister.Type = "ReplicateAndRegister"
        replicateAndRegister.TargetSE = ",".join(targetSEs)
        if catalog is not None:
            replicateAndRegister.Catalog = catalog

        for lfn in lfnChunk:
            metaDict = metaDatas["Successful"][lfn]
            opFile = File()
            opFile.LFN = lfn
            opFile.Size = metaDict["Size"]

            if "Checksum" in metaDict:
                # Should check the checksum type; for now Adler32 is assumed
                # (i.e. metaDict["ChecksumType"] == 'AD') -- TODO confirm
                opFile.Checksum = metaDict["Checksum"]
                opFile.ChecksumType = "ADLER32"
            replicateAndRegister.addFile(opFile)

        request.addOperation(replicateAndRegister)

        putRequest = reqClient.putRequest(request)
        if not putRequest["OK"]:
            gLogger.error("unable to put request '%s': %s" %
                          (request.RequestName, putRequest["Message"]))
            error = -1
            continue
        requestIDs.append(str(putRequest["Value"]))
        if not multiRequests:
            gLogger.always(
                "Request '%s' has been put to ReqDB for execution." %
                request.RequestName)

    if multiRequests:
        gLogger.always(
            "%d requests have been put to ReqDB for execution, with name %s_<num>"
            % (count, requestName))
    if requestIDs:
        gLogger.always("RequestID(s): %s" % " ".join(requestIDs))
    gLogger.always(
        "You can monitor requests' status using command: 'dirac-rms-request <requestName/ID>'"
    )
    DIRAC.exit(error)
Пример #34
0
    def addShifter(self, shifters=None):
        """
        Add or modify one or more shifters, creating the Shifter section in
        the CS if it is not present.  Shifter identities are used in several
        places, mostly for running agents.

        :param dict shifters: mapping of the form
            {'ShifterRole': {'User': 'aUser', 'Group': 'aDIRACGroup'}};
            entries identical to the current CS content are skipped
            (and popped from the caller's dict, as before)

        :return: S_OK/S_ERROR
        """
        def getOpsSection():
            """
            Locate the Shifter section: VO/setup-specific paths are tried
            first, then the Defaults ones.
            """
            vo = CSGlobals.getVO()
            setup = CSGlobals.getSetup()

            if vo:
                res = gConfig.getSections('/Operations/%s/%s/Shifter' %
                                          (vo, setup))
                if res['OK']:
                    return S_OK('/Operations/%s/%s/Shifter' % (vo, setup))

                res = gConfig.getSections('/Operations/%s/Defaults/Shifter' %
                                          vo)
                if res['OK']:
                    return S_OK('/Operations/%s/Defaults/Shifter' % vo)

            else:
                res = gConfig.getSections('/Operations/%s/Shifter' % setup)
                if res['OK']:
                    return S_OK('/Operations/%s/Shifter' % setup)

                res = gConfig.getSections('/Operations/Defaults/Shifter')
                if res['OK']:
                    return S_OK('/Operations/Defaults/Shifter')

            return S_ERROR("No shifter section")

        if shifters is None:
            shifters = {}
        if not self.__initialized['OK']:
            return self.__initialized

        # get current shifters
        opsH = Operations()
        currentShifterRoles = opsH.getSections('Shifter')
        if not currentShifterRoles['OK']:
            # we assume the shifter section is not present
            currentShifterRoles = []
        else:
            currentShifterRoles = currentShifterRoles['Value']
        currentShiftersDict = {}
        for currentShifterRole in currentShifterRoles:
            currentShifter = opsH.getOptionsDict('Shifter/%s' %
                                                 currentShifterRole)
            if not currentShifter['OK']:
                return currentShifter
            currentShifter = currentShifter['Value']
            currentShiftersDict[currentShifterRole] = currentShifter

        # Removing from shifters what does not need to be changed.
        # BUGFIX: iterate over a snapshot of the keys -- popping from the
        # dict while iterating it directly raises
        # "RuntimeError: dictionary changed size during iteration".
        for sRole in list(shifters):
            if sRole in currentShiftersDict:
                if currentShiftersDict[sRole] == shifters[sRole]:
                    shifters.pop(sRole)

        # get shifters section to modify
        section = getOpsSection()

        # Is this section present?
        if not section['OK']:
            if section['Message'] == "No shifter section":
                gLogger.warn(section['Message'])
                gLogger.info("Adding shifter section")
                vo = CSGlobals.getVO()
                if vo:
                    section = '/Operations/%s/Defaults/Shifter' % vo
                else:
                    section = '/Operations/Defaults/Shifter'
                res = self.__csMod.createSection(section)
                if not res:
                    gLogger.error("Section %s not created" % section)
                    return S_ERROR("Section %s not created" % section)
            else:
                gLogger.error(section['Message'])
                return section
        else:
            section = section['Value']

        # add or modify shifters: each role section is recreated from scratch
        for shifter in shifters:
            self.__csMod.removeSection(section + '/' + shifter)
            self.__csMod.createSection(section + '/' + shifter)
            self.__csMod.createSection(section + '/' + shifter + '/' + 'User')
            self.__csMod.createSection(section + '/' + shifter + '/' + 'Group')
            self.__csMod.setOptionValue(section + '/' + shifter + '/' + 'User',
                                        shifters[shifter]['User'])
            self.__csMod.setOptionValue(
                section + '/' + shifter + '/' + 'Group',
                shifters[shifter]['Group'])

        self.__csModified = True
        return S_OK(True)
Пример #35
0
    def modifyUser(self, username, properties, createIfNonExistant=False):
        """
        Modify a registered user.

        :param str username: user name
        :param dict properties: dictionary describing user properties:

          - DN
          - Groups
          - <extra params>

        :param bool createIfNonExistant: register the user (via addUser)
            when not yet known
        :return: S_OK(True) on success, S_OK(False) when the user or one of
            the requested groups is unknown, S_ERROR otherwise
        """
        if not self.__initialized['OK']:
            return self.__initialized
        modifiedUser = False
        userData = self.describeUsers([username])['Value']
        if username not in userData:
            if createIfNonExistant:
                gLogger.info("Registering user %s" % username)
                return self.addUser(username, properties)
            gLogger.error("User is not registered", username)
            return S_OK(False)
        for prop in properties:
            # Group membership is handled separately below
            if prop == "Groups":
                continue
            prevVal = self.__csMod.getValue(
                "%s/Users/%s/%s" % (self.__baseSecurity, username, prop))
            # Set the option when it is missing/empty or differs from the request
            if not prevVal or prevVal != properties[prop]:
                gLogger.info("Setting %s property for user %s to %s" %
                             (prop, username, properties[prop]))
                self.__csMod.setOptionValue(
                    "%s/Users/%s/%s" % (self.__baseSecurity, username, prop),
                    properties[prop])
                modifiedUser = True
        if 'Groups' in properties:
            groups = self.listGroups()['Value']
            for userGroup in properties['Groups']:
                # idiom fix: 'x not in y' instead of 'not x in y'
                if userGroup not in groups:
                    gLogger.error("User group is not a valid group",
                                  "%s %s" % (username, userGroup))
                    return S_OK(False)
            # Diff the current membership against the requested one
            groupsToBeDeletedFrom = []
            groupsToBeAddedTo = []
            for prevGroup in userData[username]['Groups']:
                if prevGroup not in properties['Groups']:
                    groupsToBeDeletedFrom.append(prevGroup)
                    modifiedUser = True
            for newGroup in properties['Groups']:
                if newGroup not in userData[username]['Groups']:
                    groupsToBeAddedTo.append(newGroup)
                    modifiedUser = True
            for group in groupsToBeDeletedFrom:
                self.__removeUserFromGroup(group, username)
                gLogger.info("Removed user %s from group %s" %
                             (username, group))
            for group in groupsToBeAddedTo:
                self.__addUserToGroup(group, username)
                gLogger.info("Added user %s to group %s" % (username, group))
        if modifiedUser:
            gLogger.info("Modified user %s" % username)
            self.__csModified = True
        else:
            gLogger.info("Nothing to modify for user %s" % username)
        return S_OK(True)
Пример #36
0
    def export_getTabbedSummaryWeb(self, table, requestedTables, selectDict,
                                   sortList, startItem, maxItems):
        """
        Build the web summary for the primary `table` plus the related tables
        listed in `requestedTables`, pre-filtering the latter through the key
        columns they share with the primary table.

        :param str table: primary table ('Transformations',
            'TransformationFiles' or 'TransformationTasks')
        :param dict requestedTables: {tableName: {'SortList': ...,
            'StartItem': ..., 'MaxItems': ...}} for the dependent tables
        :param dict selectDict: selection criteria for the primary table
        :param list sortList: sort specification for the primary table
        :param int startItem: pagination offset for the primary table
        :param int maxItems: pagination page size for the primary table
        :return: S_OK({tableName: summary}) or S_ERROR
        """
        # For each table, the key columns linking it to each other table
        tableDestinations = {
            'Transformations': {
                'TransformationFiles': ['TransformationID'],
                'TransformationTasks': ['TransformationID']
            },
            'TransformationFiles': {
                'Transformations': ['TransformationID'],
                'TransformationTasks': ['TransformationID', 'TaskID']
            },
            'TransformationTasks': {
                'Transformations': ['TransformationID'],
                'TransformationFiles': ['TransformationID', 'TaskID']
            }
        }

        # Columns offered as selections, per table
        tableSelections = {
            'Transformations': [
                'TransformationID', 'AgentType', 'Type', 'TransformationGroup',
                'Plugin'
            ],
            'TransformationFiles':
            ['TransformationID', 'TaskID', 'Status', 'UsedSE', 'TargetSE'],
            'TransformationTasks':
            ['TransformationID', 'TaskID', 'ExternalStatus', 'TargetSE']
        }

        # Time stamp column, per table
        tableTimeStamps = {
            'Transformations': 'CreationDate',
            'TransformationFiles': 'LastUpdate',
            'TransformationTasks': 'CreationTime'
        }

        # Status column, per table
        tableStatusColumn = {
            'Transformations': 'Status',
            'TransformationFiles': 'Status',
            'TransformationTasks': 'ExternalStatus'
        }

        resDict = {}
        res = self.__getTableSummaryWeb(table,
                                        selectDict,
                                        sortList,
                                        startItem,
                                        maxItems,
                                        selectColumns=tableSelections[table],
                                        timeStamp=tableTimeStamps[table],
                                        statusColumn=tableStatusColumn[table])
        if not res['OK']:
            gLogger.error("Failed to get Summary for table",
                          "%s %s" % (table, res['Message']))
            return self._parseRes(res)
        resDict[table] = res['Value']
        selections = res['Value']['Selections']
        # Propagate the primary table's key selections to the linked tables
        tableSelection = {}
        for destination in tableDestinations[table]:
            tableSelection[destination] = {}
            for parameter in tableDestinations[table][destination]:
                tableSelection[destination][parameter] = selections.get(
                    parameter, [])

        # FIX: the loop variable used to be named 'table', shadowing the
        # parameter above; renamed to reqTable (behavior unchanged)
        for reqTable, paramDict in requestedTables.items():
            sortList = paramDict.get('SortList', [])
            startItem = paramDict.get('StartItem', 0)
            maxItems = paramDict.get('MaxItems', 50)
            res = self.__getTableSummaryWeb(
                reqTable,
                tableSelection[reqTable],
                sortList,
                startItem,
                maxItems,
                selectColumns=tableSelections[reqTable],
                timeStamp=tableTimeStamps[reqTable],
                statusColumn=tableStatusColumn[reqTable])
            if not res['OK']:
                gLogger.error("Failed to get Summary for table",
                              "%s %s" % (reqTable, res['Message']))
                return self._parseRes(res)
            resDict[reqTable] = res['Value']
        return S_OK(resDict)
Пример #37
0
 def _parseRes(self, res):
     if not res['OK']:
         gLogger.error('TransformationManager failure', res['Message'])
     return res
Пример #38
0
    def getStorages(self, storageName, pluginList=None, hideExceptions=False):
        """ Get an instance of a Storage based on the DIRAC SE name, using the CS entries.

        Resolves Alias/BaseSE indirections, reads the SE options and protocol
        sections, and instantiates one storage plugin object per protocol.
        Note: mutates instance state (name, options, protocols, storages,
        local/remote plugin lists) as a side effect.

        :params storageName: is the DIRAC SE name i.e. 'CERN-RAW'
        :params pluginList: is an optional list of protocols if a sub-set is desired i.e ['SRM2','SRM1']
        :params hideExceptions: passed through to the plugin instantiation

        :return: S_OK(dictionary containing storage elements and information about them) or S_ERROR
    """
        # Reset per-call state on the factory instance
        self.remotePlugins = []
        self.localPlugins = []
        self.name = ''
        self.options = {}
        self.protocols = {}
        self.storages = []
        if pluginList is None:
            pluginList = []
        elif isinstance(pluginList, basestring):
            pluginList = [pluginList]
        if not self.vo:
            gLogger.warn('No VO information available')

        # Get the name of the storage provided, resolving any Alias indirection
        res = self._getConfigStorageName(storageName, 'Alias')
        if not res['OK']:
            return res
        storageName = res['Value']
        self.name = storageName

        # In case the storage is made from a base SE, get this information
        res = self._getConfigStorageName(storageName, 'BaseSE')
        if not res['OK']:
            return res
        # If the storage is derived from another one, keep the information
        # We initialize the seConfigPath to SE_BASE_CONFIG_PATH if there is a derivedSE, SE_CONFIG_PATH if not
        if res['Value'] != storageName:
            derivedStorageName = storageName
            storageName = res['Value']
            seConfigPath = SE_BASE_CONFIG_PATH
        else:
            derivedStorageName = None
            seConfigPath = SE_CONFIG_PATH

        # Get the options defined in the CS for this storage
        res = self._getConfigStorageOptions(
            storageName,
            derivedStorageName=derivedStorageName,
            seConfigPath=seConfigPath)
        if not res['OK']:
            # This is for the backward compatibility and to invite developer to move their BaseSE in the correct section
            gLogger.warn("Deprecated configuration, you can ignore the error message above."\
                           " Please move the baseSE in the correct section: ", SE_BASE_CONFIG_PATH)
            # We change the value of seConfigPath to avoid other errors due to the bad SE_BASE_CONFIG_PATH
            seConfigPath = SE_CONFIG_PATH
            res = self._getConfigStorageOptions(
                storageName,
                derivedStorageName=derivedStorageName,
                seConfigPath=seConfigPath)
            if not res['OK']:
                return res
        self.options = res['Value']

        # Get the protocol specific details
        res = self._getConfigStorageProtocols(
            storageName,
            derivedStorageName=derivedStorageName,
            seConfigPath=seConfigPath)
        if not res['OK']:
            return res
        self.protocols = res['Value']

        requestedLocalPlugins = []
        requestedRemotePlugins = []
        requestedProtocolDetails = []
        turlProtocols = []
        # Generate the protocol specific plug-ins
        for protocolSection, protocolDetails in self.protocols.iteritems():
            # Plugin name defaults to the section name when PluginName is absent
            pluginName = protocolDetails.get('PluginName', protocolSection)
            if pluginList and pluginName not in pluginList:
                continue
            protocol = protocolDetails['Protocol']
            result = self.__generateStorageObject(
                storageName,
                pluginName,
                protocolDetails,
                hideExceptions=hideExceptions)
            if result['OK']:
                self.storages.append(result['Value'])
                # local/remote classification was filled in by the protocol parsing
                if pluginName in self.localPlugins:
                    turlProtocols.append(protocol)
                    requestedLocalPlugins.append(pluginName)
                if pluginName in self.remotePlugins:
                    requestedRemotePlugins.append(pluginName)
                requestedProtocolDetails.append(protocolDetails)
            else:
                gLogger.info(result['Message'])

        if self.storages:
            resDict = {}
            resDict['StorageName'] = self.name
            resDict['StorageOptions'] = self.options
            resDict['StorageObjects'] = self.storages
            resDict['LocalPlugins'] = requestedLocalPlugins
            resDict['RemotePlugins'] = requestedRemotePlugins
            resDict['ProtocolOptions'] = requestedProtocolDetails
            resDict['TurlProtocols'] = turlProtocols
            return S_OK(resDict)
        else:
            # No protocol could be instantiated at all
            errStr = "StorageFactory.getStorages: Failed to instantiate any storage protocols."
            gLogger.error(errStr, self.name)
            return S_ERROR(errStr)
Пример #39
0
  def getStorages( self, storageName, pluginList = None, hideExceptions = False ):
    """ Get an instance of a Storage based on the DIRAC SE name, using the CS entries.

        Resolves Alias/BaseSE indirections, reads the SE options and protocol
        details, and instantiates one storage plugin object per protocol.
        Note: mutates instance state (name, options, protocolDetails, storages,
        local/remote plugin lists) as a side effect.

        'storageName' is the DIRAC SE name i.e. 'CERN-RAW'
        'pluginList' is an optional list of protocols if a sub-set is desired i.e ['SRM2','SRM1']

        :return: S_OK( dict ) describing the storage, or S_ERROR
    """
    # Reset per-call state on the factory instance
    self.remotePlugins = []
    self.localPlugins = []
    self.name = ''
    self.options = {}
    self.protocolDetails = []
    self.storages = []
    if pluginList is None:
      pluginList = []
    elif isinstance( pluginList, basestring ):
      pluginList = [pluginList]
    if not self.vo:
      gLogger.warn( 'No VO information available' )

    # Get the name of the storage provided, resolving any Alias indirection
    res = self._getConfigStorageName( storageName, 'Alias' )
    if not res['OK']:
      return res
    storageName = res['Value']
    self.name = storageName

    # In case the storage is made from a base SE, get this information
    res = self._getConfigStorageName( storageName, 'BaseSE' )
    if not res['OK']:
      return res
    # If the storage is derived from another one, keep the information
    if res['Value'] != storageName:
      derivedStorageName = storageName
      storageName = res['Value']
    else:
      derivedStorageName = None

    # Get the options defined in the CS for this storage
    res = self._getConfigStorageOptions( storageName, derivedStorageName = derivedStorageName )
    if not res['OK']:
      return res
    self.options = res['Value']

    # Get the protocol specific details
    res = self._getConfigStorageProtocols( storageName, derivedStorageName = derivedStorageName )
    if not res['OK']:
      return res
    self.protocolDetails = res['Value']

    requestedLocalPlugins = []
    requestedRemotePlugins = []
    requestedProtocolDetails = []
    turlProtocols = []
    # Generate the protocol specific plug-ins
    for protocolDict in self.protocolDetails:
      pluginName = protocolDict.get( 'PluginName' )
      if pluginList and pluginName not in pluginList:
        continue
      protocol = protocolDict['Protocol']
      result = self.__generateStorageObject( storageName, pluginName, protocolDict, hideExceptions = hideExceptions )
      if result['OK']:
        self.storages.append( result['Value'] )
        # local/remote classification was filled in by the protocol parsing
        if pluginName in self.localPlugins:
          turlProtocols.append( protocol )
          requestedLocalPlugins.append( pluginName )
        if pluginName in self.remotePlugins:
          requestedRemotePlugins.append( pluginName )
        requestedProtocolDetails.append( protocolDict )
      else:
        gLogger.info( result['Message'] )

    if len( self.storages ) > 0:
      resDict = {}
      resDict['StorageName'] = self.name
      resDict['StorageOptions'] = self.options
      resDict['StorageObjects'] = self.storages
      resDict['LocalPlugins'] = requestedLocalPlugins
      resDict['RemotePlugins'] = requestedRemotePlugins
      resDict['ProtocolOptions'] = requestedProtocolDetails
      resDict['TurlProtocols'] = turlProtocols
      return S_OK( resDict )
    else:
      # No protocol could be instantiated at all
      errStr = "StorageFactory.getStorages: Failed to instantiate any storage protocols."
      gLogger.error( errStr, self.name )
      return S_ERROR( errStr )
Пример #40
0
  def _ByJobType(self):
    """ By default, all sites are allowed to do every job.
        The actual rules are freely specified in the Operation JobTypeMapping section.
        The content of the section may look like this:

        User
        {
          Exclude = PAK
          Exclude += Ferrara
          Exclude += Bologna
          Exclude += Paris
          Exclude += CERN
          Exclude += IN2P3
          Allow
          {
            Paris = IN2P3
            CERN = CERN
            IN2P3 = IN2P3
          }
        }
        DataReconstruction
        {
          Exclude = PAK
          Exclude += Ferrara
          Exclude += CERN
          Exclude += IN2P3
          Allow
          {
            Ferrara = CERN
            CERN = CERN
            IN2P3 = IN2P3
            IN2P3 += CERN
          }
        }
        Merge
        {
          Exclude = ALL
          Allow
          {
            CERN = CERN
            IN2P3 = IN2P3
          }
        }

        The sites in the exclusion list will be removed.
        The allow section says where each site may help another site

        :return: set of destination site names on success; on failure of the
                 site lookup the S_ERROR dict is returned instead.
                 Raises RuntimeError when self.params['JobType'] is not set.
    """
    # 1. get sites list
    res = getSites()
    if not res['OK']:
      gLogger.error("Could not get the list of sites", res['Message'])
      return res
    destSites = set(res['Value'])

    # 2. get JobTypeMapping "Exclude" value (and add autoAddedSites)
    gLogger.debug("Getting JobTypeMapping 'Exclude' value (and add autoAddedSites)")
    jobType = self.params['JobType']
    if not jobType:
      raise RuntimeError("No jobType specified")
    excludedSites = set(self.opsH.getValue('JobTypeMapping/%s/Exclude' % jobType, []))
    gLogger.debug("Explicitly excluded sites for %s task: %s" % (jobType, ','.join(excludedSites)))
    autoAddedSites = self.opsH.getValue('JobTypeMapping/AutoAddedSites', [])
    if 'WithStorage' in autoAddedSites:
      # Add all sites with storage, such that jobs can run wherever data is
      autoAddedSites.remove('WithStorage')
      autoAddedSites += DMSHelpers().getTiers(withStorage=True, tier=(0, 1, 2))

    # 3. removing sites in Exclude ('ALL' empties the destination set)
    if not excludedSites:
      pass
    elif 'ALL' in excludedSites:
      destSites = set()
    else:
      destSites -= excludedSites

    # 4. get JobTypeMapping "Allow" section (destination site -> helper sites)
    res = self.opsH.getOptionsDict('JobTypeMapping/%s/Allow' % jobType)
    if not res['OK']:
      gLogger.verbose(res['Message'])
      allowed = {}
    else:
      allowed = dict((site, set(fromChar(fromSites))) for site, fromSites in res['Value'].iteritems())

    # A job-type-specific AutoAddedSites overrides the global one
    autoAddedSites = set(self.opsH.getValue('JobTypeMapping/%s/AutoAddedSites' % jobType, autoAddedSites))
    gLogger.debug("Auto-added sites for %s task: %s" % (jobType, ','.join(autoAddedSites)))
    # 5. add autoAddedSites, if requested (each allows itself)
    for autoAddedSite in autoAddedSites:
      allowed.setdefault(autoAddedSite, set()).add(autoAddedSite)
    gLogger.debug("Allowed sites for %s task: %s" % (jobType, ','.join(allowed)))

    # 6. Allowing sites that should be allowed
    taskSiteDestination = self._BySE()

    # A destination is re-enabled when one of its helper sites is a task site
    # (or when no task site restriction exists at all)
    for destSite, fromSites in allowed.iteritems():
      for fromSite in fromSites:
        if not taskSiteDestination or fromSite in taskSiteDestination:
          destSites.add(destSite)

    gLogger.verbose("Computed list of destination sites for %s task with TargetSE %s: %s" % (jobType,
                                                                                             self.params['TargetSE'],
                                                                                             ','.join(destSites)))
    return destSites
Пример #41
0
 def submitTasks(self):
     """ Submit the tasks to an external system, using the taskManager provided.

     For each selected transformation: fetch tasks to submit, prepare them
     with the credentials of the current proxy, submit, and record the
     submission in the DB.  Failures on one transformation are logged and
     the loop continues with the next one.

     :return: S_OK()/S_ERROR
     """
     gLogger.info("submitTasks: Submitting tasks for transformations")
     # Tasks are submitted with the credentials of the agent's proxy
     res = getProxyInfo(False, False)
     if not res['OK']:
         gLogger.error(
             "submitTasks: Failed to determine credentials for submission",
             res['Message'])
         return res
     proxyInfo = res['Value']
     owner = proxyInfo['username']
     ownerGroup = proxyInfo['group']
     ownerDN = proxyInfo['identity']
     gLogger.info(
         "submitTasks: Tasks will be submitted with the credentials %s:%s" %
         (owner, ownerGroup))
     # Get the transformations which should be submitted
     tasksPerLoop = self.am_getOption('TasksPerLoop', 50)
     status = self.am_getOption('SubmitStatus', ['Active', 'Completing'])
     res = self._selectTransformations(transType=self.transType,
                                       status=status)
     if not res['OK']:
         return res
     for transformation in res['Value']:
         transID = transformation['TransformationID']
         transBody = transformation['Body']
         res = self.transClient.getTasksToSubmit(transID, tasksPerLoop)
         if not res['OK']:
             gLogger.error(
                 "submitTasks: Failed to obtain tasks for transformation",
                 "%s %s" % (transID, res['Message']))
             continue
         tasks = res['Value']['JobDictionary']
         if not tasks:
             gLogger.verbose(
                 "submitTasks: No tasks found for submission for transformation %s"
                 % transID)
             continue
         gLogger.info(
             "submitTasks: Obtained %d tasks for submission for transformation %s"
             % (len(tasks), transID))
         # prepare -> submit -> update DB; abort this transformation on any failure
         res = self.taskManager.prepareTransformationTasks(
             transBody, tasks, owner, ownerGroup, ownerDN)
         if not res['OK']:
             gLogger.error(
                 "submitTasks: Failed to prepare tasks for transformation",
                 "%s %s" % (transID, res['Message']))
             continue
         res = self.taskManager.submitTransformationTasks(res['Value'])
         if not res['OK']:
             gLogger.error(
                 "submitTasks: Failed to submit prepared tasks for transformation",
                 "%s %s" % (transID, res['Message']))
             continue
         res = self.taskManager.updateDBAfterTaskSubmission(res['Value'])
         if not res['OK']:
             gLogger.error(
                 "submitTasks: Failed to update DB after task submission for transformation",
                 "%s %s" % (transID, res['Message']))
             continue
     gLogger.info(
         "submitTasks: Submission of transformation tasks complete")
     return S_OK()
Пример #42
0
    def _getConfigStorageProtocolDetails(self,
                                         storageName,
                                         protocolSection,
                                         seConfigPath=SE_CONFIG_PATH):
        """
        Parse the contents of one protocol block of a storage element.

        :params storageName: storage section to check in the CS
        :params protocolSection: name of the protocol section to read
        :params seConfigPath: path of the storage section
                              (/Resources/StorageElements or .../StorageElementBases)
        :return: S_OK(dict) with the protocol options, S_ERROR on failure
        """
        # Fetch the option names available for this protocol in the CS
        protocolConfigPath = cfgPath(seConfigPath, storageName,
                                     protocolSection)
        res = gConfig.getOptions(protocolConfigPath)
        if not res['OK']:
            errStr = "StorageFactory.__getProtocolDetails: Failed to get protocol options."
            gLogger.error(errStr, "%s: %s" % (storageName, protocolSection))
            return S_ERROR(errStr)

        # These keys must always be present internally, defaulting to ''
        # even when the CS does not supply them
        protocolDict = {
            'Access': '',
            'Host': '',
            'Path': '',
            'Port': '',
            'Protocol': '',
            'SpaceToken': '',
            'WSUrl': ''
        }
        for optionName in res['Value']:
            protocolDict[optionName] = gConfig.getValue(
                cfgPath(protocolConfigPath, optionName), '')

        # A VO specific path, when defined, overrides the generic one
        if self.vo:
            result = gConfig.getOptionsDict(
                cfgPath(protocolConfigPath, 'VOPath'))
            voPath = result['Value'].get(self.vo, '') if result['OK'] else ''
            if voPath:
                protocolDict['Path'] = voPath

        # Classify the plugin as remote or local; warn when 'Access' is
        # unusable and the protocol was not already registered
        plugin = protocolDict.get('PluginName', protocolSection)
        access = protocolDict['Access'].lower()
        if access == 'remote':
            self.remotePlugins.append(plugin)
        elif access == 'local':
            self.localPlugins.append(plugin)
        elif protocolSection not in self.protocols:
            errStr = "StorageFactory.__getProtocolDetails: The 'Access' option \
      for %s:%s is neither 'local' or 'remote'." % (storageName,
                                                    protocolSection)
            gLogger.warn(errStr)

        return S_OK(protocolDict)
Пример #43
0
    def updateTaskStatus(self):
        """ Update the external status of transformation tasks.

        For each selected transformation, tasks in one of the
        'TaskUpdateStatus' states that were last updated more than 10 minutes
        ago are re-queried via the task manager and their new status written
        back.  Failures on one transformation are logged and the loop moves on.

        :return: S_OK()/S_ERROR
        """
        gLogger.info("updateTaskStatus: Updating the Status of tasks")
        # Get the transformations to be updated
        status = self.am_getOption('UpdateTasksStatus',
                                   ['Active', 'Completing', 'Stopped'])
        res = self._selectTransformations(transType=self.transType,
                                          status=status,
                                          agentType=[])
        if not res['OK']:
            return res
        for transformation in res['Value']:
            transID = transformation['TransformationID']
            # Get the tasks which are in a UPDATE state
            updateStatus = self.am_getOption('TaskUpdateStatus', [
                'Checking', 'Deleted', 'Killed', 'Staging', 'Stalled',
                'Matched', 'Rescheduled', 'Completed', 'Submitted', 'Assigned',
                'Received', 'Waiting', 'Running'
            ])
            condDict = {
                "TransformationID": transID,
                "ExternalStatus": updateStatus
            }
            # Only consider tasks not updated in the last 10 minutes
            timeStamp = str(datetime.datetime.utcnow() -
                            datetime.timedelta(minutes=10))
            res = self.transClient.getTransformationTasks(
                condDict=condDict, older=timeStamp, timeStamp='LastUpdateTime')
            if not res['OK']:
                gLogger.error(
                    "updateTaskStatus: Failed to get tasks to update for transformation",
                    "%s %s" % (transID, res['Message']))
                continue
            if not res['Value']:
                gLogger.verbose(
                    "updateTaskStatus: No tasks found to update for transformation %s"
                    % transID)
                continue
            gLogger.verbose(
                "updateTaskStatus: getting %d tasks status of transformation %s"
                % (len(res['Value']), transID))
            res = self.taskManager.getSubmittedTaskStatus(res['Value'])
            if not res['OK']:
                gLogger.error(
                    "updateTaskStatus: Failed to get updated task statuses for transformation",
                    "%s %s" % (transID, res['Message']))
                continue
            # statusDict maps new status -> list of task IDs to move there
            statusDict = res['Value']
            if not statusDict:
                gLogger.info(
                    "updateTaskStatus: No tasks to update for transformation %d"
                    % transID)
            else:
                for status in sorted(statusDict):
                    taskIDs = statusDict[status]
                    gLogger.info(
                        "updateTaskStatus: Updating %d task(s) from transformation %d to %s"
                        % (len(taskIDs), transID, status))
                    res = self.transClient.setTaskStatus(
                        transID, taskIDs, status)
                    if not res['OK']:
                        gLogger.error(
                            "updateTaskStatus: Failed to update task status for transformation",
                            "%s %s" % (transID, res['Message']))

        gLogger.info(
            "updateTaskStatus: Transformation task status update complete")
        return S_OK()
Пример #44
0
 def __returnProblematicError( self, fileID, res ):
   """Bump the retry counter for *fileID*, log the failure, and return *res* unchanged."""
   self.incrementProblematicRetry( fileID )
   failureMessage = res['Message']
   gLogger.error( 'DataIntegrityClient failure', failureMessage )
   return res
Пример #45
0

def setSite(args):
    """Command-line switch callback: store the site name in the
    module-level global ``Site`` for later use by the script body."""
    global Site
    Site = args


def setQueue(args):
    """Command-line switch callback: store the queue name in the
    module-level global ``Queue`` for later use by the script body."""
    global Queue
    Queue = args


Script.registerSwitch("N:", "Name=", "Computing Element Name (Mandatory)",
                      setCEName)
Script.registerSwitch("S:", "Site=", "Site Name (Mandatory)", setSite)
Script.registerSwitch("Q:", "Queue=", "Queue Name (Mandatory)", setQueue)

Script.parseCommandLine(ignoreErrors=True)
args = Script.getExtraCLICFGFiles()

if len(args) > 1:
    Script.showHelp(exitCode=1)

result = Resources.getQueue(Site, ceName, Queue)

if not result['OK']:
    gLogger.error("Could not retrieve resource parameters",
                  ": " + result['Message'])
    DIRACExit(1)
gLogger.notice(json.dumps(result['Value']))
Пример #46
0
 def checkReservedTasks(self):
     """Recover tasks stuck in the 'Reserved' state.

     For each selected transformation, tasks that have been 'Reserved'
     for more than one hour (but less than a week) are checked against
     the task manager:

     * no external task found -> reset the task status to 'Created' so
       that submission is retried;
     * external task found -> set the task to 'Submitted' and record the
       external (WMS) task ID.

     :return: S_OK() on completion, or the S_ERROR from the
              transformation selection
     """
     gLogger.info("checkReservedTasks: Checking Reserved tasks")
     # Get the transformations which should be checked
     status = self.am_getOption('CheckReservedStatus',
                                ['Active', 'Completing', 'Stopped'])
     res = self._selectTransformations(transType=self.transType,
                                       status=status,
                                       agentType=[])
     if not res['OK']:
         return res
     for transformation in res['Value']:
         transID = transformation['TransformationID']
         # Select the tasks which have been in Reserved status for more than 1 hour for selected transformations
         condDict = {
             "TransformationID": transID,
             "ExternalStatus": 'Reserved'
         }
         # Window: stuck for over an hour, but ignore entries older than a week
         time_stamp_older = str(datetime.datetime.utcnow() -
                                datetime.timedelta(hours=1))
         time_stamp_newer = str(datetime.datetime.utcnow() -
                                datetime.timedelta(days=7))
         res = self.transClient.getTransformationTasks(
             condDict=condDict,
             older=time_stamp_older,
             newer=time_stamp_newer)
         if not res['OK']:
             gLogger.error(
                 "checkReservedTasks: Failed to get Reserved tasks for transformation",
                 "%s %s" % (transID, res['Message']))
             continue
         if not res['Value']:
             gLogger.verbose(
                 "checkReservedTasks: No Reserved tasks found for transformation %s"
                 % transID)
             continue
         res = self.taskManager.updateTransformationReservedTasks(
             res['Value'])
         if not res['OK']:
             # BUGFIX: this failure used to be logged at info level with a
             # misleading "No Reserved tasks found" message.
             gLogger.error(
                 "checkReservedTasks: Failed to update transformation reserved tasks",
                 "%s %s" % (transID, res['Message']))
             continue
         noTasks = res['Value']['NoTasks']
         taskNameIDs = res['Value']['TaskNameIDs']
         # For the tasks with no associated request found re-set the status of the task in the transformationDB
         for taskName in noTasks:
             transID, taskID = taskName.split('_')
             gLogger.info(
                 "checkReservedTasks: Resetting status of %s to Created as no associated task found"
                 % (taskName))
             res = self.transClient.setTaskStatus(int(transID), int(taskID),
                                                  'Created')
             if not res['OK']:
                 gLogger.warn(
                     "checkReservedTasks: Failed to update task status and ID after recovery",
                     "%s %s" % (taskName, res['Message']))
         # For the tasks for which an associated request was found update the task details in the transformationDB
         for taskName, extTaskID in taskNameIDs.items():
             transID, taskID = taskName.split('_')
             # BUGFIX: the message used to say 'Created' although the task is
             # actually set to 'Submitted' just below.
             gLogger.info(
                 "checkReservedTasks: Resetting status of %s to Submitted with ID %s"
                 % (taskName, extTaskID))
             res = self.transClient.setTaskStatusAndWmsID(
                 int(transID), int(taskID), 'Submitted', str(extTaskID))
             if not res['OK']:
                 gLogger.warn(
                     "checkReservedTasks: Failed to update task status and ID after recovery",
                     "%s %s" % (taskName, res['Message']))
     gLogger.info("checkReservedTasks: Updating of reserved tasks complete")
     return S_OK()
Пример #47
0
 def export_getRequestName(cls, requestID):
     """Resolve the request name for *requestID* via the RequestDB.

     :return: S_OK with the name, or the S_ERROR from the database
     """
     result = cls.__requestDB.getRequestName(requestID)
     if not result["OK"]:
         gLogger.error("getRequestName: %s" % result["Message"])
     return result
Пример #48
0
    def execute(self):
        """This is the first logical task to be executed and manages the New->Waiting transition of the Replicas.

        Steps, each pruning replicas that can no longer proceed:
          1. fetch 'New' replicas from the StagerDB;
          2. check LFN existence in the FileCatalog;
          3. fetch file sizes (zero-size files are terminal);
          4. fetch catalog replicas (zero-replica files are terminal);
          5. verify each requested SE actually holds a replica;
          6. push terminal failures and valid replica metadata back to the DB.

        :return: S_OK() (best effort; per-step DB failures are logged)
        """
        res = self.__getNewReplicas()
        if not res["OK"]:
            gLogger.fatal(
                "RequestPreparation.prepareNewReplicas: Failed to get replicas from StagerDB.",
                res["Message"])
            return res
        if not res["Value"]:
            gLogger.info("There were no New replicas found")
            return res
        replicas = res["Value"]["Replicas"]
        replicaIDs = res["Value"]["ReplicaIDs"]
        gLogger.info(
            "RequestPreparation.prepareNewReplicas: Obtained %s New replicas for preparation."
            % len(replicaIDs))

        # Check if the files exist in the FileCatalog
        res = self.__getExistingFiles(replicas)
        if not res["OK"]:
            return res
        exist = res["Value"]["Exist"]
        terminal = res["Value"]["Missing"]
        failed = res["Value"]["Failed"]
        if not exist:
            gLogger.error(
                "RequestPreparation.prepareNewReplicas: Failed to determine the existence of any file"
            )
            return S_OK()
        terminalReplicaIDs = {}
        # Missing LFNs: every requested replica of the file is terminally failed
        for lfn, reason in terminal.items():
            for replicaID in replicas[lfn].values():
                terminalReplicaIDs[replicaID] = reason
            replicas.pop(lfn)
        gLogger.info(
            "RequestPreparation.prepareNewReplicas: %s files exist in the FileCatalog."
            % len(exist))
        if terminal:
            gLogger.info(
                "RequestPreparation.prepareNewReplicas: %s files do not exist in the FileCatalog."
                % len(terminal))

        # Obtain the file sizes from the FileCatalog
        res = self.__getFileSize(exist)
        if not res["OK"]:
            return res
        failed.update(res["Value"]["Failed"])
        terminal = res["Value"]["ZeroSize"]
        fileSizes = res["Value"]["FileSizes"]
        if not fileSizes:
            gLogger.error(
                "RequestPreparation.prepareNewReplicas: Failed determine sizes of any files"
            )
            return S_OK()
        for lfn, reason in terminal.items():
            for _se, replicaID in replicas[lfn].items():
                terminalReplicaIDs[replicaID] = reason
            replicas.pop(lfn)
        gLogger.info(
            "RequestPreparation.prepareNewReplicas: Obtained %s file sizes from the FileCatalog."
            % len(fileSizes))
        if terminal:
            gLogger.info(
                "RequestPreparation.prepareNewReplicas: %s files registered with zero size in the FileCatalog."
                % len(terminal))

        # Obtain the replicas from the FileCatalog
        res = self.__getFileReplicas(list(fileSizes))
        if not res["OK"]:
            return res
        failed.update(res["Value"]["Failed"])
        terminal = res["Value"]["ZeroReplicas"]
        fileReplicas = res["Value"]["Replicas"]
        if not fileReplicas:
            gLogger.error(
                "RequestPreparation.prepareNewReplicas: Failed determine replicas for any files"
            )
            return S_OK()
        for lfn, reason in terminal.items():
            for _se, replicaID in replicas[lfn].items():
                terminalReplicaIDs[replicaID] = reason
            replicas.pop(lfn)
        gLogger.info(
            "RequestPreparation.prepareNewReplicas: Obtained replica information for %s file from the FileCatalog."
            % len(fileReplicas))
        if terminal:
            gLogger.info(
                "RequestPreparation.prepareNewReplicas: %s files registered with zero replicas in the FileCatalog."
                % len(terminal))

        # Check the replicas exist at the requested site
        replicaMetadata = []
        for lfn, requestedSEs in replicas.items():
            lfnReplicas = fileReplicas.get(lfn)

            # This should not happen in principle, but it was seen
            # after a corrupted staging request has entered the DB
            if not lfnReplicas:
                gLogger.error("Missing replicas information",
                              "%s %s" % (lfn, requestedSEs))
                continue

            # BUGFIX: iterate over a snapshot — replicas[lfn] IS requestedSEs,
            # and popping from it while iterating the live view raises
            # RuntimeError in Python 3.
            for requestedSE, replicaID in list(requestedSEs.items()):
                if requestedSE not in lfnReplicas:
                    terminalReplicaIDs[
                        replicaID] = "LFN not registered at requested SE"
                    replicas[lfn].pop(requestedSE)
                else:
                    replicaMetadata.append(
                        (replicaID, lfnReplicas[requestedSE], fileSizes[lfn]))

        # Update the states of the files in the database
        if terminalReplicaIDs:
            gLogger.info(
                "RequestPreparation.prepareNewReplicas: %s replicas are terminally failed."
                % len(terminalReplicaIDs))
            res = self.stagerClient.updateReplicaFailure(terminalReplicaIDs)
            if not res["OK"]:
                gLogger.error(
                    "RequestPreparation.prepareNewReplicas: Failed to update replica failures.",
                    res["Message"])
        if replicaMetadata:
            gLogger.info(
                "RequestPreparation.prepareNewReplicas: %s replica metadata to be updated."
                % len(replicaMetadata))
            # Sets the Status='Waiting' of CacheReplicas records that are OK with catalogue checks
            res = self.stagerClient.updateReplicaInformation(replicaMetadata)
            if not res["OK"]:
                gLogger.error(
                    "RequestPreparation.prepareNewReplicas: Failed to update replica metadata.",
                    res["Message"])
        return S_OK()
Пример #49
0
    def export_putRequest(cls, requestJSON):
        """Put a new request into RequestDB.

        :param cls: class ref
        :param str requestJSON: request serialized to JSON format
        :return: S_OK/S_ERROR from RequestDB.putRequest, or the
                 validation S_ERROR if the request is invalid
        """
        requestDict = json.loads(requestJSON)
        requestName = requestDict.get(
            "RequestID", requestDict.get('RequestName', "***UNKNOWN***"))
        request = Request(requestDict)
        optimized = request.optimize()
        if optimized.get("Value", False):
            gLogger.debug("putRequest: request was optimized")
        else:
            gLogger.debug(
                "putRequest: request unchanged",
                optimized.get("Message", "Nothing could be optimized"))

        valid = cls.validate(request)
        if not valid["OK"]:
            gLogger.error("putRequest: request %s not valid: %s" %
                          (requestName, valid["Message"]))
            return valid

        # If NotBefore is not set or user defined, we calculate its value

        now = datetime.datetime.utcnow().replace(microsecond=0)
        extraDelay = datetime.timedelta(0)
        if request.Status not in Request.FINAL_STATES and (
                not request.NotBefore or request.NotBefore < now):
            # We don't delay if it is the first insertion
            if getattr(request, 'RequestID', 0):
                # If it is a constant delay, just set it
                if cls.constantRequestDelay:
                    extraDelay = datetime.timedelta(
                        minutes=cls.constantRequestDelay)
                else:
                    # If there is a waiting Operation with Files
                    op = request.getWaiting().get('Value')
                    if op and len(op):
                        attemptList = [
                            opFile.Attempt for opFile in op
                            if opFile.Status == "Waiting"
                        ]
                        if attemptList:
                            # Reuse attemptList instead of re-running the
                            # same comprehension just to find the maximum.
                            maxWaitingAttempt = max(attemptList)
                            # In case it is the first attempt, extraDelay is 0
                            # (maxWaitingAttempt can be 0, and log(0) is undefined)
                            extraDelay = datetime.timedelta(
                                minutes=2 * math.log(maxWaitingAttempt)
                                if maxWaitingAttempt else 0)

                request.NotBefore = now + extraDelay

        gLogger.info("putRequest: request %s not before %s (extra delay %s)" %
                     (request.RequestName, request.NotBefore, extraDelay))

        requestName = request.RequestName
        gLogger.info("putRequest: Attempting to set request '%s'" %
                     requestName)
        return cls.__requestDB.putRequest(request)
Пример #50
0
 def export_getRequestInfo(cls, requestName):
     """Fetch summary information for a request given its ID or name.

     :return: S_OK with the info, or the S_ERROR from the database
     """
     result = cls.__requestDB.getRequestInfo(requestName)
     if not result["OK"]:
         gLogger.error("getRequestInfo: %s" % result["Message"])
     return result
Пример #51
0
def main():
    """Register a host (name + certificate DN, plus optional properties)
    in the DIRAC Configuration Service, then make sure it is also known
    to ComponentMonitoring. Exits the process with 0 on success, 255 on
    any registration error."""
    global hostName
    global hostDN
    global hostProperties
    Script.registerSwitch("H:", "HostName:", "Name of the Host (Mandatory)",
                          setHostName)
    Script.registerSwitch("D:", "HostDN:",
                          "DN of the Host Certificate (Mandatory)", setHostDN)
    Script.registerSwitch(
        "P:", "Property:",
        "Property to be added to the Host (Allow Multiple instances or None)",
        addProperty)
    # Registering arguments will automatically add their description to the help menu
    Script.registerArgument([
        "Property=<Value>: Other properties to be added to the Host like (Responsible=XXX)"
    ],
                            mandatory=False)

    _, args = Script.parseCommandLine(ignoreErrors=True)

    # Both the host name and its DN are mandatory.
    if hostName is None or hostDN is None:
        Script.showHelp(exitCode=1)

    from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin

    diracAdmin = DiracAdmin()
    exitCode = 0
    errorList = []

    hostProps = {"DN": hostDN}
    if hostProperties:
        hostProps["Properties"] = ", ".join(hostProperties)

    # Remaining positional arguments are free-form Name=Value host properties.
    for prop in args:
        pl = prop.split("=")
        if len(pl) < 2:
            errorList.append(
                ("in arguments",
                 "Property %s has to include a '=' to separate name from value"
                 % prop))
            exitCode = 255
        else:
            pName = pl[0]
            # Re-join so values containing '=' are preserved.
            pValue = "=".join(pl[1:])
            gLogger.info("Setting property %s to %s" % (pName, pValue))
            hostProps[pName] = pValue

    # Create (or update) the host entry in the CS and commit the change.
    if not diracAdmin.csModifyHost(
            hostName, hostProps, createIfNonExistant=True)["OK"]:
        errorList.append(("add host", "Cannot register host %s" % hostName))
        exitCode = 255
    else:
        result = diracAdmin.csCommitChanges()
        if not result["OK"]:
            errorList.append(("commit", result["Message"]))
            exitCode = 255

    # On success, also register the host in ComponentMonitoring (best effort).
    if exitCode == 0:
        from DIRAC.FrameworkSystem.Client.ComponentMonitoringClient import ComponentMonitoringClient

        cmc = ComponentMonitoringClient()
        ret = cmc.hostExists(dict(HostName=hostName))
        if not ret["OK"]:
            gLogger.error(
                "Cannot check if host is registered in ComponentMonitoring",
                ret["Message"])
        elif ret["Value"]:
            gLogger.info("Host already registered in ComponentMonitoring")
        else:
            ret = cmc.addHost(dict(HostName=hostName, CPU="TO_COME"))
            if not ret["OK"]:
                gLogger.error("Failed to add Host to ComponentMonitoring",
                              ret["Message"])

    for error in errorList:
        gLogger.error("%s: %s" % error)

    DIRAC.exit(exitCode)
Пример #52
0
 def export_getRequestStatus(cls, requestName):
     """Look up the current status of the named request.

     :return: S_OK with the status, or the S_ERROR from the database
     """
     result = cls.__requestDB.getRequestStatus(requestName)
     if not result["OK"]:
         gLogger.error("getRequestStatus: %s" % result["Message"])
     return result
Пример #53
0
    inputFile = open(inputFileName, 'r')
    string = inputFile.read()
    inputFile.close()
    lfns.extend([lfn.strip() for lfn in string.splitlines()])
  else:
    lfns.append(inputFileName)

from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
dm = DataManager()

# Map: failure reason (as text) -> list of LFNs that failed with it.
errorReasons = {}
successfullyRemoved = 0
# Remove in chunks of 100 to keep individual service calls small.
for lfnList in breakListIntoChunks(lfns, 100):
  res = dm.removeFile(lfnList)
  if not res['OK']:
    gLogger.error("Failed to remove data", res['Message'])
    DIRAC.exit(-2)
  for lfn, r in res['Value']['Failed'].items():
    # Group failures by textual reason for a compact summary below.
    errorReasons.setdefault(str(r), []).append(lfn)
  successfullyRemoved += len(res['Value']['Successful'])

for reason, lfns in errorReasons.items():
  gLogger.notice("Failed to remove %d files with error: %s" % (len(lfns), reason))
if successfullyRemoved > 0:
  gLogger.notice("Successfully removed %d files" % successfullyRemoved)
DIRAC.exit(0)
Пример #54
0
    def export_checkComponentLog(self, component):
        """Check component log(s) for recent errors.

        :param component: '*' for every set-up component, a single
            'System/Component' string, or a list of such strings
        :return: S_OK({comp: {'ErrorsHour': int, 'ErrorsDay': int,
            'LastError': str}}); counts are -1 when the log is unreadable
        """
        componentList = []
        if "*" in component:
            if component == "*":
                # Expand '*' to every Service/Agent/Executor set up here
                result = gComponentInstaller.getSetupComponents()
                if result["OK"]:
                    for ctype in ["Services", "Agents", "Executors"]:
                        if ctype in result["Value"]:
                            for sname in result["Value"][ctype]:
                                for cname in result["Value"][ctype][sname]:
                                    componentList.append("/".join(
                                        [sname, cname]))
        elif isinstance(component, str):
            componentList = [component]
        else:
            componentList = component

        resultDict = {}
        for comp in componentList:
            if "/" not in comp:
                continue
            system, cname = comp.split("/")

            startDir = gComponentInstaller.startDir
            currentLog = startDir + "/" + system + "_" + cname + "/log/current"
            try:
                with open(currentLog, "r") as logFile:
                    logLines = logFile.readlines()
            except IOError as err:
                # BUGFIX: message grammar was "File does not exists:"
                gLogger.error("File does not exist:", currentLog)
                resultDict[comp] = {
                    "ErrorsHour": -1,
                    "ErrorsDay": -1,
                    "LastError": currentLog + "::" + repr(err)
                }
                continue

            # Count ERROR lines in the last hour / last day and remember
            # the most recent error text.
            errors_1 = 0
            errors_24 = 0
            now = datetime.utcnow()
            lastError = ""
            for line in logLines:
                if "ERROR:" in line:
                    fields = line.split()
                    recent = False
                    if len(fields) < 2:  # if the line contains only one word
                        lastError = line.split("ERROR:")[-1].strip()
                        continue
                    timeStamp = fromString(fields[0] + " " + fields[1])
                    if not timeStamp:  # if the timestamp is missing in the log
                        lastError = line.split("ERROR:")[-1].strip()
                        continue
                    if (now - timeStamp) < hour:
                        errors_1 += 1
                        recent = True
                    if (now - timeStamp) < day:
                        errors_24 += 1
                        recent = True
                    if recent:
                        lastError = line.split("ERROR:")[-1].strip()

            resultDict[comp] = {
                "ErrorsHour": errors_1,
                "ErrorsDay": errors_24,
                "LastError": lastError
            }

        return S_OK(resultDict)
Пример #55
0
            if not result['OK']:
                self._transportPool.sendAndClose(trid, result)
                return
            proposalTuple = result['Value']
            #Instantiate handler
            result = self._instantiateHandler(trid, proposalTuple)
            if not result['OK']:
                self._transportPool.sendAndClose(trid, result)
                return
            handlerObj = result['Value']
            #Execute the action
            result = self._processProposal(trid, proposalTuple, handlerObj)
            #Close the connection if required
            if result['closeTransport'] or not result['OK']:
                if not result['OK']:
                    gLogger.error("Error processing proposal",
                                  result['Message'])
                self._transportPool.close(trid)
            return result
        finally:
            self._lockManager.unlockGlobal()
            if monReport:
                self.__endReportToMonitoring(*monReport)

    def _createIdentityString(self, credDict, clientTransport=None):
        if 'username' in credDict:
            if 'group' in credDict:
                identity = "[%s:%s]" % (credDict['username'],
                                        credDict['group'])
            else:
                identity = "[%s:unknown]" % credDict['username']
        else:
Пример #56
0
    sys.exit()
else:
    transIDs = [int(arg) for arg in sys.argv[1:]]

from DIRAC.TransformationSystem.Agent.ValidateOutputDataAgent import ValidateOutputDataAgent
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC import gLogger
import DIRAC

# Instantiate the agent directly so its integrity check can be driven
# on demand from this script.
agent = ValidateOutputDataAgent('Transformation/ValidateOutputDataAgent',
                                'Transformation/ValidateOutputDataAgent',
                                'dirac-transformation-verify-outputdata')
agent.initialize()

client = TransformationClient()
for transID in transIDs:
    res = client.getTransformationParameters(transID, ['Status'])
    if not res['OK']:
        # Two-argument form: fixed message plus variable detail, matching
        # the logging convention used elsewhere in DIRAC.
        gLogger.error("Failed to determine transformation status",
                      res['Message'])
        continue
    status = res['Value']
    # Only transformations whose output is (being) validated can be checked.
    if status not in [
            'ValidatingOutput', 'WaitingIntegrity', 'Active', 'Completed'
    ]:
        gLogger.error(
            "The transformation is in %s status and can not be validated" %
            status)
        continue
    agent.checkTransformationIntegrity(transID)
Пример #57
0
# Require the <system> and <component> positional arguments.
if len(args) < 2:
    Script.showHelp()
    DIRACexit(1)

system = args[0]
component = args[1]

# Find the matching installation on this host that is still installed
# (UnInstallationTime is None).
monitoringClient = ComponentMonitoringClient()
result = monitoringClient.getInstallations(
    {
        'Instance': component,
        'UnInstallationTime': None
    }, {'System': system}, {'HostName': socket.getfqdn()}, True)
if not result['OK']:
    gLogger.error(result['Message'])
    DIRACexit(1)
# Exactly one installation must match; bail out otherwise.
if len(result['Value']) < 1:
    gLogger.error('Given component does not exist')
    DIRACexit(1)
if len(result['Value']) > 1:
    gLogger.error('Too many components match')
    DIRACexit(1)

removeLogs = False
if force:
    removeLogs = True
else:
    if result['Value'][0]['Component']['Type'] in InstallTools.COMPONENT_TYPES:
        result = promptUser('Remove logs?', ['y', 'n'], 'n')
        if result['OK']:
Пример #58
0
    def _getBKKQuery(self, mode='full', fileType=None, previousProdID=0):
        """Assemble the bookkeeping (BKK) query dictionary.

        :param str mode: 'full' builds the query from this instance's
            bookkeeping attributes; 'frompreviousprod' builds one that
            selects the output of a previous production
        :param list fileType: file types ('frompreviousprod' mode only)
        :param int previousProdID: previous production ID
        :raises ValueError: when a run list is mixed with a start/end run,
            or when the run range is inverted
        """
        if fileType is None:
            fileType = []

        modeKey = mode.lower()
        if modeKey == 'full':
            query = {
                'FileType': ';;;'.join(self.bkFileType),
                'EventType': str(self.eventType),
                'ConfigName': self.configName,
                'ConfigVersion': self.configVersion,
            }

            if self.dataTakingConditions:
                query['DataTakingConditions'] = self.dataTakingConditions
            if self.processingPass:
                query['ProcessingPass'] = self.processingPass
            if self.dqFlag:
                query['DataQualityFlag'] = self.dqFlag.replace(',', ';;;').replace(' ', '')

            # A run list is mutually exclusive with a start/end run range
            if (self.startRun or self.endRun) and self.runsList:
                raise ValueError(
                    "Please don't mix runs list with start/end run")

            if self.startRun and self.endRun and self.endRun < self.startRun:
                gLogger.error(
                    "Your end run '%d' should be more than your start run '%d'!"
                    % (self.endRun, self.startRun))
                raise ValueError("Error setting start or end run")

            if self.startRun:
                query['StartRun'] = self.startRun
            if self.endRun:
                query['EndRun'] = self.endRun
            if self.runsList:
                query['RunNumbers'] = self.runsList.replace(',', ';;;').replace(' ', '')
            if self.visibility:
                query['Visible'] = self.visibility

        elif modeKey == 'frompreviousprod':
            query = {
                'FileType': ';;;'.join(fileType).replace(' ', ''),
                'ProductionID': int(previousProdID),
            }
            if self.eventType:
                query['EventType'] = str(self.eventType)
            if self.dqFlag:
                query['DataQualityFlag'] = self.dqFlag.replace(',', ';;;').replace(' ', '')

        return query
Пример #59
0
from DIRAC.Core.Base import Script

Script.setUsageMessage("""
Get the currently defined user data volume quotas

Usage:
   %s [options]
""" % Script.scriptName)

Script.parseCommandLine(ignoreErrors=False)

import DIRAC
from DIRAC import gLogger, gConfig
from DIRAC.Core.Security.ProxyInfo import getProxyInfo

# Identify the calling user from the proxy.
res = getProxyInfo(False, False)
if not res['OK']:
    gLogger.error("Failed to get client proxy information.", res['Message'])
    DIRAC.exit(2)
proxyInfo = res['Value']
username = proxyInfo['username']

try:
    # The per-user quota overrides the registry-wide default.
    quota = gConfig.getValue('/Registry/DefaultStorageQuota', 0.)
    quota = gConfig.getValue('/Registry/Users/%s/Quota' % username, quota)
    gLogger.notice('Current quota found to be %.1f GB' % quota)
    DIRAC.exit(0)
except Exception as x:
    gLogger.exception("Failed to convert retrieved quota", '', x)
    DIRAC.exit(-1)
                        "we can provide the base module version "
                        "(if it is needed): for example: v3r0", cliParams.setExtensionVersion)
  Script.registerSwitch("E:", "extensionSource=", "if we have an extension "
                              "we must provide code repository url", cliParams.setExtensionSource)
  Script.registerSwitch("P:", "extjspath=", "directory of the extjs library", cliParams.setExtJsPath)

  Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
                                    '\nUsage:',
                                    '  %s <option> ...\n' % Script.scriptName,
                                    '  A source, name and version are required to build the tarball',
                                    '  For instance:',
                                    '     %s -n DIRAC -v v1r0 -z '
                                    'svn -u http://svnweb.cern.ch/guest/'
                                    'dirac/DIRAC/tags/DIRAC/v1r0' % Script.scriptName]))

  Script.parseCommandLine(ignoreErrors=False)

  result = cliParams.isOK()
  if not result['OK']:
    gLogger.error(result['Message'])
    Script.showHelp()
    sys.exit(1)

  tmc = TarModuleCreator(cliParams)
  result = tmc.create()
  if not result['OK']:
    gLogger.error("Could not create the tarball: %s" % result['Message'])
    sys.exit(1)
  gLogger.always("Tarball successfully created at %s" % result['Value'])
  sys.exit(0)