Example #1
  def doMaster(self):
    """
    This method calls the doNew method for each hostname that exists
    in the DowntimeCache table of the local database.

    :return: S_OK / S_ERROR
    """

    # Query DB for all downtimes
    result = self.rmClient.selectDowntimeCache()
    if not result['OK']:
      return result

    for data in result['Value']:

      # If already processed don't do it again
      if data[0] in self.seenHostnames:
        continue

      # data[0] contains the hostname
      gLogger.verbose("Checking if the downtime of %s has been changed" % data[0])
      result = self.doNew(data[0])
      if not result['OK']:
        return result

      self.seenHostnames.add(data[0])

    return S_OK()
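Note: every example here relies on DIRAC's return-structure convention, where each call yields a dict carrying an 'OK' flag plus either a 'Value' or a 'Message'. A minimal sketch of that convention (in DIRAC itself these helpers come from DIRAC.Core.Utilities.ReturnValues):

def S_OK(value=None):
  # Successful result: callers test result['OK'] and read result['Value']
  return {'OK': True, 'Value': value}

def S_ERROR(message=''):
  # Failed result: callers read result['Message']
  return {'OK': False, 'Message': message}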
Example #2
 def initialize( self ):
   for serviceName in self.__services:
     gLogger.verbose( "Initializing %s" % serviceName )
     result = self.__services[ serviceName ].initialize()
     if not result[ 'OK' ]:
       return result
   return S_OK()
Example #3
 def getLocalProtocols( self ):
   """ Get the list of all the local access protocols defined for this Storage Element
   """
   if not self.valid:
     return S_ERROR( self.errorReason )
   gLogger.verbose( "StorageElement.getLocalProtocols: Obtaining local protocols for %s." % self.name )
   return S_OK( self.localProtocols )
Example #4
  def _rebuildDirectoryUsage( self ):
    """ Recreate and replenish the Storage Usage tables
    """

    req = "DROP TABLE IF EXISTS FC_DirectoryUsage_backup"
    result = self.db._update( req )
    req = "RENAME TABLE FC_DirectoryUsage TO FC_DirectoryUsage_backup"
    result = self.db._update( req )
    req = "CREATE TABLE `FC_DirectoryUsage` LIKE `FC_DirectoryUsage_backup`"
    result = self.db._update( req )
    if not result['OK']:
      return result

    result = self.__rebuildDirectoryUsageLeaves()
    if not result['OK']:
      return result

    result = self.db.dtree.findDir( '/' )
    if not result['OK']:
      return result
    if not result['Value']:
      return S_ERROR( 'Directory / not found' )
    dirID = result['Value']
    result = self.__rebuildDirectoryUsage( dirID )
    gLogger.verbose( 'Finished rebuilding Directory Usage' )
    return result
Example #5
  def __submitFTSTransfer( self ):
    """ create and execute glite-transfer-submit CLI command

    :param self: self reference
    """
    comm = [ 'glite-transfer-submit', '-s', self.ftsServer, '-f', self.surlFile, '-o' ]
    if self.targetToken:
      comm += [ '-t', self.targetToken ]
    if self.sourceToken:
      comm += [ '-S', self.sourceToken ]
    if self.__cksmTest:
      comm.append( "--compare-checksums" )
    gLogger.verbose( 'Executing %s' % ' '.join( comm ) )
    res = executeGridCommand( '', comm )
    os.remove( self.surlFile )
    if not res['OK']:
      return res
    returnCode, output, errStr = res['Value']
    if returnCode != 0:
      return S_ERROR( errStr )
    guid = output.replace( '\n', '' )
    if not checkGuid( guid ):
      return S_ERROR( 'Wrong GUID format returned' )
    self.ftsGUID = guid
    # if self.priority != 3:
    #  comm = ['glite-transfer-setpriority','-s', self.ftsServer,self.ftsGUID,str(self.priority)]
    #  executeGridCommand('',comm)
    return res
Example #6
def _getPoolCatalogs( directory = '' ):

  patterns = ['*.xml', '*.xml*gz']
  omissions = [r'\.bak$'] # to be ignored for production files

  #First obtain valid list of unpacked catalog files in directory
  poolCatalogList = []

  for pattern in patterns:
    fileList = glob.glob( os.path.join( directory, pattern ) )
    for fname in fileList:
      if fname.endswith( '.bak' ):
        gLogger.verbose( 'Ignoring BAK file: %s' % fname )
      elif tarfile.is_tarfile( fname ):
        try:
          gLogger.debug( 'Unpacking catalog XML file %s' % ( os.path.join( directory, fname ) ) )
          tarFile = tarfile.open( os.path.join( directory, fname ), 'r' )
          for member in tarFile.getmembers():
            tarFile.extract( member, directory )
            poolCatalogList.append( os.path.join( directory, member.name ) )
        except Exception as x:
          gLogger.error( 'Could not untar with exception', 
                         ' %s: %s' % ( fname, str( x ) ) )
      else:
        poolCatalogList.append( fname )

  return poolCatalogList
Example #7
 def __setParam( self, value ):
   change = False
   if self.item_called in self.paramTypes:
     if self.paramValues[self.item_called] != value:
       if type( value ) in self.paramTypes[self.item_called]:
         change = True
       else:
         raise TypeError( "%s %s %s expected one of %s" % ( self.item_called, value, type( value ),
                                                            self.paramTypes[self.item_called] ) )
   else:
     if self.item_called not in self.paramValues:
       change = True
     else:
       if self.paramValues[self.item_called] != value:
         change = True
   if not change:
     gLogger.verbose( "No change of parameter %s required" % self.item_called )
   else:
     gLogger.verbose( "Parameter %s to be changed" % self.item_called )
     transID = self.paramValues['TransformationID']
     if self.exists and transID:
       res = self.transClient.setTransformationParameter( transID, self.item_called, value )
       if not res['OK']:
         return res
     self.paramValues[self.item_called] = value
   return S_OK()
Example #8
    def execute(self):
        """ Main execution method
    """
        delta = time.time() - self.last_update
        if delta > self.reportPeriod:
            result = self.jobDB.getCounters("Jobs", ["Status", "Site"], {}, "")
            if not result["OK"]:
                return S_ERROR("Failed to get data from the Job Database")
            self.resultDB = result["Value"]
            self.last_update = time.time()

        totalDict = {}
        for status in MONITOR_STATUS:
            totalDict[status] = 0

        for row in self.resultDB:
            site = row[0]["Site"]
            status = row[0]["Status"]
            count = row[1]
            if site in MONITOR_SITES and status in MONITOR_STATUS:
                gLogger.verbose("Adding mark %s-%s: " % (status, site) + str(count))
                gMonitor.addMark("%s-%s" % (status, site), count)
            if status in totalDict:
                totalDict[status] += count

        for status in MONITOR_STATUS:
            gLogger.verbose("Adding mark %s-All sites: " % status + str(totalDict[status]))
            gMonitor.addMark("%s-All sites" % status, totalDict[status])

        return S_OK()
Example #9
def findGenericPilotCredentials(vo=False, group=False, pilotDN='', pilotGroup=''):
  """ Looks into the Operations/<>/Pilot section of CS to find the pilot credentials.
      Then check if the user has a registered proxy in ProxyManager.

      if pilotDN or pilotGroup are specified, use them
  """
  if not group and not vo:
    return S_ERROR("Need a group or a VO to determine the Generic pilot credentials")
  if not vo:
    vo = Registry.getVOForGroup(group)
    if not vo:
      return S_ERROR("Group %s does not have a VO associated" % group)
  opsHelper = Operations.Operations(vo=vo)
  if not pilotGroup:
    pilotGroup = opsHelper.getValue("Pilot/GenericPilotGroup", "")
  if not pilotDN:
    pilotDN = opsHelper.getValue("Pilot/GenericPilotDN", "")
  if not pilotDN:
    pilotUser = opsHelper.getValue("Pilot/GenericPilotUser", "")
    if pilotUser:
      result = Registry.getDNForUsername(pilotUser)
      if result['OK']:
        pilotDN = result['Value'][0]
  if pilotDN and pilotGroup:
    gLogger.verbose("Pilot credentials: %s@%s" % (pilotDN, pilotGroup))
    result = gProxyManager.userHasProxy(pilotDN, pilotGroup, 86400)
    if not result['OK']:
      return S_ERROR("%s@%s has no proxy in ProxyManager")
    return S_OK((pilotDN, pilotGroup))

  if pilotDN:
    return S_ERROR("DN %s does not have group %s" % (pilotDN, pilotGroup))
  return S_ERROR("No generic proxy in the Proxy Manager with groups %s" % pilotGroup)
Example #10
  def initialize( self ):
    #Build the URLs
    self._url = self._cfg.getURL()
    if not self._url:
      return S_ERROR( "Could not build service URL for %s" % self._name )
    gLogger.verbose( "Service URL is %s" % self._url )
    #Load handler
    result = self._loadHandlerInit()
    if not result[ 'OK' ]:
      return result
    self._handler = result[ 'Value' ]
    #Initialize lock manager
    self._lockManager = LockManager( self._cfg.getMaxWaitingPetitions() )
    self._initMonitoring()
    self._threadPool = ThreadPool( max( 1, self._cfg.getMinThreads() ),
                                   max( 0, self._cfg.getMaxThreads() ),
                                   self._cfg.getMaxWaitingPetitions() )
    self._threadPool.daemonize()
    self._msgBroker = MessageBroker( "%sMSB" % self._name, threadPool = self._threadPool )
    #Create static dict
    self._serviceInfoDict = { 'serviceName' : self._name,
                              'serviceSectionPath' : PathFinder.getServiceSection( self._name ),
                              'URL' : self._cfg.getURL(),
                              'messageSender' : MessageSender( self._name, self._msgBroker ),
                              'validNames' : self._validNames,
                              'csPaths' : [ PathFinder.getServiceSection( svcName ) for svcName in self._validNames ]
                            }
    #Call static initialization function
    try:
      self._handler[ 'class' ]._rh__initializeClass( dict( self._serviceInfoDict ),
                                                     self._lockManager,
                                                     self._msgBroker,
                                                     self._monitor )
      if self._handler[ 'init' ]:
        for initFunc in self._handler[ 'init' ]:
          gLogger.verbose( "Executing initialization function" )
          try:
            result = initFunc( dict( self._serviceInfoDict ) )
          except Exception as excp:
            gLogger.exception( "Exception while calling initialization function", lException = excp )
            return S_ERROR( "Exception while calling initialization function: %s" % str( excp ) )
          if not isReturnStructure( result ):
            return S_ERROR( "Service initialization function %s must return S_OK/S_ERROR" % initFunc )
          if not result[ 'OK' ]:
            return S_ERROR( "Error while initializing %s: %s" % ( self._name, result[ 'Message' ] ) )
    except Exception as e:
      errMsg = "Exception while initializing %s" % self._name
      gLogger.exception( e )
      gLogger.exception( errMsg )
      return S_ERROR( errMsg )

    #Load actions after the handler has initialized itself
    result = self._loadActions()
    if not result[ 'OK' ]:
      return result
    self._actions = result[ 'Value' ]

    gThreadScheduler.addPeriodicTask( 30, self.__reportThreadPoolContents )

    return S_OK()
Example #11
 def __checkoutFromSVN( self, moduleName = None, sourceURL = None, tagVersion = None ):
   """
   This method checks out a given tag from an SVN repository.
   Note: we can check out any project from an SVN repository.
   
   :param str moduleName: The name of the Module
   :param str sourceURL: The code repository
   :param str tagVersion: the tag for example: v4r3p6
   
   """
   
   if not moduleName:
     moduleName = self.params.name
   
   if not sourceURL:
     sourceURL = self.params.sourceURL   
     
   if not tagVersion:
     tagVersion = self.params.version
     
   cmd = "svn export --trust-server-cert --non-interactive '%s/%s' '%s'" % ( sourceURL, tagVersion,
                                                                             os.path.join( self.params.destination, moduleName ) )
   gLogger.verbose( "Executing: %s" % cmd )
   result = Subprocess.systemCall( 900, shlex.split(cmd) )
   if not result[ 'OK' ]:
     return S_ERROR( "Error while retrieving sources from SVN: %s" % result[ 'Message' ] )
   exitStatus, stdData, errData = result[ 'Value' ]
   if exitStatus:
     return S_ERROR( "Error while retrieving sources from SVN: %s" % "\n".join( [ stdData, errData ] ) )
   return S_OK()
Example #12
 def setPlotsLocation( self, plotsDir ):
   self.plotsLocation = plotsDir
   for plot in os.listdir( self.plotsLocation ):
     if plot.find( ".png" ) > 0:
       plotLocation = "%s/%s" % ( self.plotsLocation, plot )
       gLogger.verbose( "Purging %s" % plotLocation )
       os.unlink( plotLocation )
Example #13
 def __createListeners(self):
   for serviceName in self.__services:
     svcCfg = self.__services[serviceName].getConfig()
     protocol = svcCfg.getProtocol()
     port = svcCfg.getPort()
     if not port:
       return S_ERROR("No port defined for service %s" % serviceName)
     if protocol not in gProtocolDict:
       return S_ERROR("Protocol %s is not known for service %s" % (protocol, serviceName))
     self.__listeningConnections[serviceName] = {'port': port, 'protocol': protocol}
     transportArgs = {}
     for kw in ServiceReactor.__transportExtraKeywords:
       value = svcCfg.getOption(kw)
       if value:
         ikw = ServiceReactor.__transportExtraKeywords[kw]
         if ikw:
           kw = ikw
         if kw == 'timeout':
           value = int(value)
         transportArgs[kw] = value
     gLogger.verbose("Initializing %s transport" % protocol, svcCfg.getURL())
     transport = gProtocolDict[protocol]['transport'](("", port),
                                                      bServerMode=True, **transportArgs)
     retVal = transport.initAsServer()
     if not retVal['OK']:
       return S_ERROR("Cannot start listening connection for service %s: %s" % (serviceName, retVal['Message']))
     self.__listeningConnections[serviceName]['transport'] = transport
     self.__listeningConnections[serviceName]['socket'] = transport.getSocket()
   return S_OK()
Example #14
  def export_sendSMS( self, userName, body, fromAddress ):
    """ Send an SMS with supplied body to the specified DIRAC user using the Mail utility via an SMS switch.
    """
    gLogger.verbose( 'Received signal to send the following SMS to %s:\n%s' % ( userName, body ) )
    mobile = gConfig.getValue( '/Registry/Users/%s/Mobile' % userName, '' )
    if not mobile:
      return S_ERROR( 'No registered mobile number for %s' % userName )

    csSection = PathFinder.getServiceSection( 'Framework/Notification' )
    smsSwitch = gConfig.getValue( '%s/SMSSwitch' % csSection, '' )
    if not smsSwitch:
      return S_ERROR( 'No SMS switch is defined in CS path %s/SMSSwitch' % csSection )

    address = '%s@%s' % ( mobile, smsSwitch )
    subject = 'DIRAC SMS'
    m = Mail()
    m._subject = subject
    m._message = body
    m._mailAddress = address
    if fromAddress != 'None':
      m._fromAddress = fromAddress
    result = m._send()
    if not result['OK']:
      gLogger.warn( 'Could not send SMS to %s with the following message:\n%s' % ( userName, result['Message'] ) )
    else:
      gLogger.info( 'SMS sent successfully to %s ' % ( userName ) )
      gLogger.debug( result['Value'] )

    return result
Example #15
 def getTransformationTasks( self, condDict = None, older = None, newer = None, timeStamp = None,
                             orderAttribute = None, limit = 10000, inputVector = False ):
   """ gets all the transformation tasks for a transformation, incrementally.
       "limit" here is just used to determine the offset.
   """
   rpcClient = self._getRPC()
   transformationTasks = []
   if condDict is None:
     condDict = {}
   if timeStamp is None:
     timeStamp = 'CreationTime'
   # getting transformationFiles - incrementally
   offsetToApply = 0
   while True:
     res = rpcClient.getTransformationTasks( condDict, older, newer, timeStamp, orderAttribute, limit,
                                             inputVector, offsetToApply )
     if not res['OK']:
       return res
     else:
       gLogger.verbose( "Result for limit %d, offset %d: %d" % ( limit, offsetToApply, len( res['Value'] ) ) )
       if res['Value']:
         transformationTasks = transformationTasks + res['Value']
         offsetToApply += limit
       if len( res['Value'] ) < limit:
         break
   return S_OK( transformationTasks )
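The method above pages through the service with a growing offset and stops at the first short page. The same pattern in isolation, with fetchPage standing in for the RPC call:

def fetchAll(fetchPage, limit=10000):
  # fetchPage(offset, limit) -> list of at most `limit` records (illustrative stand-in)
  records = []
  offset = 0
  while True:
    page = fetchPage(offset, limit)
    records.extend(page)
    if len(page) < limit:
      break
    offset += limit
  return records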
Example #16
  def __init__(self, vo=None):
    """ c'tor

    :param str vo: name of the virtual organization (community)
    """

    if vo is None:
      vo = getVO()
    if not vo:
      raise Exception('No VO name given')

    self.vo = vo
    self.vomsVO = getVOOption(vo, "VOMSName")
    if not self.vomsVO:
      raise Exception("Can not get VOMS name for VO %s" % vo)

    self.urls = []
    result = gConfig.getSections('/Registry/VO/%s/VOMSServers' % self.vo)
    if result['OK']:
      for server in result['Value']:
        gLogger.verbose("Adding 'https://%s:8443/voms/%s/apiv2/users'" % (server, self.vomsVO))
        self.urls.append('https://%s:8443/voms/%s/apiv2/users' % (server, self.vomsVO))
    else:
      gLogger.error("Section '/Registry/VO/%s/VOMSServers' not found" % self.vo)

    self.userDict = None
Example #17
  def _syncResources( self ):
    '''
      Sync resources: compares CS with DB and does the necessary modifications.
      ( StorageElements, FTS, FileCatalogs and ComputingElements )
    '''
    
    gLogger.verbose( '-- Synchronizing Resources --' )
    
    gLogger.verbose( '-> StorageElements' )
    ses = self.__syncStorageElements()
    if not ses[ 'OK' ]:
      gLogger.error( ses[ 'Message' ] )
    
    gLogger.verbose( '-> FTS' )
    fts = self.__syncFTS()
    if not fts[ 'OK' ]:
      gLogger.error( fts[ 'Message' ] )
    
    gLogger.verbose( '-> FileCatalogs' )
    fileCatalogs = self.__syncFileCatalogs()
    if not fileCatalogs[ 'OK' ]:
      gLogger.error( fileCatalogs[ 'Message' ] ) 

    gLogger.verbose( '-> ComputingElements' )
    computingElements = self.__syncComputingElements()
    if not computingElements[ 'OK' ]:
      gLogger.error( computingElements[ 'Message' ] )

    #FIXME: VOMS

    return S_OK()
Example #18
 def __getClientInitArgs( self, trid, proposalTuple ):
   clientTransport = self._transportPool.get( trid )
   #Get the peer credentials
   credDict = clientTransport.getConnectingCredentials()
   if 'x509Chain' not in credDict:
     return S_OK()
   cKey = ( credDict[ 'DN' ],
            credDict.get( 'group', False ),
            credDict.get( 'extraCredentials', False ),
            credDict[ 'isLimitedProxy' ] )
   dP = self.__delegatedCredentials.get( cKey, 3600 )
   idString = self._createIdentityString( credDict, clientTransport )
   if dP:
     gLogger.verbose( "Proxy for %s is cached" % idString )
     return S_OK( dP )
   result = self.__requestDelegation( clientTransport, credDict )
   if not result[ 'OK' ]:
     gLogger.warn( "Could not get proxy for %s: %s" % ( idString, result[ 'Message' ] ) )
     return result
   delChain = result[ 'Value' ]
   delegatedChain = delChain.dumpAllToString()[ 'Value' ]
   secsLeft = delChain.getRemainingSecs()[ 'Value' ] - 1
   clientInitArgs = {
                       BaseClient.KW_SETUP : proposalTuple[0][1],
                       BaseClient.KW_TIMEOUT : 600,
                       BaseClient.KW_IGNORE_GATEWAYS : True,
                       BaseClient.KW_USE_CERTIFICATES : False,
                       BaseClient.KW_PROXY_STRING : delegatedChain
                       }
   if BaseClient.KW_EXTRA_CREDENTIALS in credDict:
     clientInitArgs[ BaseClient.KW_EXTRA_CREDENTIALS ] = credDict[ BaseClient.KW_EXTRA_CREDENTIALS ]
   gLogger.warn( "Got delegated proxy for %s: %s secs left" % ( idString, secsLeft ) )
   self.__delegatedCredentials.add( cKey, secsLeft, clientInitArgs )
   return S_OK( clientInitArgs )
Example #19
 def __fillOptimizers( self, opName ):
   gLogger.verbose( "Filling %s optimizers" % opName )
   while True:
     #Get first job if any
     jid = self.__jobOpQueue.popJob( opName )
      if jid is None:
       gLogger.verbose( "No more jobs for optimizers %s" % opName )
       #No more jobs
       return S_OK()
     result = self.__bookJobInOptimizer( opName, jid )
     if not result[ 'OK' ]:
       gLogger.info( "No empty %s optimizers now" % opName )
       self.__jobOpQueue.pushJob( opName, jid, ahead = True )
       return S_OK()
     opState = result[ 'Value' ]
     result = self.__jobsState.getJob( jid )
     if not result[ 'OK' ]:
       gLogger.warn( "Could not load job data", "for jid %s" % jid )
       self.__jobsState.forgetJob( jid )
       continue
     job = result[ 'Value' ]
     result = opState.requestOptimization( job )
     if not result[ 'OK' ]:
       self.__jobOpQueue.pushJob( opName, jid, ahead = True )
       gLogger.warn( "Could not request optimization", result[ 'Message' ] )
       self.optimizerDisconnected( opState.getTrid() )
   return S_OK()
Example #20
 def forgetJob( self, jid ):
   gLogger.verbose( "Forgetting about jid %s" % jid )
   self.__lock.acquire()
   try:
     return self.__innerForgetJob( jid )
   finally:
     self.__lock.release()
Example #21
  def __uploadInputSandbox( self, classAdJob, jobDescriptionObject = None ):
    """Checks the validity of the job Input Sandbox.
       The function returns the list of Input Sandbox files.
       The total volume of the input sandbox is evaluated
    """
    inputSandbox = self.__getInputSandboxEntries( classAdJob )

    realFiles = []
    badFiles = []
    diskFiles = []

    for isFile in inputSandbox:
      valid = True
      for tag in ( 'lfn:', 'LFN:', 'SB:', '%s' ):  # in case of parametric input sandbox, there is %s passed, so have to ignore it also
        if isFile.startswith( tag ):
          valid = False
          break
      if valid:
        realFiles.append( isFile )

    stringIOFiles = []
    stringIOFilesSize = 0
    if jobDescriptionObject is not None:
      if isinstance( jobDescriptionObject, StringIO.StringIO ):
        stringIOFiles = [jobDescriptionObject]
        stringIOFilesSize = len( jobDescriptionObject.buf )
        gLogger.debug( "Size of the stringIOFiles: " + str( stringIOFilesSize ) )
      else:
        return S_ERROR( "jobDescriptionObject is not a StringIO object" )

    # Check real files
    for isFile in realFiles:
      if not os.path.exists( isFile ):  # we are passing in real files, we expect them to be on disk
        badFiles.append( isFile )
        gLogger.warn( "inputSandbox file/directory " + isFile + " not found. Keep looking for the others" )
        continue
      diskFiles.append( isFile )

    diskFilesSize = File.getGlobbedTotalSize( diskFiles )
    gLogger.debug( "Size of the diskFiles: " + str( diskFilesSize ) )
    totalSize = diskFilesSize + stringIOFilesSize
    gLogger.verbose( "Total size of the inputSandbox: " + str( totalSize ) )

    okFiles = stringIOFiles + diskFiles
    if badFiles:
      result = S_ERROR( 'Input Sandbox is not valid' )
      result['BadFile'] = badFiles
      result['TotalSize'] = totalSize
      return result

    if okFiles:
      if not self.sandboxClient:
        self.sandboxClient = SandboxStoreClient( useCertificates = self.useCertificates )
      result = self.sandboxClient.uploadFilesAsSandbox( okFiles )
      if not result[ 'OK' ]:
        return result
      inputSandbox.append( result[ 'Value' ] )
      classAdJob.insertAttributeVectorString( "InputSandbox", inputSandbox )

    return S_OK()
Example #22
  def __checkoutFromGit( self ):
    if self.params.vcsBranch:
      brCmr = "-b %s" % self.params.vcsBranch
    else:
      brCmr = ""
    fDirName = os.path.join( self.params.destination, self.params.name )
    cmd = "git clone %s '%s' '%s'" % ( brCmr,
                                           self.params.sourceURL,
                                           fDirName )
    gLogger.verbose( "Executing: %s" % cmd )
    if os.system( cmd ):
      return S_ERROR( "Error while retrieving sources from git" )

    branchName = "DIRACDistribution-%s" % os.getpid()

    cmd = "( cd '%s'; git checkout -b '%s' '%s' )" % ( fDirName, branchName, self.params.version )

    gLogger.verbose( "Executing: %s" % cmd )
    exportRes = os.system( cmd )
    shutil.rmtree( "%s/.git" % fDirName )

    if exportRes:
      return S_ERROR( "Error while exporting from git" )

    return S_OK()
Example #23
 def extendTransformation( self, transID, maxTasks ):
   gLogger.info( "Considering extension of transformation %d" % transID )
   # Get the current count of tasks submitted for this transformation
   res = self.transClient.getTransformationTaskStats( transID )
   if not res['OK']:
     if res['Message'] != 'No records found':
       gLogger.error( "Failed to get task statistics", "%s %s" % ( transID, res['Message'] ) )
       return res
     else:
       statusDict = {}
   else:
     statusDict = res['Value']
   gLogger.verbose( "Current task count for transformation %d" % transID )
   for status in sortList( statusDict.keys() ):
     statusCount = statusDict[status]
     gLogger.verbose( "%s : %s" % ( status.ljust( 20 ), str( statusCount ).rjust( 8 ) ) )
   # Determine the number of tasks to be created
   numberOfTasks = self.calculateTaskNumber( maxTasks, statusDict )
   if not numberOfTasks:
     gLogger.info( "No tasks required for transformation %d" % transID )
     return S_OK()
   # Extend the transformation by the determined number of tasks
   res = self.transClient.extendTransformation( transID, numberOfTasks )
   if not res['OK']:
     gLogger.error( "Failed to extend transformation", "%s %s" % ( transID, res['Message'] ) )
     return res
   gLogger.info( "Successfully extended transformation %d by %d tasks" % ( transID, numberOfTasks ) )
   return S_OK()
Example #24
 def updateTaskStatus(self):
   gLogger.info("updateTaskStatus: Updating the Status of tasks")
   # Get the transformations to be updated
   status = self.am_getOption('UpdateTasksStatus',['Active','Completing','Stopped'])
   res = self._selectTransformations(transType=self.transType,status=status,agentType=[])
   if not res['OK']:
     return res
   for transformation in res['Value']:
     transID = transformation['TransformationID']
     # Get the tasks which are in a UPDATE state
     updateStatus = self.am_getOption('TaskUpdateStatus',['Checking','Deleted','Killed','Staging','Stalled','Matched','Rescheduled','Completed','Submitted','Received','Waiting','Running'])
     condDict = {"TransformationID":transID,"ExternalStatus":updateStatus}
     timeStamp = str(datetime.datetime.utcnow() - datetime.timedelta(minutes=10))
     res = self.transClient.getTransformationTasks(condDict=condDict,older=timeStamp, timeStamp='LastUpdateTime')
     if not res['OK']:
       gLogger.error("updateTaskStatus: Failed to get tasks to update for transformation", "%s %s" % (transID,res['Message']))
       continue
     if not res['Value']:
       gLogger.verbose("updateTaskStatus: No tasks found to update for transformation %s" % transID)
       continue
     res = self.getSubmittedTaskStatus(res['Value'])
     if not res['OK']:
       gLogger.error("updateTaskStatus: Failed to get updated task statuses for transformation", "%s %s" % (transID,res['Message']))
       continue
     statusDict = res['Value']
     for status in sortList(statusDict.keys()):
       taskIDs = statusDict[status]
       gLogger.info("updateTaskStatus: Updating %d task(s) from transformation %d to %s" % (len(taskIDs),transID,status))
       res = self.transClient.setTaskStatus(transID,taskIDs,status)
       if not res['OK']:
         gLogger.error("updateTaskStatus: Failed to update task status for transformation", "%s %s" % (transID,res['Message']))
           
   gLogger.info("updateTaskStatus: Transformation task status update complete")  
   return S_OK()
Example #25
def cleanUpLFNPath( lfn ):
  """ Normalise LFNs
  """
  gLogger.debug("LFN before Cleanup", lfn)
  lfn = posixpath.normpath(lfn)
  gLogger.verbose("LFN after Cleanup", lfn)
  return lfn
Example #26
    def __checkoutFromGit(self):
        if self.params.vcsBranch:
            brCmr = "-b %s" % self.params.vcsBranch
        else:
            brCmr = ""
        fDirName = os.path.join(self.params.destination, self.params.name)
        cmd = "git clone %s '%s' '%s'" % (brCmr, self.params.sourceURL, fDirName)
        gLogger.verbose("Executing: %s" % cmd)
        if os.system(cmd):
            return S_ERROR("Error while retrieving sources from git")

        branchName = "DIRACDistribution-%s" % os.getpid()

        isTagCmd = "( cd '%s'; git tag -l | grep '%s' )" % (fDirName, self.params.version)
        if os.system(isTagCmd):
            # No tag found, assume branch
            branchSource = "origin/%s" % self.params.version
        else:
            branchSource = self.params.version

        cmd = "( cd '%s'; git checkout -b '%s' '%s' )" % (fDirName, branchName, branchSource)

        gLogger.verbose("Executing: %s" % cmd)
        exportRes = os.system(cmd)

        # Add the keyword substitution
        gLogger.notice("Replacing keywords (can take a while)...")
        self.replaceKeywordsWithGit(fDirName)

        shutil.rmtree("%s/.git" % fDirName)

        if exportRes:
            return S_ERROR("Error while exporting from git")

        return S_OK()
Example #27
 def setGraphsLocation( self, graphsDir ):
   self.graphsLocation = graphsDir
   for graphName in os.listdir( self.graphsLocation ):
     if graphName.find( ".png" ) > 0:
       graphLocation = "%s/%s" % ( self.graphsLocation, graphName )
       gLogger.verbose( "Purging %s" % graphLocation )
       os.unlink( graphLocation )
Example #28
def getLogPath(paramDict, basePath=None):
  """ Can construct log file paths even if job fails e.g. no output files available.

  :param dict paramDict: dictionary with at least the keys ``PRODUCTION_ID``, ``JOB_ID``, ``LogFilePath``
  :param str basePath: Optional, base path for the log file failover; if not set, LogFilePath
    from paramDict is used as a base
  :returns: S_OK with dict with LogFilePath and LogTargetPath
  """
  result = checkForMandatoryKeys(paramDict, ['PRODUCTION_ID', 'JOB_ID', 'LogFilePath'])
  if not result['OK']:
    return result

  productionID = paramDict['PRODUCTION_ID']
  jobID = paramDict['JOB_ID']
  logFileName = "%s_%s.tar" %( str(productionID).zfill(8), str(int(jobID)).zfill(4) )
  if basePath:
    logTargetPath = [ cleanUpLFNPath( os.path.join( basePath, "LOG", str(productionID).zfill(8), logFileName ) ) ]
  else:
    # need to build logPath from logFilePath, as it's not there, and must be as in the method above
    logPathtemp = cleanUpLFNPath(paramDict['LogFilePath']).split("/")
    logPath = "/"+os.path.join(*logPathtemp[0:-1])
    logTargetPath = ['%s/%s_%s.tar' % ( logPath, str(productionID).zfill(8), str(int(jobID)).zfill(3))]

  #this is not doing anything except return the same string as was passed into the function
  logFilePath = paramDict['LogFilePath']
  gLogger.verbose('Log file path is: %s' % logFilePath)
  gLogger.verbose('Log target path is: %s' % logTargetPath)
  jobOutputs = {'LogFilePath' : logFilePath, 'LogTargetPath' : logTargetPath}
  return S_OK(jobOutputs)
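A hypothetical invocation (all values illustrative) showing the shape of the input and output:

result = getLogPath({'PRODUCTION_ID': 12345, 'JOB_ID': 678,
                     'LogFilePath': '/some/vo/LOG/00012345/0000'})
if result['OK']:
  print(result['Value']['LogTargetPath'])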
Example #29
 def __setParam(self, value):
     change = False
     if self.item_called in self.paramTypes:
         oldValue = self.paramValues[self.item_called]
         if oldValue != value:
             if type(value) in self.paramTypes[self.item_called]:
                 change = True
             else:
                 raise TypeError, "%s %s %s expected one of %s" % (
                     self.item_called,
                     value,
                     type(value),
                     self.paramTypes[self.item_called],
                 )
     if not self.item_called in self.paramTypes.keys():
         if not self.paramValues.has_key(self.item_called):
             change = True
         else:
             oldValue = self.paramValues[self.item_called]
             if oldValue != value:
                 change = True
     if not change:
         gLogger.verbose("No change of parameter %s required" % self.item_called)
     else:
         gLogger.verbose("Parameter %s to be changed" % self.item_called)
         transID = self.paramValues["TransformationID"]
         if self.exists and transID:
             res = self.transClient.setTransformationParameter(transID, self.item_called, value)
             if not res["OK"]:
                 return res
         self.paramValues[self.item_called] = value
     return S_OK()
Example #30
  def isSubRequestDone( self, ind, rType ):
    """ Check if the request contains more operations to be performed
    """
    if rType not in self.subRequests:
      return S_ERROR( "No requests of type specified found." )
    elif len( self.subRequests[rType] ) <= ind:
      return S_ERROR( "Subrequest index is out of range." )
    else:
      status = self.getSubRequestAttributeValue( ind, rType, "Status" )['Value']
      if status == 'Done':
        return S_OK( 1 )
      files = self.getSubRequestFiles( ind, rType )['Value']
      for rFile in files:
        if 'Status' not in rFile:
          gLogger.error( "!!! The file has no status information !!!" )
          gLogger.error( "Ind:%s Type:%s" % ( ind, rType ), self.toXML()['Value'] )
        elif rFile['Status'] not in ( 'Done', 'Failed' ):
          gLogger.verbose( 'Found file in a non-Done or non-Failed state' )
          return S_OK( 0 )
      datasets = self.getSubRequestDatasets( ind, rType )['Value']
      for dataset in datasets:
        if dataset['Status'] != 'Done':
          return S_OK( 0 )

    if files or datasets:
      return S_OK( 1 )
    else:
      return S_OK( 0 )
Example #31
def getType(fileNames, directory=''):
    """ This function searches the directory for POOL XML catalog files and extracts the type of the pfn.

      fileNames can be a string or a list, directory defaults to PWD.
  """

    if not directory:
        directory = os.getcwd()

    if not os.path.isdir(directory):
        return S_ERROR('%s is not a directory' % directory)

    if not isinstance(fileNames, list):
        fileNames = [fileNames]

    gLogger.verbose('Will look for POOL XML Catalog file types in %s for %s' %
                    (directory, ', '.join(fileNames)))

    finalCatList = _getPoolCatalogs(directory)

    # Create POOL catalog with final list of catalog files and extract GUIDs
    generated = []
    pfnTypes = {}
    catalog = PoolXMLCatalog(finalCatList)
    for fname in fileNames:
        typeFile = str(catalog.getTypeByPfn(fname))
        if not typeFile:
            typeFile = 'ROOT_All'
            generated.append(fname)

        pfnTypes[fname] = typeFile

    if not generated:
        gLogger.info('Found Types from POOL XML Catalogue for all files: %s' %
                     ', '.join(fileNames))
    else:
        gLogger.info(
            'GUIDs not found from POOL XML Catalogue (and were generated) for: %s'
            % ', '.join(generated))

    result = S_OK(pfnTypes)
    result['directory'] = directory
    result['generated'] = generated
    return result
Example #32
 def __sendAccounting(self, regSuc, regTotal, regTime, transEndTime):
     transSuc = 0
     transSize = 0
     missingSize = []
     for lfn in self.fileDict.keys():
         if self.fileDict[lfn].get('Status') == 'Finished':
             transSuc += 1
             if lfn not in self.catalogMetadata:
                 missingSize.append(lfn)
     if missingSize:
         self.__updateMetadataCache(missingSize)
     for lfn in self.fileDict.keys():
         if self.fileDict[lfn].get('Status') == 'Finished':
             transSize += self.catalogMetadata[lfn]['Size']
     transTotal = 0
     for state in self.statusSummary:
         transTotal += self.statusSummary[state]
     submitTime = fromString(self.submitTime)
     endTime = fromString(transEndTime)
     oAccounting = DataOperation()
     #oAccounting.setEndTime(endTime)
     oAccounting.setEndTime(transEndTime)
     oAccounting.setStartTime(submitTime)
     accountingDict = {}
     accountingDict['OperationType'] = 'replicateAndRegister'
     accountingDict['User'] = '******'
     accountingDict['Protocol'] = 'FTS'
     accountingDict['RegistrationTime'] = regTime
     accountingDict['RegistrationOK'] = regSuc
     accountingDict['RegistrationTotal'] = regTotal
     accountingDict['TransferOK'] = transSuc
     accountingDict['TransferTotal'] = transTotal
     accountingDict['TransferSize'] = transSize
     accountingDict['FinalStatus'] = self.requestStatus
     accountingDict['Source'] = self.sourceSE
     accountingDict['Destination'] = self.targetSE
     c = transEndTime - submitTime
     transferTime = c.days * 86400 + c.seconds
     accountingDict['TransferTime'] = transferTime
     oAccounting.setValuesFromDict(accountingDict)
     gLogger.verbose("Attempting to commit accounting message...")
     oAccounting.commit()
     gLogger.verbose("...committed.")
     return S_OK()
Example #33
  def __init__(self, host, port, user=None, password=None, indexPrefix='', useSSL=True):
    """ c'tor
    :param self: self reference
    :param str host: hostname of the ElasticSearch server
    :param int port: port number on which the ElasticSearch server listens
    :param str user: user name to access the db
    :param str password: if the db is password protected we need to provide a password
    :param str indexPrefix: it is the indexPrefix used to get all indexes
    :param bool useSSL: whether to use a secure connection (enabled by default)
    """

    self.__indexPrefix = indexPrefix
    self._connected = False
    if user and password:
      gLogger.debug("Specified username and password")
      self.__url = "https://%s:%s@%s:%d" % (user, password, host, port)
    else:
      gLogger.debug("Username and password not specified")
      self.__url = "http://%s:%d" % (host, port)

    gLogger.verbose("Connecting to %s:%s, useSSL = %s" % (host, port, useSSL))

    if useSSL:
      bd = BundleDeliveryClient()
      retVal = bd.getCAs()
      casFile = None
      if not retVal['OK']:
        gLogger.error("CAs file does not exists:", retVal['Message'])
        casFile = certifi.where()
      else:
        casFile = retVal['Value']

      self.__client = Elasticsearch(self.__url,
                                    timeout=self.__timeout,
                                    use_ssl=True,
                                    verify_certs=True,
                                    ca_certs=casFile)
    else:
      self.__client = Elasticsearch(self.__url, timeout=self.__timeout)

    gLogger.verbose("ElasticSearchDB URL: %s" % self.__url)

    self.__tryToConnect()
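A hypothetical instantiation of this constructor (assuming the class is the ElasticSearchDB wrapper named in its own log output; host, port and credentials are illustrative):

db = ElasticSearchDB('es.example.org', 9200, user='dirac', password='secret',
                     indexPrefix='prod', useSSL=True)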
Example #34
  def __compileWebApp(self):
    """
    This method compiles the DIRAC web framework.
    """
    dctArgs = []
    if self.params.extjspath:
      dctArgs.append("-P '%s'" % self.params.extjspath)

    destDir = self.params.destination
    dctArgs.append("-D '%s'" % destDir)
    scriptName = os.path.join("%s/WebAppDIRAC/scripts/" % destDir, "dirac-webapp-compile.py")
    if not os.path.isfile(scriptName):
      return S_ERROR("%s file does not exists!" % scriptName)
    dctArgs.append("-n '%s'" % self.params.name)
    cmd = "'%s' %s" % (scriptName, " ".join(dctArgs))
    gLogger.verbose("Executing %s" % cmd)
    if os.system(cmd) != 0:
      return S_ERROR("Failed to execute the command")
    return S_OK()
Example #35
    def commit(self):
        """
        Send the registers in a bundle mode
        """
        rpcClient = self.__getRPCClient()
        sent = 0

        # create a local reference and prevent other running commits
        # to take the same data second time
        self.__registersListLock.acquire()
        registersList = self.__registersList
        self.__registersList = []
        self.__registersListLock.release()

        try:
            while registersList:
                registersToSend = registersList[:self.__maxRecordsInABundle]
                retVal = rpcClient.commitRegisters(registersToSend)
                if retVal['OK']:
                    self.__lastSuccessfulCommit = time.time()
                else:
                    gLogger.error('Error sending accounting record',
                                  retVal['Message'])
                    if self.__failoverEnabled and time.time() - self.__lastSuccessfulCommit > self.__maxTimeRetrying:
                        gLogger.verbose("Sending accounting records to failover")
                        result = _sendToFailover(retVal['rpcStub'])
                        if not result['OK']:
                            return result
                    else:
                        return S_ERROR(
                            "Cannot commit data to DataStore service")
                sent += len(registersToSend)
                del registersList[:self.__maxRecordsInABundle]
        except Exception as e:  # pylint: disable=broad-except
            gLogger.exception("Error committing", lException=e)
            return S_ERROR("Error committing %s" % repr(e).replace(',)', ')'))
        finally:
            # if something is left because of an error return it to the main list
            self.__registersList.extend(registersList)

        return S_OK(sent)
Example #36
def _createReplication(targetSE, sourceSE, prodID, datatype, extraname=''):
    """Creates the replication transformation based on the given parameters"""

    from DIRAC.TransformationSystem.Client.Transformation import Transformation
    from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
    metadata = {"Datatype": datatype, "ProdID": prodID}

    trans = Transformation()
    transName = 'replicate_%s_%s' % (str(prodID), ",".join(targetSE))
    if extraname:
        transName += "_%s" % extraname

    trans.setTransformationName(transName)
    description = 'Replicate files for prodID %s to %s' % (str(prodID),
                                                           ",".join(targetSE))
    trans.setDescription(description)
    trans.setLongDescription(description)
    trans.setType('Replication')
    trans.setGroup('Replication')
    trans.setPlugin('Broadcast')
    res = trans.setSourceSE(sourceSE)
    if not res['OK']:
        gLogger.error(res['Message'])
        exit(1)
    res = trans.setTargetSE(targetSE)
    if not res['OK']:
        gLogger.error(res['Message'])
        exit(1)

    res = trans.addTransformation()
    if not res['OK']:
        gLogger.error(res['Message'])
        exit(1)
    gLogger.verbose(res)
    trans.setStatus('Active')
    trans.setAgentType('Automatic')
    currtrans = trans.getTransformationID()['Value']
    client = TransformationClient()
    res = client.createTransformationInputDataQuery(currtrans, metadata)
    if res['OK']:
        gLogger.always("Successfully created replication transformation")
        return S_OK()
    else:
        gLogger.error("Failure during replication creation", res['Message'])
        return S_ERROR("Failed to create transformation")
Example #37
    def addToCache(self, fileName, lifeTime=False):
        """
        Adds a new file to the cache. If the file is already there, its timer is reset.
        """

        # Check to see if the file should be excluded.
        for pattern in self.__cacheExceptions:
            if re.match(pattern, fileName):
                gLogger.verbose(
                    'File matches exclusion pattern. Ignoring: %s' % fileName)
                return S_OK(fileName)

        if not lifeTime:
            lifeTime = self.__defaultLifeTime
        now = time.time()
        self.__cachedFiles[fileName] = [now, lifeTime]
        gLogger.verbose('File added to cache. File %s. Lifetime: %d' %
                        (fileName, lifeTime))
        return S_OK(fileName)
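Each cache entry stores the pair [insertionTime, lifeTime]; an entry is expired once insertionTime + lifeTime lies in the past, which is exactly the test the purge method in Example #42 applies. The check in isolation:

import time

def isExpired(entry):
  # entry is the [insertionTime, lifeTime] pair stored by addToCache
  insertionTime, lifeTime = entry
  return insertionTime + lifeTime < time.time()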
Example #38
  def _loadHandlerInit( self ):
    handlerClass = self._svcData[ 'classObj' ]
    handlerName = handlerClass.__name__
    handlerInitMethods = self.__searchInitFunctions( handlerClass )
    try:
      handlerInitMethods.append( getattr( self._svcData[ 'moduleObj' ], "initialize%s" % handlerName ) )
    except AttributeError:
      gLogger.verbose( "Not found global initialization function for service" )

    if handlerInitMethods:
      gLogger.info( "Found %s initialization methods" % len( handlerInitMethods ) )

    handlerInfo = {}
    handlerInfo[ "name" ] = handlerName
    handlerInfo[ "module" ] = self._svcData[ 'moduleObj' ]
    handlerInfo[ "class" ] = handlerClass
    handlerInfo[ "init" ] = handlerInitMethods

    return S_OK( handlerInfo )
Example #39
    def userHasProxy(self, userDN, userGroup, validSeconds=0):
        """ Check if a user(DN-group) has a proxy in the proxy management
        Updates internal cache if needed to minimize queries to the service

        :param basestring userDN: user DN
        :param basestring userGroup: user group
        :param int validSeconds: proxy validity time in seconds

        :return: S_OK()/S_ERROR()
        """
        cacheKey = (userDN, userGroup)
        if self.__usersCache.exists(cacheKey, validSeconds):
            return S_OK(True)
        # Get list of users from the DB with proxies valid for at least 300 seconds
        gLogger.verbose("Updating list of users in proxy management")
        retVal = self.__refreshUserCache(validSeconds)
        if not retVal['OK']:
            return retVal
        return S_OK(self.__usersCache.exists(cacheKey, validSeconds))
Example #40
def makeGuid(fileNames):
    """ Function to retrieve a file GUID using Root.
  """
    if isinstance(fileNames, basestring):
        fileNames = [fileNames]

    fileGUIDs = {}
    for fileName in fileNames:
        res = getRootFileGUID(fileName)
        if res['OK']:
            gLogger.verbose('GUID from ROOT', '%s' % res['Value'])
            fileGUIDs[fileName] = res['Value']
        else:
            gLogger.error(
                'Could not obtain GUID from file through Gaudi, using standard DIRAC method'
            )
            fileGUIDs[fileName] = DIRACMakeGUID(fileName)

    return fileGUIDs
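A hypothetical use of makeGuid (the file name is illustrative); it returns a plain dict mapping each file name to a GUID:

fileGUIDs = makeGuid('/data/00012345_00000678_1.dst')
for fname, guid in fileGUIDs.items():
  print(fname, guid)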
Example #41
    def _syncSites(self):
        '''
        Sync sites: compares CS with DB and does the necessary modifications.
        '''

        gLogger.info('-- Synchronizing sites --')

        # sites in CS
        res = getSites()
        if not res['OK']:
            return res
        sitesCS = res['Value']

        gLogger.verbose('%s sites found in CS' % len(sitesCS))

        # sites in RSS
        result = self.rStatus.selectStatusElement('Site',
                                                  'Status',
                                                  meta={'columns': ['Name']})
        if not result['OK']:
            return result
        sitesDB = [siteDB[0] for siteDB in result['Value']]

        # Sites that are in DB but not (anymore) in CS
        toBeDeleted = list(set(sitesDB).difference(set(sitesCS)))
        gLogger.verbose('%s sites to be deleted' % len(toBeDeleted))

        # Delete sites
        for siteName in toBeDeleted:
            deleteQuery = self.rStatus._extermineStatusElement(
                'Site', siteName)
            gLogger.verbose('Deleting site %s' % siteName)
            if not deleteQuery['OK']:
                return deleteQuery

        # Sites that are in CS but not (anymore) in DB
        toBeAdded = list(set(sitesCS).difference(set(sitesDB)))
        gLogger.verbose('%s site entries to be added' % len(toBeAdded))

        for site in toBeAdded:
            query = self.rStatus.addIfNotThereStatusElement(
                'Site',
                'Status',
                name=site,
                statusType='all',
                status=self.defaultStatus,
                elementType='Site',
                tokenOwner=self.tokenOwner,
                reason='Synchronized')
            if not query['OK']:
                return query

        return S_OK()
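The synchronization above boils down to a two-way set difference between CS and DB; the core of that pattern, separated out:

def diffSync(csItems, dbItems):
  # Items in the DB but no longer in the CS are to be deleted,
  # items in the CS but not yet in the DB are to be added.
  toBeDeleted = set(dbItems) - set(csItems)
  toBeAdded = set(csItems) - set(dbItems)
  return sorted(toBeDeleted), sorted(toBeAdded)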
Example #42
 def purge(self):
     """
      Purge expired files
      """
     now = time.time()
     filesToDelete = []
     for fileName in self.__cachedFiles:
         fileData = self.__cachedFiles[fileName]
         if fileData[0] + fileData[1] < now:
             filesToDelete.append(fileName)
     while filesToDelete:
         fileName = filesToDelete.pop()
         try:
             gLogger.verbose("Purging %s" % fileName)
             os.unlink("%s/%s" % (self.__filesLocation, fileName))
          except Exception as e:
              gLogger.error("Can't delete file %s: %s" %
                            (fileName, str(e)))
          del self.__cachedFiles[fileName]
Example #43
    def generate(self, reportRequest):
        """
        It retrieves the data from the database and creates the plot

        :param dict reportRequest: contains the plot attributes
        """
        reportHash = reportRequest["hash"]
        reportName = reportRequest["reportName"]
        if reportName in self.__reportNameMapping:
            reportRequest["reportName"] = self.__reportNameMapping[reportName]

        gLogger.info("Retrieving data for %s:%s" %
                     (reportRequest["typeName"], reportRequest["reportName"]))
        sT = time.time()
        retVal = self.__retrieveReportData(reportRequest, reportHash)
        reportGenerationTime = time.time() - sT
        if not retVal["OK"]:
            return retVal
        if not reportRequest["generatePlot"]:
            return retVal
        reportData = retVal["Value"]
        gLogger.info("Plotting data for %s:%s" %
                     (reportRequest["typeName"], reportRequest["reportName"]))
        sT = time.time()
        retVal = self.__generatePlotForReport(reportRequest, reportHash,
                                              reportData)
        plotGenerationTime = time.time() - sT
        gLogger.verbose(
            "Time for %s:%s - Report %.2f Plot %.2f (%.2f%% r/p)" % (
                reportRequest["typeName"],
                reportRequest["reportName"],
                reportGenerationTime,
                plotGenerationTime,
                ((reportGenerationTime * 100 /
                  plotGenerationTime) if plotGenerationTime else 0.0),
            ))
        if not retVal["OK"]:
            return retVal
        plotDict = retVal["Value"]
        if "retrieveReportData" in reportRequest["extraArgs"] and reportRequest[
                "extraArgs"]["retrieveReportData"]:
            plotDict["reportData"] = reportData
        return S_OK(plotDict)
Example #44
def getAncestorFiles(inputData, ancestorDepth):
    """ Returns S_OK(<list of files>) or S_ERROR(<Message>) after querying the
      Bookkeeping for ancestor files.

      Input data can be an LFN string or a list of LFNs.  Ancestor depth is an integer or
      string that converts to an integer.

      If successful, the original input data LFNs are also returned in the list.
  """
    res = getFileAncestors(inputData, ancestorDepth)
    if not res['OK']:
        return res
    inputDataWithAncestors = res['Value'].keys()
    for ancestorList in res['Value']:
        inputDataWithAncestors += res['Value'][ancestorList]
    totalFiles = len(inputDataWithAncestors) - len(inputData)
    gLogger.verbose('%s ancestor files retrieved from the bookkeeping for ancestor depth %s' %
                    (totalFiles, ancestorDepth))
    return S_OK(inputDataWithAncestors)
Example #45
    def haltInstances(cls, vmList):
        """
        Common haltInstances for Running(from class VirtualMachineManagerHandler) and
        Stalled(from checkStalledInstances periodic task) to Halt
        """
        failed = {}
        successful = {}

        for instanceID in vmList:
            instanceID = int(instanceID)
            result = cls.virtualMachineDB.getUniqueID(instanceID)
            if not result["OK"]:
                gLogger.error("haltInstances: on getUniqueID call: %s" % result["Message"])
                continue
            uniqueID = result["Value"]

            result = cls.createEndpoint(uniqueID)
            if not result["OK"]:
                gLogger.error("haltInstances: on createEndpoint call: %s" % result["Message"])
                continue

            endpoint = result["Value"]

            # Get proxy to be used to connect to the cloud endpoint
            authType = endpoint.parameters.get("Auth")
            if authType and authType.lower() in ["x509", "voms"]:
                siteName = endpoint.parameters["Site"]
                ceName = endpoint.parameters["CEName"]
                gLogger.verbose("Getting cloud proxy for %s/%s" % (siteName, ceName))
                result = getProxyFileForCloud(endpoint)
                if not result["OK"]:
                    continue
                endpoint.setProxy(result["Value"])

            result = endpoint.stopVM(uniqueID)
            if result["OK"]:
                cls.virtualMachineDB.recordDBHalt(instanceID, 0)
                successful[instanceID] = True
            else:
                failed[instanceID] = result["Message"]

        return S_OK({"Successful": successful, "Failed": failed})
Example #46
 def submitTasks( self ):
   gLogger.info( "submitTasks: Submitting tasks for transformations" )
   res = getProxyInfo( False, False )
   if not res['OK']:
     gLogger.error( "submitTasks: Failed to determine credentials for submission", res['Message'] )
     return res
   proxyInfo = res['Value']
   owner = proxyInfo['username']
   ownerGroup = proxyInfo['group']
   gLogger.info( "submitTasks: Tasks will be submitted with the credentials %s:%s" % ( owner, ownerGroup ) )
   # Get the transformations which should be submitted
   tasksPerLoop = self.am_getOption( 'TasksPerLoop', 50 )
   status = self.am_getOption( 'SubmitStatus', ['Active', 'Completing'] )
   res = self._selectTransformations( transType = self.transType, status = status )
   if not res['OK']:
     return res
   for transformation in res['Value']:
     transID = transformation['TransformationID']
     transBody = transformation['Body']
     res = self.transClient.getTasksToSubmit( transID, tasksPerLoop )
     if not res['OK']:
       gLogger.error( "submitTasks: Failed to obtain tasks for transformation", "%s %s" % ( transID, res['Message'] ) )
       continue
     tasks = res['Value']['JobDictionary']
     if not tasks:
       gLogger.verbose( "submitTasks: No tasks found for submission for transformation %s" % transID )
       continue
     gLogger.info( "submitTasks: Obtained %d tasks for submission for transformation %s" % ( len( tasks ), transID ) )
     res = self.taskManager.prepareTransformationTasks( transBody, tasks, owner, ownerGroup )
     if not res['OK']:
       gLogger.error( "submitTasks: Failed to prepare tasks for transformation", "%s %s" % ( transID, res['Message'] ) )
       continue
     res = self.taskManager.submitTransformationTasks( res['Value'] )
     if not res['OK']:
       gLogger.error( "submitTasks: Failed to submit prepared tasks for transformation", "%s %s" % ( transID, res['Message'] ) )
       continue
     res = self.taskManager.updateDBAfterTaskSubmission( res['Value'] )
     if not res['OK']:
       gLogger.error( "submitTasks: Failed to update DB after task submission for transformation", "%s %s" % ( transID, res['Message'] ) )
       continue
   gLogger.info( "submitTasks: Submission of transformation tasks complete" )
   return S_OK()
Example #47
    def createCatalog(self, catalogName, useProxy=False):
        """ Create a file catalog object from its name and CS description
    """
        if useProxy:
            catalog = FileCatalogProxyClient(catalogName)
            return S_OK(catalog)

        # get the CS description first
        catalogPath = getCatalogPath(catalogName)
        catalogType = gConfig.getValue(catalogPath + '/CatalogType',
                                       catalogName)
        catalogURL = gConfig.getValue(catalogPath + '/CatalogURL', '')

        self.log.verbose('Creating %s client' % catalogName)
        moduleRootPaths = getInstalledExtensions()
        for moduleRootPath in moduleRootPaths:
            gLogger.verbose("Trying to load from root path %s" %
                            moduleRootPath)
            #moduleFile = os.path.join( rootPath, moduleRootPath, "Resources", "Catalog", "%sClient.py" % catalogType )
            #gLogger.verbose( "Looking for file %s" % moduleFile )
            #if not os.path.isfile( moduleFile ):
            #  continue
            try:
                # This enforces the convention that the plug in must be named after the file catalog
                moduleName = "%sClient" % (catalogType)
                catalogModule = __import__(
                    '%s.Resources.Catalog.%s' % (moduleRootPath, moduleName),
                    globals(), locals(), [moduleName])
            except ImportError as x:
                if "No module" in str(x):
                    gLogger.debug('Catalog module %s not found in %s' %
                                  (catalogType, moduleRootPath))
                else:
                    errStr = "Failed attempt to import %s from the path %s: %s" % (
                        catalogType, moduleRootPath, x)
                    gLogger.error(errStr)
                continue
            except Exception as x:
                errStr = "Failed attempt to import %s from the path %s: %s" % (
                    catalogType, moduleRootPath, x)
                gLogger.error(errStr)
                continue
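Example #47 is cut off mid-loop, but the convention it relies on (a catalog plug-in lives at <extensionRoot>.Resources.Catalog.<CatalogType>Client) can be sketched more compactly with importlib. The helper below is a hypothetical illustration of that lookup, not the DIRAC implementation:

import importlib
from DIRAC import S_OK, S_ERROR

def loadCatalogClient(catalogType, moduleRootPaths):
    """Return the first <catalogType>Client class found under the given roots (hypothetical)."""
    moduleName = "%sClient" % catalogType
    for moduleRootPath in moduleRootPaths:
        try:
            module = importlib.import_module("%s.Resources.Catalog.%s" % (moduleRootPath, moduleName))
            return S_OK(getattr(module, moduleName))
        except (ImportError, AttributeError):
            continue  # not in this extension root, try the next one
    return S_ERROR("Catalog module %s not found" % moduleName)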
Example #48
0
 def checkReservedTasks( self ):
   gLogger.info( "checkReservedTasks: Checking Reserved tasks" )
   # Get the transformations which should be checked
   status = self.am_getOption( 'CheckReservedStatus', ['Active', 'Completing', 'Stopped'] )
   res = self._selectTransformations( transType = self.transType, status = status, agentType = [] )
   if not res['OK']:
     return res
   for transformation in res['Value']:
     transID = transformation['TransformationID']
     # Select the tasks which have been in Reserved status for more than 1 hour for selected transformations
     condDict = {"TransformationID":transID, "ExternalStatus":'Reserved'}
     time_stamp_older = str( datetime.datetime.utcnow() - datetime.timedelta( hours = 1 ) )
     time_stamp_newer = str( datetime.datetime.utcnow() - datetime.timedelta( days = 7 ) )
     res = self.transClient.getTransformationTasks( condDict = condDict, older = time_stamp_older, newer = time_stamp_newer, timeStamp = 'LastUpdateTime' )
     if not res['OK']:
       gLogger.error( "checkReservedTasks: Failed to get Reserved tasks for transformation", "%s %s" % ( transID, res['Message'] ) )
       continue
     if not res['Value']:
       gLogger.verbose( "checkReservedTasks: No Reserved tasks found for transformation %s" % transID )
       continue
     res = self.taskManager.updateTransformationReservedTasks( res['Value'] )
     if not res['OK']:
       gLogger.info( "checkReservedTasks: No Reserved tasks found for transformation %s" % transID )
       continue
     noTasks = res['Value']['NoTasks']
     taskNameIDs = res['Value']['TaskNameIDs']
     # For the tasks with no associated request found re-set the status of the task in the transformationDB
     for taskName in noTasks:
       transID, taskID = taskName.split( '_' )
       gLogger.info( "checkReservedTasks: Resetting status of %s to Created as no associated task found" % ( taskName ) )
       res = self.transClient.setTaskStatus( int( transID ), int( taskID ), 'Created' )
       if not res['OK']:
         gLogger.warn( "checkReservedTasks: Failed to update task status and ID after recovery", "%s %s" % ( taskName, res['Message'] ) )
     # For the tasks for which an associated request was found update the task details in the transformationDB
     for taskName, extTaskID in taskNameIDs.items():
       transID, taskID = taskName.split( '_' )
       gLogger.info( "checkReservedTasks: Resetting status of %s to Created with ID %s" % ( taskName, extTaskID ) )
       res = self.transClient.setTaskStatusAndWmsID( int( transID ), int( taskID ), 'Submitted', str( extTaskID ) )
       if not res['OK']:
         gLogger.warn( "checkReservedTasks: Failed to update task status and ID after recovery", "%s %s" % ( taskName, res['Message'] ) )
   gLogger.info( "checkReservedTasks: Updating of reserved tasks complete" )
   return S_OK()
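The selection above targets tasks stuck in Reserved for between one hour and seven days, and task names encode the transformation and task IDs. A small sketch of both conventions (the helper name is hypothetical):

import datetime

def reservedTaskWindow(minHours=1, maxDays=7):
    """Return (older, newer) timestamp strings bounding the stuck-task window."""
    now = datetime.datetime.utcnow()
    older = str(now - datetime.timedelta(hours=minHours))
    newer = str(now - datetime.timedelta(days=maxDays))
    return older, newer

# Task names encode "<transID>_<taskID>", split as in the recovery loops above
transID, taskID = "1234_56".split('_')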
Example #49
0
    def __checkoutFromGit(self):
        if self.params.vcsBranch:
            brCmr = "-b %s" % self.params.vcsBranch
        else:
            brCmr = ""
        fDirName = os.path.join(self.params.destination, self.params.name)
        cmd = "git clone %s '%s' '%s'" % (brCmr, self.params.sourceURL,
                                          fDirName)
        gLogger.verbose("Executing: %s" % cmd)
        if os.system(cmd):
            return S_ERROR("Error while retrieving sources from git")

        branchName = "DIRACDistribution-%s" % os.getpid()

        isTagCmd = "( cd '%s'; git tag -l | grep '%s' )" % (
            fDirName, self.params.version)
        if os.system(isTagCmd):
            #No tag found, assume branch
            branchSource = 'origin/%s' % self.params.version
        else:
            branchSource = self.params.version

        cmd = "( cd '%s'; git checkout -b '%s' '%s' )" % (fDirName, branchName,
                                                          branchSource)

        gLogger.verbose("Executing: %s" % cmd)
        exportRes = os.system(cmd)

        #Add the keyword substitution
        gLogger.notice("Replacing keywords (can take a while)...")
        self.replaceKeywordsWithGit(fDirName)

        shutil.rmtree("%s/.git" % fDirName, ignore_errors=True)
        shutil.rmtree("%s/tests" % fDirName, ignore_errors=True)
        shutil.rmtree("%s/docs" % fDirName, ignore_errors=True)
        shutil.rmtree("%s/tests" % self.params.destination, ignore_errors=True)
        shutil.rmtree("%s/docs" % self.params.destination, ignore_errors=True)

        if exportRes:
            return S_ERROR("Error while exporting from git")

        return S_OK()
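__checkoutFromGit builds shell strings for os.system; the same clone step can be sketched with subprocess and an argument list, which sidesteps the quoting problems of string interpolation. This is an alternative sketch, not the distribution tool's actual code:

import subprocess
from DIRAC import S_OK, S_ERROR, gLogger

def gitClone(sourceURL, destDir, branch=None):
    """Clone sourceURL into destDir, optionally from a branch (hypothetical helper)."""
    cmd = ['git', 'clone']
    if branch:
        cmd += ['-b', branch]
    cmd += [sourceURL, destDir]
    gLogger.verbose("Executing: %s" % ' '.join(cmd))
    if subprocess.call(cmd):
        return S_ERROR("Error while retrieving sources from git")
    return S_OK()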
Example #50
0
    def initialize(self):
        """This replaces the standard initialize from Service"""
        # Build the URLs
        self._url = self._cfg.getURL()
        if not self._url:
            return S_ERROR("Could not build service URL for %s" % GatewayService.GATEWAY_NAME)
        gLogger.verbose("Service URL is %s" % self._url)
        # Load handler
        result = self._loadHandlerInit()
        if not result["OK"]:
            return result
        self._handler = result["Value"]
        # Discover Handler
        self._threadPool = ThreadPoolExecutor(max(1, self._cfg.getMaxThreads()))  # max_workers must be positive

        self._msgBroker = MessageBroker("%sMSB" % GatewayService.GATEWAY_NAME, threadPool=self._threadPool)
        self._msgBroker.useMessageObjects(False)
        getGlobalMessageBroker().useMessageObjects(False)
        self._msgForwarder = MessageForwarder(self._msgBroker)
        return S_OK()
Example #51
0
  def _getProductionID(self, prodName, connection=False):
    """ Method returns ID of production specified by the prodName

    :param str prodName: the Production name
    """
    try:
      prodName = int(prodName)
      cmd = "SELECT ProductionID from Productions WHERE ProductionID=%d;" % prodName
    except (TypeError, ValueError):
      if not isinstance(prodName, six.string_types):
        return S_ERROR("Production should be ID or name")
      cmd = "SELECT ProductionID from Productions WHERE ProductionName='%s';" % prodName
    res = self._query(cmd, connection)
    if not res['OK']:
      gLogger.error("Failed to obtain production ID for production", "%s: %s" % (prodName, res['Message']))
      return res
    elif not res['Value']:
      gLogger.verbose("Production %s does not exist" % (prodName))
      return S_ERROR("Production does not exist")
    return S_OK(res['Value'][0][0])
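_getProductionID interpolates the value straight into the SQL string; with a DB-API style cursor the same ID-or-name dispatch would look like the sketch below. The cursor and its %s paramstyle are assumptions for illustration, not the DIRAC _query interface:

from DIRAC import S_OK, S_ERROR

def getProductionID(cursor, prodName):
    """Resolve a production by numeric ID or by name using parameterized queries (hypothetical)."""
    try:
        prodID = int(prodName)
        cursor.execute("SELECT ProductionID FROM Productions WHERE ProductionID = %s", (prodID,))
    except (TypeError, ValueError):
        cursor.execute("SELECT ProductionID FROM Productions WHERE ProductionName = %s", (prodName,))
    row = cursor.fetchone()
    return S_OK(row[0]) if row else S_ERROR("Production does not exist")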
Example #52
0
 def getUserPersistence(self, userDN, userGroup, validSeconds=0):
     """
 Check if a user(DN-group) has a proxy in the proxy management
   - Updates internal cache if needed to minimize queries to the
       service
 """
     cacheKey = (userDN, userGroup)
     userData = self.__usersCache.get(cacheKey, validSeconds)
     if userData:
         if userData['persistent']:
             return S_OK(True)
      # Get the list of users from the DB with proxies valid for at least validSeconds
     gLogger.verbose("Updating list of users in proxy management")
     retVal = self.__refreshUserCache(validSeconds)
     if not retVal['OK']:
         return retVal
     userData = self.__usersCache.get(cacheKey, validSeconds)
     if userData:
         return S_OK(userData['persistent'])
     return S_OK(False)
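getUserPersistence checks the cache, refreshes it once on a miss, and re-checks, so at most one remote query is issued per cache expiry. The same read-through pattern, generalized; the cache object and refresh callback here are hypothetical stand-ins:

from DIRAC import S_OK

def getWithRefresh(cache, key, validSeconds, refresh):
    """Read-through lookup: check, refresh once on a miss, check again (hypothetical)."""
    value = cache.get(key, validSeconds)
    if value is not None:
        return S_OK(value)
    retVal = refresh(validSeconds)  # one bulk refresh updates all cached entries
    if not retVal['OK']:
        return retVal
    return S_OK(cache.get(key, validSeconds))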
Example #53
0
 def getStorageParameters(self, protocol):
     """ Get protocol specific options
 """
     gLogger.verbose(
         "StorageElement.getStorageParameters: Obtaining storage parameters for %s protocol %s."
         % (self.name, protocol))
      res = self.getProtocols()
      if not res['OK']:
          return res
      availableProtocols = res['Value']
      if protocol not in availableProtocols:
         errStr = "StorageElement.getStorageParameters: Requested protocol not available for SE."
         gLogger.error(errStr, '%s for %s' % (protocol, self.name))
         return S_ERROR(errStr)
      for storage in self.storages:
          res = storage.getParameters()
          if not res['OK']:
              continue  # skip storages that cannot report their parameters
          storageParameters = res['Value']
         if storageParameters['ProtocolName'] == protocol:
             return S_OK(storageParameters)
     errStr = "StorageElement.getStorageParameters: Requested protocol supported but no object found."
     gLogger.error(errStr, "%s for %s" % (protocol, self.name))
     return S_ERROR(errStr)
Example #54
0
 def __copyToExternalSE(self, localFilePath, sbPath):
     """
     Copy uploaded file to external SE
     """
     try:
         dm = DataManager()
         result = dm.put(sbPath, localFilePath, self.__externalSEName)
         if not result["OK"]:
             return result
         if "Successful" not in result["Value"]:
             gLogger.verbose("Oops, no successful transfers there", str(result))
             return S_ERROR("RM returned OK to the action but no successful transfers were there")
         okTrans = result["Value"]["Successful"]
         if sbPath not in okTrans:
             gLogger.verbose("Ooops, SB transfer wasn't in the successful ones", str(result))
             return S_ERROR("RM returned OK to the action but SB transfer wasn't in the successful ones")
         return S_OK((self.__externalSEName, okTrans[sbPath]))
     except Exception as e:
         gLogger.error("Error while moving sandbox to SE", "%s" % repr(e).replace(",)", ")"))
         return S_ERROR("Error while moving sandbox to SE")
Example #55
0
def getGUID(fileNames, directory=''):
    """ This function searches the directory for POOL XML catalog files and extracts the GUID.

      fileNames can be a string or a list, directory defaults to PWD.
  """
    if not directory:
        directory = os.getcwd()

    if not os.path.isdir(directory):
        return S_ERROR('%s is not a directory' % directory)

    if not isinstance(fileNames, list):
        fileNames = [fileNames]

    gLogger.verbose('Will look for POOL XML Catalog GUIDs in %s for %s' %
                    (directory, ', '.join(fileNames)))
    patterns = ['*.xml', '*.xml*gz']
    omissions = [r'\.bak$']  # to be ignored for production files

    #First obtain valid list of unpacked catalog files in directory
    poolCatalogList = []

    for pattern in patterns:
        fileList = glob.glob(os.path.join(directory, pattern))
        for fname in fileList:
            if fname.endswith('.bak'):
                gLogger.verbose('Ignoring BAK file: %s' % fname)
            elif tarfile.is_tarfile(fname):
                try:
                    gLogger.debug('Unpacking catalog XML file %s' %
                                  (os.path.join(directory, fname)))
                    tarFile = tarfile.open(os.path.join(directory, fname), 'r')
                    for member in tarFile.getmembers():
                        tarFile.extract(member, directory)
                        poolCatalogList.append(
                            os.path.join(directory, member.name))
                except Exception as x:
                    gLogger.error('Could not untar %s with exception %s' %
                                  (fname, str(x)))
            else:
                poolCatalogList.append(fname)
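Example #55 extracts tar members straight into the target directory. A hardened sketch of that step with a path-traversal guard; the guard is an addition for illustration and is not in the original code:

import os
import tarfile

def extractCatalogs(tarPath, directory):
    """Extract catalog members of tarPath into directory, skipping unsafe paths (hypothetical)."""
    extracted = []
    base = os.path.realpath(directory)
    with tarfile.open(tarPath, 'r') as tarFile:
        for member in tarFile.getmembers():
            target = os.path.realpath(os.path.join(directory, member.name))
            if not (target == base or target.startswith(base + os.sep)):
                continue  # member would escape the target directory
            tarFile.extract(member, directory)
            extracted.append(os.path.join(directory, member.name))
    return extracted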
Example #56
0
  def __generateStorageObject( self, storageName, protocolName, protocol, path = None,
                              host = None, port = None, spaceToken = None, wsUrl = None, parameters = None ):
    # Avoid a mutable default argument
    parameters = parameters if parameters is not None else {}

    storageType = protocolName
    if self.proxy:
      storageType = 'Proxy'
    
    moduleRootPaths = getInstalledExtensions()
    moduleLoaded = False
    path = ( path or '' ).rstrip( '/' )  # path may be None
    if not path:
      path = '/'
    for moduleRootPath in moduleRootPaths:
      if moduleLoaded:
        break
      gLogger.verbose( "Trying to load from root path %s" % moduleRootPath )
      moduleFile = os.path.join( rootPath, moduleRootPath, "Resources", "Storage", "%sStorage.py" % storageType )
      gLogger.verbose( "Looking for file %s" % moduleFile )
      if not os.path.isfile( moduleFile ):
        continue
      try:
        # This enforces the convention that the plug-in must be named after the protocol
        moduleName = "%sStorage" % ( storageType )
        storageModule = __import__( '%s.Resources.Storage.%s' % ( moduleRootPath, moduleName ),
                                    globals(), locals(), [moduleName] )
      except Exception as x:
        errStr = "StorageFactory._generateStorageObject: Failed to import %s: %s" % ( storageName, x )
        gLogger.exception( errStr )
        return S_ERROR( errStr )

      try:
        # Look up the plug-in class by name instead of building an eval string
        storageClass = getattr( storageModule, moduleName )
        storage = storageClass( storageName, protocol, path, host, port, spaceToken, wsUrl )
        if not storage.isOK():
          errStr = "StorageFactory._generateStorageObject: Failed to instantiate storage plug-in."
          gLogger.error( errStr, "%s" % ( moduleName ) )
          return S_ERROR( errStr )
      except Exception as x:
        errStr = "StorageFactory._generateStorageObject: Failed to instantiate %s(): %s" % ( moduleName, x )
        gLogger.exception( errStr )
        return S_ERROR( errStr )
Example #57
0
 def getTransformationFiles( self, condDict = None, older = None, newer = None, timeStamp = 'LastUpdate',
                             orderAttribute = None, limit = 10000, rpc = '', url = '', timeout = 1800 ):
   """ Gets all the transformation files for a transformation, incrementally.
       "limit" here is just used to determine the offset.
   """
   # Avoid a mutable default argument
   if condDict is None:
     condDict = {}
   rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
   transformationFiles = []
   # getting transformationFiles - incrementally
   offsetToApply = 0
   while True:
     res = rpcClient.getTransformationFiles( condDict, older, newer, timeStamp, orderAttribute, limit, offsetToApply )
     if not res['OK']:
       return res
     else:
       gLogger.verbose( "Result for limit %d, offset %d: %d" % ( limit, offsetToApply, len( res['Value'] ) ) )
       if res['Value']:
         transformationFiles = transformationFiles + res['Value']
         offsetToApply += limit
       if len( res['Value'] ) < limit:
         break
   return S_OK( transformationFiles )
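The while loop above is the standard limit/offset pagination used throughout these clients: keep fetching pages until a page shorter than the limit signals the end. Generalized as a sketch, where fetchPage is a hypothetical callable returning S_OK(list) / S_ERROR:

from DIRAC import S_OK

def fetchAll(fetchPage, limit=10000):
    """Accumulate pages until a page shorter than `limit` arrives (hypothetical)."""
    records = []
    offset = 0
    while True:
        res = fetchPage(limit, offset)
        if not res['OK']:
            return res
        records.extend(res['Value'])
        if len(res['Value']) < limit:
            break  # short page: nothing left on the server
        offset += limit
    return S_OK(records)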
Example #58
0
    def __getOutputLFNs(self,
                        prodID='12345',
                        prodJobID='6789',
                        prodXMLFile=''):
        """ Will construct the output LFNs for the production for visual inspection.
    """
        if not prodXMLFile:
            gLogger.verbose('Using workflow object to generate XML file')
            prodXMLFile = self.__createWorkflow()

        job = LHCbJob(prodXMLFile)
        result = preSubmissionLFNs(
            job._getParameters(),
            job.workflow.createCode(),  # pylint: disable=protected-access
            productionID=prodID,
            jobID=prodJobID)
        if not result['OK']:
            return result
        lfns = result['Value']
        gLogger.verbose(lfns)
        return result
Example #59
0
  def getPayloadProxyFromDIRACGroup(self, userDN, userGroup, requiredTimeLeft, token=None, proxyToConnect=None):
    """ Download a payload proxy with VOMS extensions depending on the group

        :param str userDN: user DN
        :param str userGroup: user group
        :param int requiredTimeLeft: required proxy lifetime in seconds
        :param str token: valid token to get a proxy
        :param X509Chain proxyToConnect: proxy as a chain

        :return: S_OK(X509Chain)/S_ERROR()
    """
    # Assign VOMS attribute
    vomsAttr = Registry.getVOMSAttributeForGroup(userGroup)
    if not vomsAttr:
      gLogger.verbose("No voms attribute assigned to group %s when requested payload proxy" % userGroup)
      return self.downloadProxy(userDN, userGroup, limited=True, requiredTimeLeft=requiredTimeLeft,
                                proxyToConnect=proxyToConnect, token=token)
    else:
      return self.downloadVOMSProxy(userDN, userGroup, limited=True, requiredTimeLeft=requiredTimeLeft,
                                    requiredVOMSAttribute=vomsAttr, proxyToConnect=proxyToConnect,
                                    token=token)
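A hypothetical usage of getPayloadProxyFromDIRACGroup: request a payload proxy valid for at least six hours. The wrapper, its arguments, and the lifetime are illustrative, not part of the original API:

from DIRAC import gLogger

def fetchPayloadProxy(proxyManager, userDN, userGroup):
    """Hypothetical usage: get a payload proxy valid for at least six hours."""
    res = proxyManager.getPayloadProxyFromDIRACGroup(userDN, userGroup, 6 * 3600)
    if not res['OK']:
        gLogger.error("Could not get payload proxy", res['Message'])
        return res
    return res  # res['Value'] is an X509Chain, per the docstring above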
Example #60
0
    def getProductionTransformations(self,
                                     prodName,
                                     condDict=None,
                                     older=None,
                                     newer=None,
                                     timeStamp=None,
                                     orderAttribute=None,
                                     limit=10000):
        """ Gets all the production transformations for a production, incrementally.
        "limit" here is just used to determine the offset.

    :param str prodName: the production name
    :return: the list of the transformations associated to the production
    """

        rpcClient = self._getRPC()
        productionTransformations = []

        if condDict is None:
            condDict = {}
        if timeStamp is None:
            timeStamp = 'CreationTime'
        # getting productionTransformations - incrementally
        offsetToApply = 0
        while True:
            res = rpcClient.getProductionTransformations(
                prodName, condDict, older, newer, timeStamp, orderAttribute,
                limit, offsetToApply)
            if not res['OK']:
                return res
            else:
                gLogger.verbose("Result for limit %d, offset %d: %d" %
                                (limit, offsetToApply, len(res['Value'])))
                if res['Value']:
                    productionTransformations += res['Value']
                    offsetToApply += limit
                if len(res['Value']) < limit:
                    break
        return S_OK(productionTransformations)