Example #1
class GridPilotDirector( PilotDirector ):
  """
    Base Grid PilotDirector class
    Derived classes must declare:
      self.Middleware: It must correspond to the string before "PilotDirector".
        (For proper naming of the logger)
      self.ResourceBrokers: list of Brokers used by the Director.
        (For proper error reporting)
  """
  def __init__( self, submitPool ):
    """
     Define some defaults and call parent __init__
    """
    self.gridEnv = GRIDENV

    self.cpuPowerRef = CPU_POWER_REF
    self.requirements = REQUIREMENTS
    self.rank = RANK
    self.fuzzyRank = FUZZY_RANK

    self.__failingWMSCache = DictCache()
    self.__ticketsWMSCache = DictCache()
    self.__listMatchWMSCache = DictCache()

    PilotDirector.__init__( self, submitPool )

  def configure( self, csSection, submitPool ):
    """
     Here goes common configuration for all Grid PilotDirectors
    """
    PilotDirector.configure( self, csSection, submitPool )
    self.reloadConfiguration( csSection, submitPool )

    self.__failingWMSCache.purgeExpired()
    self.__ticketsWMSCache.purgeExpired()
    for rb in self.__failingWMSCache.getKeys():
      if rb in self.resourceBrokers:
        try:
          self.resourceBrokers.remove( rb )
        except:
          pass

    self.resourceBrokers = List.randomize( self.resourceBrokers )

    if self.gridEnv:
      self.log.info( ' GridEnv:        ', self.gridEnv )
    if self.resourceBrokers:
      self.log.info( ' ResourceBrokers:', ', '.join( self.resourceBrokers ) )

  def configureFromSection( self, mySection ):
    """
      reload from CS
    """
    PilotDirector.configureFromSection( self, mySection )

    self.gridEnv = gConfig.getValue( mySection + '/GridEnv', self.gridEnv )
    if not self.gridEnv:
      # No specific option found, try a general one
      setup = gConfig.getValue( '/DIRAC/Setup', '' )
      if setup:
        instance = gConfig.getValue( '/DIRAC/Setups/%s/WorkloadManagement' % setup, '' )
        if instance:
          self.gridEnv = gConfig.getValue( '/Systems/WorkloadManagement/%s/GridEnv' % instance, '' )

    self.resourceBrokers = gConfig.getValue( mySection + '/ResourceBrokers', self.resourceBrokers )

    self.cpuPowerRef = gConfig.getValue( mySection + '/CPUPowerRef', self.cpuPowerRef )
    self.requirements = gConfig.getValue( mySection + '/Requirements', self.requirements )
    self.rank = gConfig.getValue( mySection + '/Rank', self.rank )
    self.fuzzyRank = gConfig.getValue( mySection + '/FuzzyRank', self.fuzzyRank )

  def _submitPilots( self, workDir, taskQueueDict, pilotOptions, pilotsToSubmit,
                     ceMask, submitPrivatePilot, privateTQ, proxy, pilotsPerJob ):
    """
      This method does the actual pilot submission to the Grid RB.
      The logic is as follows:
      - If there are no available RBs, it returns an error
      - If there is no VOMS extension in the proxy, it returns an error
      - It creates a temp directory
      - It prepares a JDL:
        part of it is common to gLite and LCG (the payload description),
        part of it is specific to each middleware
    """
    taskQueueID = taskQueueDict['TaskQueueID']
    # ownerDN = taskQueueDict['OwnerDN']
    credDict = proxy.getCredentials()['Value']
    ownerDN = credDict['identity']
    ownerGroup = credDict[ 'group' ]

    if not self.resourceBrokers:
      # Since we can exclude RBs from the list, it may become empty
      return S_ERROR( ERROR_RB )

    # Need to get VOMS extension for the later interactions with WMS
    ret = gProxyManager.getVOMSAttributes( proxy )
    if not ret['OK']:
      self.log.error( ERROR_VOMS, ret['Message'] )
      return S_ERROR( ERROR_VOMS )
    if not ret['Value']:
      return S_ERROR( ERROR_VOMS )
    vomsGroup = ret['Value'][0]

    workingDirectory = tempfile.mkdtemp( prefix = 'TQ_%s_' % taskQueueID, dir = workDir )
    self.log.verbose( 'Using working Directory:', workingDirectory )

    # Write JDL
    retDict = self._prepareJDL( taskQueueDict, workingDirectory, pilotOptions, pilotsPerJob,
                                ceMask, submitPrivatePilot, privateTQ )
    jdl = retDict['JDL']
    pilotRequirements = retDict['Requirements']
    rb = retDict['RB']
    if not jdl:
      try:
        shutil.rmtree( workingDirectory )
      except:
        pass
      return S_ERROR( ERROR_JDL )

    # Check that there are available queues for the Job:
    if self.enableListMatch:
      availableCEs = []
      now = Time.dateTime()
      availableCEs = self.listMatchCache.get( pilotRequirements )
      if availableCEs == False:
        availableCEs = self._listMatch( proxy, jdl, taskQueueID, rb )
        if availableCEs != False:
          self.log.verbose( 'LastListMatch', now )
          self.log.verbose( 'AvailableCEs ', availableCEs )
          self.listMatchCache.add( pilotRequirements, self.listMatchDelay * 60,
                                   value = availableCEs )                      # it is given in minutes
      if not availableCEs:
        try:
          shutil.rmtree( workingDirectory )
        except:
          pass
        return S_ERROR( ERROR_CE + ' TQ: %d' % taskQueueID )

    # Now we are ready for the actual submission

    self.log.verbose( 'Submitting Pilots for TaskQueue', taskQueueID )
    submitRet = self._submitPilot( proxy, pilotsPerJob, jdl, taskQueueID, rb )
    try:
      shutil.rmtree( workingDirectory )
    except:
      pass
    if not submitRet:
      return S_ERROR( 'Pilot Submission Failed for TQ %d ' % taskQueueID )
    # pilotReference, resourceBroker = submitRet

    submittedPilots = 0

    if pilotsPerJob != 1 and len( submitRet ) != pilotsPerJob:
      # Parametric jobs are used
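      # (a single parametric submission returns one parent reference per job, so the
      #  individual child pilot references are resolved before registering them in the DB)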
      for pilotReference, resourceBroker in submitRet:
        pilotReference = self._getChildrenReferences( proxy, pilotReference, taskQueueID )
        submittedPilots += len( pilotReference )
        pilotAgentsDB.addPilotTQReference( pilotReference, taskQueueID, ownerDN,
                      ownerGroup, resourceBroker, self.gridMiddleware,
                      pilotRequirements )
    else:
      for pilotReference, resourceBroker in submitRet:
        pilotReference = [pilotReference]
        submittedPilots += len( pilotReference )
        pilotAgentsDB.addPilotTQReference( pilotReference, taskQueueID, ownerDN,
                      ownerGroup, resourceBroker, self.gridMiddleware, pilotRequirements )

    # Pause briefly, proportional to the number of pilots just submitted
    time.sleep( 0.1 * submittedPilots )

    if pilotsToSubmit > pilotsPerJob:
      # Additional submissions are necessary, need to get a new token and iterate.
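      # A fresh proxy token is requested for the remaining pilots, any previous
      # /Security/ProxyToken option is replaced, pilotsPerJob is scaled to the number
      # of uses granted for the token, and the method recurses, accumulating the
      # count of submitted pilots in the final result.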
      pilotsToSubmit -= pilotsPerJob
      result = gProxyManager.requestToken( ownerDN, ownerGroup, max( pilotsToSubmit, self.maxJobsInFillMode ) )
      if not result[ 'OK' ]:
        self.log.error( ERROR_TOKEN, result['Message'] )
        result = S_ERROR( ERROR_TOKEN )
        result['Value'] = submittedPilots
        return result
      ( token, numberOfUses ) = result[ 'Value' ]
      for option in list( pilotOptions ):  # iterate over a copy, since matching options are removed
        if option.find( '-o /Security/ProxyToken=' ) == 0:
          pilotOptions.remove( option )
      pilotOptions.append( '-o /Security/ProxyToken=%s' % token )
      pilotsPerJob = max( 1, min( pilotsPerJob, int( numberOfUses / self.maxJobsInFillMode ) ) )
      result = self._submitPilots( workDir, taskQueueDict, pilotOptions,
                                   pilotsToSubmit, ceMask,
                                   submitPrivatePilot, privateTQ,
                                   proxy, pilotsPerJob )
      if not result['OK']:
        if 'Value' not in result:
          result['Value'] = 0
        result['Value'] += submittedPilots
        return result
      submittedPilots += result['Value']

    return S_OK( submittedPilots )

  def _prepareJDL( self, taskQueueDict, workingDirectory, pilotOptions, pilotsToSubmit, ceMask, submitPrivatePilot, privateTQ ):
    """
      This method should be overridden in a subclass
    """
    self.log.error( '_prepareJDL() method should be implemented in a subclass' )
    sys.exit()

  def _JobJDL( self, taskQueueDict, pilotOptions, ceMask ):
    """
     The Job JDL is the same for LCG and GLite
    """
    pilotJDL = 'Executable     = "%s";\n' % os.path.basename( self.pilot )
    executable = self.pilot

    pilotJDL += 'Arguments     = "%s";\n' % ' '.join( pilotOptions )

    pilotJDL += 'CPUTimeRef    = %s;\n' % taskQueueDict['CPUTime']

    pilotJDL += 'CPUPowerRef   = %s;\n' % self.cpuPowerRef
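    # The ClassAd fragment below scans up to the first seven GlueCECapability entries
    # for a "CPUScalingReferenceSI00=*" tag; if one is found its published value is used
    # as the queue power, otherwise it falls back to GlueHostBenchmarkSI00. QueueWorkRef
    # (queue power * max CPU time) is later required to exceed CPUWorkRef.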

    pilotJDL += """CPUWorkRef    = real( CPUTimeRef * CPUPowerRef );

Lookup        = "CPUScalingReferenceSI00=*";
cap = isList( other.GlueCECapability ) ? other.GlueCECapability : { "dummy" };
i0 = regexp( Lookup, cap[0] ) ? 0 : undefined;
i1 = isString( cap[1] ) && regexp( Lookup, cap[1] ) ? 1 : i0;
i2 = isString( cap[2] ) && regexp( Lookup, cap[2] ) ? 2 : i1;
i3 = isString( cap[3] ) && regexp( Lookup, cap[3] ) ? 3 : i2;
i4 = isString( cap[4] ) && regexp( Lookup, cap[4] ) ? 4 : i3;
i5 = isString( cap[5] ) && regexp( Lookup, cap[5] ) ? 5 : i4;
index = isString( cap[6] ) && regexp( Lookup, cap[6] ) ? 6 : i5;
i = isUndefined( index ) ? 0 : index;

QueuePowerRef = real( ! isUndefined( index ) ? int( substr( cap[i], size( Lookup ) - 1 ) ) : other.GlueHostBenchmarkSI00 );
QueueTimeRef  = real( other.GlueCEPolicyMaxCPUTime * 60 );
QueueWorkRef  = QueuePowerRef * QueueTimeRef;
"""

    requirements = list( self.requirements )
    if 'GridCEs' in taskQueueDict and taskQueueDict['GridCEs']:
      # if there is an explicit Grid CE requested by the TQ, remove the Ranking requirement
      for req in self.requirements:
        if req.strip().lower()[:6] == 'rank >':
          requirements.remove( req )

    requirements.append( 'QueueWorkRef > CPUWorkRef' )

    siteRequirements = '\n || '.join( [ 'other.GlueCEInfoHostName == "%s"' % s for s in ceMask ] )
    requirements.append( "( %s\n )" % siteRequirements )

    pilotRequirements = '\n && '.join( requirements )

    pilotJDL += 'pilotRequirements  = %s;\n' % pilotRequirements

    pilotJDL += 'Rank          = %s;\n' % self.rank
    pilotJDL += 'FuzzyRank     = %s;\n' % self.fuzzyRank
    pilotJDL += 'StdOutput     = "%s";\n' % outputSandboxFiles[0]
    pilotJDL += 'StdError      = "%s";\n' % outputSandboxFiles[1]

    pilotJDL += 'InputSandbox  = { "%s" };\n' % '", "'.join( [ self.install, executable ] )

    pilotJDL += 'OutputSandbox = { %s };\n' % ', '.join( [ '"%s"' % f for f in outputSandboxFiles ] )

    self.log.verbose( pilotJDL )

    return ( pilotJDL, pilotRequirements )
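    # Subclasses presumably combine this common payload JDL with their
    # middleware-specific parts inside _prepareJDL() (cf. the docstrings above).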


  def parseListMatchStdout( self, proxy, cmd, taskQueueID, rb ):
    """
      Parse List Match stdout to return list of matched CE's
    """
    self.log.verbose( 'Executing List Match for TaskQueue', taskQueueID )

    start = time.time()
    ret = executeGridCommand( proxy, cmd, self.gridEnv )

    if not ret['OK']:
      self.log.error( 'Failed to execute List Match:', ret['Message'] )
      self.__sendErrorMail( rb, 'List Match', cmd, ret, proxy )
      return False
    if ret['Value'][0] != 0:
      self.log.error( 'Error executing List Match:', str( ret['Value'][0] ) + '\n'.join( ret['Value'][1:3] ) )
      self.__sendErrorMail( rb, 'List Match', cmd, ret, proxy )
      return False
    self.log.info( 'List Match Execution Time: %.2f for TaskQueue %d' % ( ( time.time() - start ), taskQueueID ) )

    stdout = ret['Value'][1]
    stderr = ret['Value'][2]
    availableCEs = []
    # Parse std.out
    for line in List.fromChar( stdout, '\n' ):
      if re.search( '/jobmanager-', line ) or re.search( '/cream-', line ):
        # TODO: strip the extra info from the line
        availableCEs.append( line )

    if not availableCEs:
      self.log.info( 'List-Match failed to find CEs for TaskQueue', taskQueueID )
      self.log.info( stdout )
      self.log.info( stderr )
    else:
      self.log.debug( 'List-Match returns:', str( ret['Value'][0] ) + '\n'.join( ret['Value'][1:3] ) )
      self.log.info( 'List-Match found %s CEs for TaskQueue' % len( availableCEs ), taskQueueID )
      self.log.verbose( ', '.join( availableCEs ) )


    return availableCEs

  def parseJobSubmitStdout( self, proxy, cmd, taskQueueID, rb ):
    """
      Parse Job Submit stdout to return pilot reference
    """
    start = time.time()
    self.log.verbose( 'Executing Job Submit for TaskQueue', taskQueueID )

    ret = executeGridCommand( proxy, cmd, self.gridEnv )

    if not ret['OK']:
      self.log.error( 'Failed to execute Job Submit:', ret['Message'] )
      self.__sendErrorMail( rb, 'Job Submit', cmd, ret, proxy )
      return False
    if ret['Value'][0] != 0:
      self.log.error( 'Error executing Job Submit:', str( ret['Value'][0] ) + '\n'.join( ret['Value'][1:3] ) )
      self.__sendErrorMail( rb, 'Job Submit', cmd, ret, proxy )
      return False
    self.log.info( 'Job Submit Execution Time: %.2f for TaskQueue %d' % ( ( time.time() - start ), taskQueueID ) )

    stdout = ret['Value'][1]
    stderr = ret['Value'][2]

    submittedPilot = None

    failed = 1
    rb = ''
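    # Scan stdout for https URLs: the last match is kept as the pilot (gLite) reference,
    # and the host part of the first matched URL is taken as the broker endpoint.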
    for line in List.fromChar( stdout, '\n' ):
      m = re.search( "(https:\S+)", line )
      if ( m ):
        glite_id = m.group( 1 )
        submittedPilot = glite_id
        if not rb:
          m = re.search( "https://(.+):.+", glite_id )
          rb = m.group( 1 )
        failed = 0
    if failed:
      self.log.error( 'Job Submit returns no Reference:', str( ret['Value'][0] ) + '\n'.join( ret['Value'][1:3] ) )
      return False

    self.log.info( 'Reference %s for TaskQueue %s' % ( glite_id, taskQueueID ) )

    return glite_id, rb

  def _writeJDL( self, filename, jdlList ):
    try:
      f = open( filename, 'w' )
      f.write( '\n'.join( jdlList ) )
      f.close()
    except Exception, x:
      self.log.exception()
      return ''

    return filename
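
# --- Illustrative sketch, not part of the original module ---
# Per the class docstring, a derived Grid director must declare the middleware name
# (the string before "PilotDirector", referenced as self.gridMiddleware in the code
# above) and its list of Resource Brokers before calling the parent __init__.
# The class name and broker host below are placeholders only.
class ExampleGridPilotDirector( GridPilotDirector ):
  def __init__( self, submitPool ):
    self.gridMiddleware = 'Example'               # string before "PilotDirector"
    self.resourceBrokers = [ 'rb01.example.org' ] # Brokers used by this Director
    GridPilotDirector.__init__( self, submitPool )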
Example #2
class DIRACPilotDirector(PilotDirector):
  """
    DIRAC PilotDirector class
  """
  def __init__( self, submitPool ):
    """
     Define some defaults and call parent __init__
    """
    self.gridMiddleware    = 'DIRAC'

    PilotDirector.__init__( self, submitPool )

    self.computingElementList = COMPUTING_ELEMENTS
    self.computingElementDict = {}
    self.addComputingElement( self.computingElementList )

    self.siteName          = gConfig.getValue('/LocalSite/Site','')
    if not self.siteName:
      self.log.error( 'Can not run a Director if Site Name is not defined' )
      sys.exit()

    self.__failingCECache  = DictCache()
    self.__ticketsCECache  = DictCache()

  def configure(self, csSection, submitPool ):
    """
     Here goes common configuration for DIRAC PilotDirector
    """

    PilotDirector.configure( self, csSection, submitPool )
    self.reloadConfiguration( csSection, submitPool )

    self.__failingCECache.purgeExpired()
    self.__ticketsCECache.purgeExpired()

    for ce in self.__failingCECache.getKeys():
      if ce in self.computingElementDict.keys():
        try:
          del self.computingElementDict[ce]
        except:
          pass
    if self.computingElementDict:
      self.log.info( ' ComputingElements:', ', '.join(self.computingElementDict.keys()) )
    else:
      return

    # FIXME: this is to start testing
    ceName, computingElementDict = self.computingElementDict.items()[0]

    self.computingElement = computingElementDict['CE']

    self.log.debug( self.computingElement.getDynamicInfo() )

    self.log.info( ' SiteName:', self.siteName )


  def configureFromSection( self, mySection ):
    """
      reload from CS
    """
    PilotDirector.configureFromSection( self, mySection )

    self.computingElementList = gConfig.getValue( mySection + '/ComputingElements', self.computingElementList )
    self.addComputingElement( self.computingElementList )

    self.siteName = gConfig.getValue( mySection + '/SiteName', self.siteName )


  def addComputingElement(self, ceList):
    """
      Check if a CE object for the current CE is available,
      instantiate one if necessary
    """
    for CE in ceList:
      if CE not in self.computingElementDict:
        ceFactory = ComputingElementFactory( )
        ceInstance = ceFactory.getCE( ceName = CE )
        if not ceInstance['OK']:
          self.log.error('Can not create CE object:', ceInstance['Message'])
          return
        self.computingElementDict[CE] = ceInstance['Value'].ceConfigDict
        # add the 'CE' instance at the end to avoid being overwritten
        self.computingElementDict[CE]['CE'] = ceInstance['Value']


  def _submitPilots( self, workDir, taskQueueDict, pilotOptions, pilotsToSubmit,
                     ceMask, submitPrivatePilot, privateTQ, proxy, pilotsPerJob ):
    """
      This method does the actual pilot submission to the DIRAC CE.
      The logic is as follows:
      - If there are no available CEs, it returns an error
      - If there is no queue available in the CEs, it returns an error
      - It creates a temp directory
      - It prepares a PilotScript
    """

    taskQueueID = taskQueueDict['TaskQueueID']
    ownerDN = taskQueueDict['OwnerDN']

    submittedPilots = 0

    # if self.computingElement not in self.computingElementDict:
    #  # Since we can exclude CEs from the list, it may become empty
    #  return S_ERROR( ERROR_CE )

    pilotRequirements = []
    pilotRequirements.append( ( 'CPUTime', taskQueueDict['CPUTime'] ) )
    # do we need to care about anything else?
    pilotRequirementsString = str( pilotRequirements )
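    # The stringified requirements double as the key for the list-match cache below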

    # Check that there are available queues for the Jobs:
    if self.enableListMatch:
      availableQueues = []
      # now = Time.dateTime()
      cachedAvailableQueues = self.listMatchCache.get( pilotRequirementsString )
      if cachedAvailableQueues == False:
        availableQueues = self._listQueues( pilotRequirements )
        if availableQueues != False:
          self.listMatchCache.add( pilotRequirementsString, self.listMatchDelay, availableQueues )
          self.log.verbose( 'Available Queues for TaskQueue ',  "%s: %s" % ( taskQueueID, str(availableQueues) ) )
      else:
        availableQueues = cachedAvailableQueues

      if not availableQueues:
        return S_ERROR( ERROR_CE + ' TQ: %d' % taskQueueID )

    baseDir = os.getcwd()
    workingDirectory = tempfile.mkdtemp( prefix= 'TQ_%s_' % taskQueueID, dir = workDir )
    self.log.verbose( 'Using working Directory:', workingDirectory )
    os.chdir( workingDirectory )

    # set the Site Name
    pilotOptions.append( "-n '%s'" % self.siteName)

    # submit pilots for every CE available
    
    for CE in self.computingElementDict.keys():
      ceName = CE
      computingElement = self.computingElementDict[CE]['CE']
      
      # add possible requirements from Site and CE
      for req, val in getResourceDict( ceName ).items():
        pilotOptions.append( "-o '/AgentJobRequirements/%s=%s'" % ( req, val ) )
        
      ceConfigDict = self.computingElementDict[CE]

      if 'ClientPlatform' in ceConfigDict:
        pilotOptions.append( "-p '%s'" % ceConfigDict['ClientPlatform'])
  
      if 'SharedArea' in ceConfigDict:
        pilotOptions.append( "-o '/LocalSite/SharedArea=%s'" % ceConfigDict['SharedArea'] )
  
      if 'CPUScalingFactor' in ceConfigDict:
        pilotOptions.append( "-o '/LocalSite/CPUScalingFactor=%s'" % ceConfigDict['CPUScalingFactor'] )

      if 'CPUNormalizationFactor' in ceConfigDict:
        pilotOptions.append( "-o '/LocalSite/CPUNormalizationFactor=%s'" % ceConfigDict['CPUNormalizationFactor'] )

      self.log.info( "pilotOptions: ", ' '.join( pilotOptions ) )

      httpProxy = ''
      if 'HttpProxy' in ceConfigDict:
        httpProxy = ceConfigDict['HttpProxy']
      
      pilotExecDir = ''
      if 'JobExecDir' in ceConfigDict:
        pilotExecDir = ceConfigDict['JobExecDir']
  
      try:
        pilotScript = self._writePilotScript( workingDirectory, pilotOptions, proxy, httpProxy, pilotExecDir )
      except:
        self.log.exception( ERROR_SCRIPT )
        try:
          os.chdir( baseDir )
          shutil.rmtree( workingDirectory )
        except:
          pass
        return S_ERROR( ERROR_SCRIPT )
  
      self.log.info("Pilots to submit: ", pilotsToSubmit)
      while submittedPilots < pilotsToSubmit:
        # Find out how many pilots can be submitted
        ret = computingElement.available( )
        if not ret['OK']:
          self.log.error('Can not determine if pilot should be submitted: ', ret['Message'])
          break
        maxPilotsToSubmit = ret['Value']
        self.log.info("Submit Pilots: ", maxPilotsToSubmit)
        if not maxPilotsToSubmit:
          break
        # submit the pilots and then check again
        for i in range( min(maxPilotsToSubmit,pilotsToSubmit-submittedPilots) ):
          submission = computingElement.submitJob(pilotScript, '', '')
          if not submission['OK']:
            self.log.error('Pilot submission failed: ', submission['Message'])
            # cleanup
            try:
              os.chdir( baseDir )
              shutil.rmtree( workingDirectory )
            except:
              pass
            return S_ERROR('Pilot submission failed after ' + str(submittedPilots) + ' pilots submitted successfully')
          submittedPilots += 1
          # give the batch system some time to digest the submitted job
          time.sleep(1)
          
      #next CE

    try:
      os.chdir( baseDir )
      shutil.rmtree( workingDirectory )
    except:
      pass

    return S_OK(submittedPilots)

  def _listQueues( self, pilotRequirements ):
    """
     For each defined CE return the list of Queues with available, running and waiting slots,
     matching the requirements of the pilots.
     Currently only CPU time is considered
    """
    availableQueues = []
    result = self.computingElement.available( pilotRequirements )
    if not result['OK']:
      self.log.error( 'Can not determine available queues', result['Message'] )
      return False
    return result['Value']


  def _writePilotScript( self, workingDirectory, pilotOptions, proxy, httpProxy, pilotExecDir ):
    """
     Prepare the script to execute the pilot
     For the moment it will do the same as Grid Pilots: a full DIRAC installation

     It assumes that the pilot script will have access to the submit working directory
    """
    try:
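      # Compress (bz2) and base64-encode the proxy, the pilot script and the install
      # script so they can be embedded verbatim in the self-contained wrapper defined
      # below; at run time the wrapper decodes them, sets up the X509 environment,
      # runs the pilot and finally removes its working directory.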
      compressedAndEncodedProxy = base64.encodestring( bz2.compress( proxy.dumpAllToString()['Value'] ) ).replace('\n','')
      compressedAndEncodedPilot = base64.encodestring( bz2.compress( open( self.pilot, "rb" ).read(), 9 ) ).replace('\n','')
      compressedAndEncodedInstall = base64.encodestring( bz2.compress( open( self.install, "rb" ).read(), 9 ) ).replace('\n','')
    except:
      self.log.exception('Exception during file compression of proxy, dirac-pilot or dirac-install')
      return S_ERROR('Exception during file compression of proxy, dirac-pilot or dirac-install')

    localPilot = """#!/bin/bash
/usr/bin/env python << EOF
#
import os, tempfile, sys, shutil, base64, bz2
try:
  pilotExecDir = '%(pilotExecDir)s'
  if not pilotExecDir:
    pilotExecDir = None 
  pilotWorkingDirectory = tempfile.mkdtemp( suffix = 'pilot', prefix = 'DIRAC_', dir = pilotExecDir )
  os.chdir( pilotWorkingDirectory )
  open( 'proxy', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedProxy)s" ) ) )
  open( '%(pilotScript)s', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedPilot)s" ) ) )
  open( '%(installScript)s', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedInstall)s" ) ) )
  os.chmod("proxy",0600)
  os.chmod("%(pilotScript)s",0700)
  os.chmod("%(installScript)s",0700)
  if "LD_LIBRARY_PATH" not in os.environ:
    os.environ["LD_LIBRARY_PATH"]=""
  os.environ["X509_USER_PROXY"]=os.path.join(pilotWorkingDirectory, 'proxy')
  if "%(httpProxy)s":
    os.environ["HTTP_PROXY"]="%(httpProxy)s"
  os.environ["X509_CERT_DIR"]=os.path.join(pilotWorkingDirectory, 'etc/grid-security/certificates')
  # TODO: structure the output
  print '==========================================================='
  print 'Environment of execution host'
  for key in os.environ.keys():
    print key + '=' + os.environ[key]
  print '==========================================================='
except Exception, x:
  print >> sys.stderr, x
  sys.exit(-1)
cmd = "python %(pilotScript)s %(pilotOptions)s"
print 'Executing: ', cmd
sys.stdout.flush()
os.system( cmd )

shutil.rmtree( pilotWorkingDirectory )

EOF
""" % { 'compressedAndEncodedProxy': compressedAndEncodedProxy, 
        'compressedAndEncodedPilot': compressedAndEncodedPilot, 
        'compressedAndEncodedInstall': compressedAndEncodedInstall, 
        'httpProxy': httpProxy, 
        'pilotScript': os.path.basename(self.pilot), 
        'installScript': os.path.basename(self.install),
        'pilotOptions': ' '.join( pilotOptions ),
        'pilotExecDir': pilotExecDir }

    fd, name = tempfile.mkstemp( suffix = '_pilotwrapper.py', prefix = 'DIRAC_', dir=workingDirectory)
    pilotWrapper = os.fdopen(fd, 'w')
    pilotWrapper.write( localPilot )
    pilotWrapper.close()

    return name
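    # The returned wrapper path is what _submitPilots() above passes to
    # computingElement.submitJob()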

  def _getPilotProxyFromDIRACGroup( self, ownerDN, ownerGroup, requiredTimeLeft ):
    """
    Download a limited pilot proxy with VOMS extensions depending on the group
    """
    #Assign VOMS attribute
    vomsAttr = CS.getVOMSAttributeForGroup( ownerGroup )
    if not vomsAttr:
      self.log.info( "Downloading a proxy without VOMS extensions" )
      return gProxyManager.downloadProxy( ownerDN, ownerGroup, limited = True,
                                          requiredTimeLeft = requiredTimeLeft )
    else:
      self.log.info( "Downloading a proxy with '%s' VOMS extension" % vomsAttr )
      return gProxyManager.downloadVOMSProxy( ownerDN,
                                     ownerGroup,
                                     limited = True,
                                     requiredTimeLeft = requiredTimeLeft,
                                     requiredVOMSAttribute = vomsAttr )
Example #3
class DIRACPilotDirector(PilotDirector):
    """
    DIRAC PilotDirector class
  """
    def __init__(self, submitPool):
        """
     Define some defaults and call parent __init__
    """
        self.gridMiddleware = 'DIRAC'

        PilotDirector.__init__(self, submitPool)

        self.computingElementList = COMPUTING_ELEMENTS
        self.computingElementDict = {}
        self.addComputingElement(self.computingElementList)

        self.siteName = gConfig.getValue('/LocalSite/Site', '')
        if not self.siteName:
            self.log.error(
                'Can not run a Director if Site Name is not defined')
            sys.exit()

        self.__failingCECache = DictCache()
        self.__ticketsCECache = DictCache()

    def configure(self, csSection, submitPool):
        """
     Here goes common configuration for DIRAC PilotDirector
    """

        PilotDirector.configure(self, csSection, submitPool)
        self.reloadConfiguration(csSection, submitPool)

        self.__failingCECache.purgeExpired()
        self.__ticketsCECache.purgeExpired()

        for ce in self.__failingCECache.getKeys():
            if ce in self.computingElementDict.keys():
                try:
                    del self.computingElementDict[ce]
                except:
                    pass
        if self.computingElementDict:
            self.log.info(' ComputingElements:',
                          ', '.join(self.computingElementDict.keys()))
        else:
            return

        # FIXME: this is to start testing
        ceName, computingElementDict = self.computingElementDict.items()[0]

        self.computingElement = computingElementDict['CE']

        self.log.debug(self.computingElement.getDynamicInfo())

        self.log.info(' SiteName:', self.siteName)

    def configureFromSection(self, mySection):
        """
      reload from CS
    """
        PilotDirector.configureFromSection(self, mySection)

        self.computingElementList = gConfig.getValue(
            mySection + '/ComputingElements', self.computingElementList)
        self.addComputingElement(self.computingElementList)

        self.siteName = gConfig.getValue(mySection + '/SiteName',
                                         self.siteName)

    def addComputingElement(self, ceList):
        """
      Check if a CE object for the current CE is available,
      instantiate one if necessary
    """
        for CE in ceList:
            if CE not in self.computingElementDict:
                ceFactory = ComputingElementFactory()
                ceInstance = ceFactory.getCE(ceName=CE)
                if not ceInstance['OK']:
                    self.log.error('Can not create CE object:',
                                   ceInstance['Message'])
                    return
                self.computingElementDict[CE] = ceInstance[
                    'Value'].ceConfigDict
                # add the 'CE' instance at the end to avoid being overwritten
                self.computingElementDict[CE]['CE'] = ceInstance['Value']

    def _submitPilots(self, workDir, taskQueueDict, pilotOptions,
                      pilotsToSubmit, ceMask, submitPrivatePilot, privateTQ,
                      proxy, pilotsPerJob):
        """
      This method does the actual pilot submission to the DIRAC CE.
      The logic is as follows:
      - If there are no available CEs, it returns an error
      - If there is no queue available in the CEs, it returns an error
      - It creates a temp directory
      - It prepares a PilotScript
    """

        taskQueueID = taskQueueDict['TaskQueueID']
        ownerDN = taskQueueDict['OwnerDN']

        submittedPilots = 0

        # if self.computingElement not in self.computingElementDict:
        #  # Since we can exclude CEs from the list, it may become empty
        #  return S_ERROR( ERROR_CE )

        pilotRequirements = []
        pilotRequirements.append(('CPUTime', taskQueueDict['CPUTime']))
        # do we need to care about anything else?
        pilotRequirementsString = str(pilotRequirements)

        # Check that there are available queues for the Jobs:
        if self.enableListMatch:
            availableQueues = []
            # now = Time.dateTime()
            cachedAvailableQueues = self.listMatchCache.get(
                pilotRequirementsString)
            if cachedAvailableQueues == False:
                availableQueues = self._listQueues(pilotRequirements)
                if availableQueues != False:
                    self.listMatchCache.add(pilotRequirementsString,
                                            self.listMatchDelay,
                                            availableQueues)
                    self.log.verbose(
                        'Available Queues for TaskQueue ',
                        "%s: %s" % (taskQueueID, str(availableQueues)))
            else:
                availableQueues = cachedAvailableQueues

            if not availableQueues:
                return S_ERROR(ERROR_CE + ' TQ: %d' % taskQueueID)

        baseDir = os.getcwd()
        workingDirectory = tempfile.mkdtemp(prefix='TQ_%s_' % taskQueueID,
                                            dir=workDir)
        self.log.verbose('Using working Directory:', workingDirectory)
        os.chdir(workingDirectory)

        # set the Site Name
        pilotOptions.append("-n '%s'" % self.siteName)

        # submit pilots for every CE available

        for CE in self.computingElementDict.keys():
            ceName = CE
            computingElement = self.computingElementDict[CE]['CE']

            # add possible requirements from Site and CE
            for req, val in getResourceDict(ceName).items():
                pilotOptions.append("-o '/AgentJobRequirements/%s=%s'" %
                                    (req, val))

            ceConfigDict = self.computingElementDict[CE]

            if 'ClientPlatform' in ceConfigDict:
                pilotOptions.append("-p '%s'" % ceConfigDict['ClientPlatform'])

            if 'SharedArea' in ceConfigDict:
                pilotOptions.append("-o '/LocalSite/SharedArea=%s'" %
                                    ceConfigDict['SharedArea'])

            if 'CPUScalingFactor' in ceConfigDict:
                pilotOptions.append("-o '/LocalSite/CPUScalingFactor=%s'" %
                                    ceConfigDict['CPUScalingFactor'])

            if 'CPUNormalizationFactor' in ceConfigDict:
                pilotOptions.append(
                    "-o '/LocalSite/CPUNormalizationFactor=%s'" %
                    ceConfigDict['CPUNormalizationFactor'])

            self.log.info("pilotOptions: ", ' '.join(pilotOptions))

            httpProxy = ''
            if 'HttpProxy' in ceConfigDict:
                httpProxy = ceConfigDict['HttpProxy']

            pilotExecDir = ''
            if 'JobExecDir' in ceConfigDict:
                pilotExecDir = ceConfigDict['JobExecDir']

            try:
                pilotScript = self._writePilotScript(workingDirectory,
                                                     pilotOptions, proxy,
                                                     httpProxy, pilotExecDir)
            except:
                self.log.exception(ERROR_SCRIPT)
                try:
                    os.chdir(baseDir)
                    shutil.rmtree(workingDirectory)
                except:
                    pass
                return S_ERROR(ERROR_SCRIPT)

            self.log.info("Pilots to submit: ", pilotsToSubmit)
            while submittedPilots < pilotsToSubmit:
                # Find out how many pilots can be submitted
                ret = computingElement.available()
                if not ret['OK']:
                    self.log.error(
                        'Can not determine if pilot should be submitted: ',
                        ret['Message'])
                    break
                maxPilotsToSubmit = ret['Value']
                self.log.info("Submit Pilots: ", maxPilotsToSubmit)
                if not maxPilotsToSubmit:
                    break
                # submit the pilots and then check again
                for i in range(
                        min(maxPilotsToSubmit,
                            pilotsToSubmit - submittedPilots)):
                    submission = computingElement.submitJob(
                        pilotScript, '', '')
                    if not submission['OK']:
                        self.log.error('Pilot submission failed: ',
                                       submission['Message'])
                        # cleanup
                        try:
                            os.chdir(baseDir)
                            shutil.rmtree(workingDirectory)
                        except:
                            pass
                        return S_ERROR('Pilot submission failed after ' +
                                       str(submittedPilots) +
                                       ' pilots submitted successfully')
                    submittedPilots += 1
                    # give the batch system some time to digest the submitted job
                    time.sleep(1)

            #next CE

        try:
            os.chdir(baseDir)
            shutil.rmtree(workingDirectory)
        except:
            pass

        return S_OK(submittedPilots)

    def _listQueues(self, pilotRequirements):
        """
     For each defined CE return the list of Queues with available, running and waiting slots,
     matching the requirements of the pilots.
     Currently only CPU time is considered
    """
        availableQueues = []
        result = self.computingElement.available(pilotRequirements)
        if not result['OK']:
            self.log.error('Can not determine available queues',
                           result['Message'])
            return False
        return result['Value']

    def _writePilotScript(self, workingDirectory, pilotOptions, proxy,
                          httpProxy, pilotExecDir):
        """
     Prepare the script to execute the pilot
     For the moment it will do the same as Grid Pilots: a full DIRAC installation

     It assumes that the pilot script will have access to the submit working directory
    """
        try:
            compressedAndEncodedProxy = base64.encodestring(
                bz2.compress(proxy.dumpAllToString()['Value'])).replace(
                    '\n', '')
            compressedAndEncodedPilot = base64.encodestring(
                bz2.compress(open(self.pilot, "rb").read(),
                             9)).replace('\n', '')
            compressedAndEncodedInstall = base64.encodestring(
                bz2.compress(open(self.install, "rb").read(),
                             9)).replace('\n', '')
        except:
            self.log.exception(
                'Exception during file compression of proxy, dirac-pilot or dirac-install'
            )
            return S_ERROR(
                'Exception during file compression of proxy, dirac-pilot or dirac-install'
            )

        localPilot = """#!/bin/bash
/usr/bin/env python << EOF
#
import os, tempfile, sys, shutil, base64, bz2
try:
  pilotExecDir = '%(pilotExecDir)s'
  if not pilotExecDir:
    pilotExecDir = None 
  pilotWorkingDirectory = tempfile.mkdtemp( suffix = 'pilot', prefix = 'DIRAC_', dir = pilotExecDir )
  os.chdir( pilotWorkingDirectory )
  open( 'proxy', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedProxy)s" ) ) )
  open( '%(pilotScript)s', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedPilot)s" ) ) )
  open( '%(installScript)s', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedInstall)s" ) ) )
  os.chmod("proxy",0600)
  os.chmod("%(pilotScript)s",0700)
  os.chmod("%(installScript)s",0700)
  if "LD_LIBRARY_PATH" not in os.environ:
    os.environ["LD_LIBRARY_PATH"]=""
  os.environ["X509_USER_PROXY"]=os.path.join(pilotWorkingDirectory, 'proxy')
  if "%(httpProxy)s":
    os.environ["HTTP_PROXY"]="%(httpProxy)s"
  os.environ["X509_CERT_DIR"]=os.path.join(pilotWorkingDirectory, 'etc/grid-security/certificates')
  # TODO: structure the output
  print '==========================================================='
  print 'Environment of execution host'
  for key in os.environ.keys():
    print key + '=' + os.environ[key]
  print '==========================================================='
except Exception, x:
  print >> sys.stderr, x
  sys.exit(-1)
cmd = "python %(pilotScript)s %(pilotOptions)s"
print 'Executing: ', cmd
sys.stdout.flush()
os.system( cmd )

shutil.rmtree( pilotWorkingDirectory )

EOF
""" % {
            'compressedAndEncodedProxy': compressedAndEncodedProxy,
            'compressedAndEncodedPilot': compressedAndEncodedPilot,
            'compressedAndEncodedInstall': compressedAndEncodedInstall,
            'httpProxy': httpProxy,
            'pilotScript': os.path.basename(self.pilot),
            'installScript': os.path.basename(self.install),
            'pilotOptions': ' '.join(pilotOptions),
            'pilotExecDir': pilotExecDir
        }

        fd, name = tempfile.mkstemp(suffix='_pilotwrapper.py',
                                    prefix='DIRAC_',
                                    dir=workingDirectory)
        pilotWrapper = os.fdopen(fd, 'w')
        pilotWrapper.write(localPilot)
        pilotWrapper.close()

        return name

    def _getPilotProxyFromDIRACGroup(self, ownerDN, ownerGroup,
                                     requiredTimeLeft):
        """
    Download a limited pilot proxy with VOMS extensions depending on the group
    """
        #Assign VOMS attribute
        vomsAttr = CS.getVOMSAttributeForGroup(ownerGroup)
        if not vomsAttr:
            self.log.info("Downloading a proxy without VOMS extensions")
            return gProxyManager.downloadProxy(
                ownerDN,
                ownerGroup,
                limited=True,
                requiredTimeLeft=requiredTimeLeft)
        else:
            self.log.info("Downloading a proxy with '%s' VOMS extension" %
                          vomsAttr)
            return gProxyManager.downloadVOMSProxy(
                ownerDN,
                ownerGroup,
                limited=True,
                requiredTimeLeft=requiredTimeLeft,
                requiredVOMSAttribute=vomsAttr)
Example #4
class GridPilotDirector( PilotDirector ):
  """
    Base Grid PilotDirector class
    Derived classes must declare:
      self.Middleware: It must correspond to the string before "PilotDirector".
        (For proper naming of the logger)
      self.ResourceBrokers: list of Brokers used by the Director.
        (For proper error reporting)
  """
  def __init__( self, submitPool ):
    """
     Define some defaults and call parent __init__
    """
    self.gridEnv = GRIDENV

    self.cpuPowerRef = CPU_POWER_REF
    self.requirements = REQUIREMENTS
    self.rank = RANK
    self.fuzzyRank = FUZZY_RANK

    self.__failingWMSCache = DictCache()
    self.__ticketsWMSCache = DictCache()
    self.__listMatchWMSCache = DictCache()

    PilotDirector.__init__( self, submitPool )

  def configure( self, csSection, submitPool ):
    """
     Here goes common configuration for all Grid PilotDirectors
    """
    PilotDirector.configure( self, csSection, submitPool )
    self.reloadConfiguration( csSection, submitPool )

    self.__failingWMSCache.purgeExpired()
    self.__ticketsWMSCache.purgeExpired()
    for rb in self.__failingWMSCache.getKeys():
      if rb in self.resourceBrokers:
        try:
          self.resourceBrokers.remove( rb )
        except:
          pass

    self.resourceBrokers = List.randomize( self.resourceBrokers )

    if self.gridEnv:
      self.log.info( ' GridEnv:        ', self.gridEnv )
    if self.resourceBrokers:
      self.log.info( ' ResourceBrokers:', ', '.join( self.resourceBrokers ) )

  def configureFromSection( self, mySection ):
    """
      reload from CS
    """
    PilotDirector.configureFromSection( self, mySection )

    self.gridEnv = gConfig.getValue( mySection + '/GridEnv', self.gridEnv )
    if not self.gridEnv:
      # No specific option found, try a general one
      setup = gConfig.getValue( '/DIRAC/Setup', '' )
      if setup:
        instance = gConfig.getValue( '/DIRAC/Setups/%s/WorkloadManagement' % setup, '' )
        if instance:
          self.gridEnv = gConfig.getValue( '/Systems/WorkloadManagement/%s/GridEnv' % instance, '' )

    self.resourceBrokers = gConfig.getValue( mySection + '/ResourceBrokers', self.resourceBrokers )

    self.cpuPowerRef = gConfig.getValue( mySection + '/CPUPowerRef', self.cpuPowerRef )
    self.requirements = gConfig.getValue( mySection + '/Requirements', self.requirements )
    self.rank = gConfig.getValue( mySection + '/Rank', self.rank )
    self.fuzzyRank = gConfig.getValue( mySection + '/FuzzyRank', self.fuzzyRank )

  def _submitPilots( self, workDir, taskQueueDict, pilotOptions, pilotsToSubmit,
                     ceMask, submitPrivatePilot, privateTQ, proxy, pilotsPerJob ):
    """
      This method does the actual pilot submission to the Grid RB.
      The logic is as follows:
      - If there are no available RBs, it returns an error
      - If there is no VOMS extension in the proxy, it returns an error
      - It creates a temp directory
      - It prepares a JDL:
        part of it is common to gLite and LCG (the payload description),
        part of it is specific to each middleware
    """
    taskQueueID = taskQueueDict['TaskQueueID']
    # ownerDN = taskQueueDict['OwnerDN']
    ownerDN = proxy.getCredentials()['Value']['identity']


    if not self.resourceBrokers:
      # Since we can exclude RBs from the list, it may become empty
      return S_ERROR( ERROR_RB )

    # Need to get VOMS extension for the later interactions with WMS
    ret = gProxyManager.getVOMSAttributes( proxy )
    if not ret['OK']:
      self.log.error( ERROR_VOMS, ret['Message'] )
      return S_ERROR( ERROR_VOMS )
    if not ret['Value']:
      return S_ERROR( ERROR_VOMS )
    vomsGroup = ret['Value'][0]

    workingDirectory = tempfile.mkdtemp( prefix = 'TQ_%s_' % taskQueueID, dir = workDir )
    self.log.verbose( 'Using working Directory:', workingDirectory )

    # Write JDL
    retDict = self._prepareJDL( taskQueueDict, workingDirectory, pilotOptions, pilotsPerJob,
                                ceMask, submitPrivatePilot, privateTQ )
    jdl = retDict['JDL']
    pilotRequirements = retDict['Requirements']
    rb = retDict['RB']
    if not jdl:
      try:
        shutil.rmtree( workingDirectory )
      except:
        pass
      return S_ERROR( ERROR_JDL )

    # Check that there are available queues for the Job:
    if self.enableListMatch:
      availableCEs = []
      now = Time.dateTime()
      availableCEs = self.listMatchCache.get( pilotRequirements )
      if availableCEs == False:
        availableCEs = self._listMatch( proxy, jdl, taskQueueID, rb )
        if availableCEs != False:
          self.log.verbose( 'LastListMatch', now )
          self.log.verbose( 'AvailableCEs ', availableCEs )
          self.listMatchCache.add( pilotRequirements, self.listMatchDelay * 60,
                                   value = availableCEs )                      # it is given in minutes
      if not availableCEs:
        try:
          shutil.rmtree( workingDirectory )
        except:
          pass
        return S_ERROR( ERROR_CE + ' TQ: %d' % taskQueueID )

    # Now we are ready for the actual submission

    self.log.verbose( 'Submitting Pilots for TaskQueue', taskQueueID )
    submitRet = self._submitPilot( proxy, pilotsPerJob, jdl, taskQueueID, rb )
    try:
      shutil.rmtree( workingDirectory )
    except:
      pass
    if not submitRet:
      return S_ERROR( 'Pilot Submission Failed for TQ %d ' % taskQueueID )
    # pilotReference, resourceBroker = submitRet

    submittedPilots = 0

    if pilotsPerJob != 1 and len( submitRet ) != pilotsPerJob:
      # Parametric jobs are used
      for pilotReference, resourceBroker in submitRet:
        pilotReference = self._getChildrenReferences( proxy, pilotReference, taskQueueID )
        submittedPilots += len( pilotReference )
        pilotAgentsDB.addPilotTQReference( pilotReference, taskQueueID, ownerDN,
                      vomsGroup, resourceBroker, self.gridMiddleware,
                      pilotRequirements )
    else:
      for pilotReference, resourceBroker in submitRet:
        pilotReference = [pilotReference]
        submittedPilots += len( pilotReference )
        pilotAgentsDB.addPilotTQReference( pilotReference, taskQueueID, ownerDN,
                      vomsGroup, broker = resourceBroker, gridType = self.gridMiddleware,
                      requirements = pilotRequirements )

    # Pause briefly, proportional to the number of pilots just submitted
    time.sleep( 0.1 * submittedPilots )

    if pilotsToSubmit > pilotsPerJob:
      # Additional submissions are necessary, need to get a new token and iterate.
      pilotsToSubmit -= pilotsPerJob
      ownerDN = self.genericPilotDN
      ownerGroup = self.genericPilotGroup
      result = gProxyManager.requestToken( ownerDN, ownerGroup, max( pilotsToSubmit, self.maxJobsInFillMode ) )
      if not result[ 'OK' ]:
        self.log.error( ERROR_TOKEN, result['Message'] )
        return S_ERROR( ERROR_TOKEN )
      ( token, numberOfUses ) = result[ 'Value' ]
      for option in list( pilotOptions ):  # iterate over a copy, since matching options are removed
        if option.find( '-o /Security/ProxyToken=' ) == 0:
          pilotOptions.remove( option )
      pilotOptions.append( '-o /Security/ProxyToken=%s' % token )
      pilotsPerJob = min( pilotsPerJob, int( numberOfUses / self.maxJobsInFillMode ) )
      result = self._submitPilots( workDir, taskQueueDict, pilotOptions,
                                   pilotsToSubmit, ceMask,
                                   submitPrivatePilot, privateTQ,
                                   proxy, pilotsPerJob )
      if not result['OK']:
        result['Value'] = submittedPilots
        return result
      submittedPilots += result['Value']

    return S_OK( submittedPilots )

  def _prepareJDL( self, taskQueueDict, workingDirectory, pilotOptions, pilotsToSubmit, ceMask, submitPrivatePilot, privateTQ ):
    """
      This method should be overridden in a subclass
    """
    self.log.error( '_prepareJDL() method should be implemented in a subclass' )
    sys.exit()

  def _JobJDL( self, taskQueueDict, pilotOptions, ceMask ):
    """
     The Job JDL is the same for LCG and GLite
    """
    pilotJDL = 'Executable     = "%s";\n' % os.path.basename( self.pilot )
    executable = self.pilot

    pilotJDL += 'Arguments     = "%s";\n' % ' '.join( pilotOptions )

    pilotJDL += 'CPUTimeRef    = %s;\n' % taskQueueDict['CPUTime']

    pilotJDL += 'CPUPowerRef   = %s;\n' % self.cpuPowerRef

    pilotJDL += """CPUWorkRef    = real( CPUTimeRef * CPUPowerRef );

Lookup        = "CPUScalingReferenceSI00=*";
cap = isList( other.GlueCECapability ) ? other.GlueCECapability : { "dummy" };
i0 = regexp( Lookup, cap[0] ) ? 0 : undefined;
i1 = isString( cap[1] ) && regexp( Lookup, cap[1] ) ? 1 : i0;
i2 = isString( cap[2] ) && regexp( Lookup, cap[2] ) ? 2 : i1;
i3 = isString( cap[3] ) && regexp( Lookup, cap[3] ) ? 3 : i2;
i4 = isString( cap[4] ) && regexp( Lookup, cap[4] ) ? 4 : i3;
i5 = isString( cap[5] ) && regexp( Lookup, cap[5] ) ? 5 : i4;
index = isString( cap[6] ) && regexp( Lookup, cap[6] ) ? 6 : i5;
i = isUndefined( index ) ? 0 : index;

QueuePowerRef = real( ! isUndefined( index ) ? int( substr( cap[i], size( Lookup ) - 1 ) ) : other.GlueHostBenchmarkSI00 );
QueueTimeRef  = real( other.GlueCEPolicyMaxCPUTime * 60 );
QueueWorkRef  = QueuePowerRef * QueueTimeRef;
"""

    requirements = list( self.requirements )
    if 'GridCEs' in taskQueueDict and taskQueueDict['GridCEs']:
      # if there is an explicit Grid CE requested by the TQ, remove the Ranking requirement
      for req in self.requirements:
        if req.strip().lower()[:6] == 'rank >':
          requirements.remove( req )

    requirements.append( 'QueueWorkRef > CPUWorkRef' )

    siteRequirements = '\n || '.join( [ 'other.GlueCEInfoHostName == "%s"' % s for s in ceMask ] )
    requirements.append( "( %s\n )" % siteRequirements )

    pilotRequirements = '\n && '.join( requirements )

    pilotJDL += 'pilotRequirements  = %s;\n' % pilotRequirements

    pilotJDL += 'Rank          = %s;\n' % self.rank
    pilotJDL += 'FuzzyRank     = %s;\n' % self.fuzzyRank
    pilotJDL += 'StdOutput     = "%s";\n' % outputSandboxFiles[0]
    pilotJDL += 'StdError      = "%s";\n' % outputSandboxFiles[1]

    pilotJDL += 'InputSandbox  = { "%s" };\n' % '", "'.join( [ self.install, executable ] )

    pilotJDL += 'OutputSandbox = { %s };\n' % ', '.join( [ '"%s"' % f for f in outputSandboxFiles ] )

    self.log.verbose( pilotJDL )

    return ( pilotJDL, pilotRequirements )


  def parseListMatchStdout( self, proxy, cmd, taskQueueID, rb ):
    """
      Parse List Match stdout to return list of matched CE's
    """
    self.log.verbose( 'Executing List Match for TaskQueue', taskQueueID )

    start = time.time()
    ret = executeGridCommand( proxy, cmd, self.gridEnv )

    if not ret['OK']:
      self.log.error( 'Failed to execute List Match:', ret['Message'] )
      self.__sendErrorMail( rb, 'List Match', cmd, ret, proxy )
      return False
    if ret['Value'][0] != 0:
      self.log.error( 'Error executing List Match:', str( ret['Value'][0] ) + '\n'.join( ret['Value'][1:3] ) )
      self.__sendErrorMail( rb, 'List Match', cmd, ret, proxy )
      return False
    self.log.info( 'List Match Execution Time: %.2f for TaskQueue %d' % ( ( time.time() - start ), taskQueueID ) )

    stdout = ret['Value'][1]
    stderr = ret['Value'][2]
    availableCEs = []
    # Parse std.out
    for line in List.fromChar( stdout, '\n' ):
      if re.search( '/jobmanager-', line ) or re.search( '/cream-', line ):
        # TODO: strip the extra info from the line
        availableCEs.append( line )

    if not availableCEs:
      self.log.info( 'List-Match failed to find CEs for TaskQueue', taskQueueID )
      self.log.info( stdout )
      self.log.info( stderr )
    else:
      self.log.debug( 'List-Match returns:', str( ret['Value'][0] ) + '\n'.join( ret['Value'][1:3] ) )
      self.log.info( 'List-Match found %s CEs for TaskQueue' % len( availableCEs ), taskQueueID )
      self.log.verbose( ', '.join( availableCEs ) )


    return availableCEs

  def parseJobSubmitStdout( self, proxy, cmd, taskQueueID, rb ):
    """
      Parse Job Submit stdout to return pilot reference
    """
    start = time.time()
    self.log.verbose( 'Executing Job Submit for TaskQueue', taskQueueID )

    ret = executeGridCommand( proxy, cmd, self.gridEnv )

    if not ret['OK']:
      self.log.error( 'Failed to execute Job Submit:', ret['Message'] )
      self.__sendErrorMail( rb, 'Job Submit', cmd, ret, proxy )
      return False
    if ret['Value'][0] != 0:
      self.log.error( 'Error executing Job Submit:', str( ret['Value'][0] ) + '\n'.join( ret['Value'][1:3] ) )
      self.__sendErrorMail( rb, 'Job Submit', cmd, ret, proxy )
      return False
    self.log.info( 'Job Submit Execution Time: %.2f for TaskQueue %d' % ( ( time.time() - start ), taskQueueID ) )

    stdout = ret['Value'][1]
    stderr = ret['Value'][2]

    submittedPilot = None

    failed = 1
    rb = ''
    for line in List.fromChar( stdout, '\n' ):
      m = re.search( "(https:\S+)", line )
      if ( m ):
        glite_id = m.group( 1 )
        submittedPilot = glite_id
        if not rb:
          m = re.search( "https://(.+):.+", glite_id )
          rb = m.group( 1 )
        failed = 0
    if failed:
      self.log.error( 'Job Submit returns no Reference:', str( ret['Value'][0] ) + '\n'.join( ret['Value'][1:3] ) )
      return False

    self.log.info( 'Reference %s for TaskQueue %s' % ( glite_id, taskQueueID ) )

    return glite_id, rb

  def _writeJDL( self, filename, jdlList ):
    try:
      f = open( filename, 'w' )
      f.write( '\n'.join( jdlList ) )
      f.close()
    except Exception, x:
      self.log.exception()
      return ''

    return filename