Example #1
  def __init__( self, catalogs = None, vo = None ):
    """ Default constructor
    """
    self.valid = True
    self.timeout = 180
    self.readCatalogs = []
    self.writeCatalogs = []
    self.metaCatalogs = []
    self.rootConfigPath = '/Resources/FileCatalogs'
    self.vo = vo if vo else getVOfromProxyGroup().get( 'Value', None )

    self.opHelper = Operations( vo = self.vo )

    if catalogs is None:
      catalogList = []
    elif type( catalogs ) in types.StringTypes:
      catalogList = [catalogs]
    else:
      catalogList = catalogs

    if catalogList:
      res = self._getSelectedCatalogs( catalogList )
    else:
      res = self._getCatalogs()
    if not res['OK']:
      self.valid = False
    elif ( len( self.readCatalogs ) == 0 ) and ( len( self.writeCatalogs ) == 0 ):
      self.valid = False
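
All of the examples on this page follow the same DIRAC convention: getVOfromProxyGroup() returns an S_OK/S_ERROR result dictionary rather than raising. A minimal sketch of the calling pattern (the currentVO helper below is illustrative, not part of DIRAC):

from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup

def currentVO(default=None):
    # S_OK() -> {'OK': True, 'Value': vo}; S_ERROR() -> {'OK': False, 'Message': ...}
    result = getVOfromProxyGroup()
    if result['OK']:
        return result['Value']
    return default  # fall back instead of propagating the error dict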
Example #2
  def __init__( self, catalogs = [], vo = None ):
    """ Default constructor
    """
    self.valid = True
    self.timeout = 180
    self.readCatalogs = []
    self.writeCatalogs = []
    self.vo = vo
    if not vo:
      result = getVOfromProxyGroup()
      if not result['OK']:
        # __init__ cannot return a value; flag the instance as invalid instead
        self.valid = False
        return
      self.vo = result['Value']
    self.opHelper = Operations( vo = self.vo )
    self.reHelper = Resources( vo = self.vo )

    if type( catalogs ) in types.StringTypes:
      catalogs = [catalogs]
    if catalogs:
      res = self._getSelectedCatalogs( catalogs )
    else:
      res = self._getCatalogs()
    if not res['OK']:
      self.valid = False
    elif ( len( self.readCatalogs ) == 0 ) and ( len( self.writeCatalogs ) == 0 ):
      self.valid = False
Example #3
 def getCEStatus( self ):
   """ Method to return information on running and pending jobs.
   """
   vo = ''
   res = getVOfromProxyGroup()
   if res['OK']:
     vo = res['Value']
   else: # A backup solution which may work
     vo = self.ceParameters['VO']
   cmd = 'ldapsearch -x -LLL -H ldap://%s:2135 -b mds-vo-name=resource,o=grid "(GlueVOViewLocalID=%s)"' %(self.ceHost, vo.lower())
   res = shellCall( 0, cmd )
   if not res['OK']:
     gLogger.debug("Could not query CE %s - is it down?" % self.ceHost)
     return res
   result = S_OK()
   try:
     ldapValues = res['Value'][1].split("\n")
     running = [y for y in ldapValues if 'GlueCEStateRunningJobs' in y]
     waiting = [y for y in ldapValues if 'GlueCEStateWaitingJobs' in y]
     result['RunningJobs'] = int(running[0].split(":")[1])
     result['WaitingJobs'] = int(waiting[0].split(":")[1])
   except IndexError:
     result = S_ERROR('Unknown ldap failure for site %s' % self.ceHost)
     result['RunningJobs'] = 0
     result['WaitingJobs'] = 0
   gLogger.debug("Running jobs for CE %s : %s" % (self.ceHost, result['RunningJobs']))
   gLogger.debug("Waiting jobs for CE %s : %s" % (self.ceHost, result['WaitingJobs']))
   result['SubmittedJobs'] = 0
   return result
Example #4
    def __init__(self, catalogs=None, vo=None):
        """ Default constructor
    """
        self.valid = True
        self.timeout = 180
        self.readCatalogs = []
        self.writeCatalogs = []
        self.metaCatalogs = []
        self.rootConfigPath = '/Resources/FileCatalogs'
        self.vo = vo if vo else getVOfromProxyGroup().get('Value', None)

        self.opHelper = Operations(vo=self.vo)

        if catalogs is None:
            catalogList = []
        elif type(catalogs) in types.StringTypes:
            catalogList = [catalogs]
        else:
            catalogList = catalogs

        if catalogList:
            res = self._getSelectedCatalogs(catalogList)
        else:
            res = self._getCatalogs()
        if not res['OK']:
            self.valid = False
        elif (len(self.readCatalogs) == 0) and (len(self.writeCatalogs) == 0):
            self.valid = False
Example #5
  def __call__(self, name, plugins=None, vo=None, hideExceptions=False):
    self.seCache.purgeExpired(expiredInSeconds=60)
    tId = threading.current_thread().ident

    if not vo:
      result = getVOfromProxyGroup()
      if not result['OK']:
        return
      vo = result['Value']

    # Because the gfal2 context caches the proxy location,
    # we also use the proxy location as a key.
    # In practice, there should almost always be one, except for the REA
    # If we see its memory consumption exploding, this might be a place to look
    proxyLoc = getProxyLocation()

    argTuple = (tId, name, plugins, vo, proxyLoc)
    seObj = self.seCache.get(argTuple)

    if not seObj:
      seObj = StorageElementItem(name, plugins, vo, hideExceptions=hideExceptions)
      # Add the StorageElement to the cache for 1/2 hour
      self.seCache.add(argTuple, 1800, seObj)

    return seObj
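
Note that the cache key includes the thread id, the VO and the proxy location, so each thread/VO/proxy combination gets its own StorageElementItem. A toy sketch of the get/add/purgeExpired semantics relied on above (a stand-in for illustration, not DIRAC's actual DictCache):

import time

class ToyTTLCache(object):
    def __init__(self):
        self._store = {}  # key -> (expiry timestamp, value)

    def purgeExpired(self, expiredInSeconds=0):
        # drop entries that are expired or will expire within the grace period
        cutoff = time.time() + expiredInSeconds
        for key in [k for k, (exp, _) in self._store.items() if exp <= cutoff]:
            del self._store[key]

    def get(self, key):
        entry = self._store.get(key)
        return entry[1] if entry and entry[0] > time.time() else None

    def add(self, key, ttlSeconds, value):
        self._store[key] = (time.time() + ttlSeconds, value)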
Example #6
 def getCEStatus(self):
     """ Method to return information on running and pending jobs.
 """
     vo = ''
     res = getVOfromProxyGroup()
     if res['OK']:
         vo = res['Value']
     else:  # A backup solution which may work
         vo = self.ceParameters['VO']
     cmd = 'ldapsearch -x -LLL -H ldap://%s:2135 -b mds-vo-name=resource,o=grid "(GlueVOViewLocalID=%s)"' % (
         self.ceHost, vo.lower())
     res = shellCall(0, cmd)
     if not res['OK']:
         gLogger.debug("Could not query CE %s - is it down?" % self.ceHost)
         return res
     result = S_OK()
     try:
         ldapValues = res['Value'][1].split("\n")
         running = [y for y in ldapValues if 'GlueCEStateRunningJobs' in y]
         waiting = [y for y in ldapValues if 'GlueCEStateWaitingJobs' in y]
         result['RunningJobs'] = int(running[0].split(":")[1])
         result['WaitingJobs'] = int(waiting[0].split(":")[1])
     except IndexError:
         result = S_ERROR('Unknown ldap failure for site %s' % self.ceHost)
         result['RunningJobs'] = 0
         result['WaitingJobs'] = 0
     gLogger.debug("Running jobs for CE %s : %s" %
                   (self.ceHost, result['RunningJobs']))
     gLogger.debug("Waiting jobs for CE %s : %s" %
                   (self.ceHost, result['WaitingJobs']))
     result['SubmittedJobs'] = 0
     return result
Example #7
    def __init__(self, catalogs=[], vo=None):
        """ Default constructor
    """
        self.valid = True
        self.timeout = 180
        self.readCatalogs = []
        self.writeCatalogs = []
        self.vo = vo
        if not vo:
            result = getVOfromProxyGroup()
            if not result['OK']:
                # __init__ cannot return a value; flag the instance as invalid instead
                self.valid = False
                return
            self.vo = result['Value']
        self.opHelper = Operations(vo=self.vo)
        self.reHelper = Resources(vo=self.vo)

        if type(catalogs) in types.StringTypes:
            catalogs = [catalogs]
        if catalogs:
            res = self._getSelectedCatalogs(catalogs)
        else:
            res = self._getCatalogs()
        if not res['OK']:
            self.valid = False
        elif (len(self.readCatalogs) == 0) and (len(self.writeCatalogs) == 0):
            self.valid = False
Example #8
    def getCEStatus(self):
        """ Method to return information on running and pending jobs.
        We hope to satisfy both instances that use robot proxies and those which use proper configurations.
    """

        result = self._prepareProxy()
        if not result["OK"]:
            gLogger.error("ARCComputingElement: failed to set up proxy", result["Message"])
            return result
        # only touch X509_USER_PROXY once the proxy is known to be in place
        self.usercfg.ProxyPath(os.environ["X509_USER_PROXY"])

        # Try to find out which VO we are running for.
        vo = ""
        res = getVOfromProxyGroup()
        if res["OK"]:
            vo = res["Value"]

        result = S_OK()
        result["SubmittedJobs"] = 0
        if not vo:
            # Presumably the really proper way forward once the infosys-discuss WG comes up with a solution
            # and it is implemented. Needed for DIRAC instances which use robot certificates for pilots.
            endpoints = [
                arc.Endpoint(
                    "ldap://" + self.ceHost + "/MDS-Vo-name=local,o=grid",
                    arc.Endpoint.COMPUTINGINFO,
                    "org.nordugrid.ldapng",
                )
            ]
            retriever = arc.ComputingServiceRetriever(self.usercfg, endpoints)
            retriever.wait()  # Takes a bit of time to get and parse the ldap information
            targets = retriever.GetExecutionTargets()
            ceStats = targets[0].ComputingShare
            gLogger.debug("Running jobs for CE %s : %s" % (self.ceHost, ceStats.RunningJobs))
            gLogger.debug("Waiting jobs for CE %s : %s" % (self.ceHost, ceStats.WaitingJobs))
            result["RunningJobs"] = ceStats.RunningJobs
            result["WaitingJobs"] = ceStats.WaitingJobs
        else:
            # The system which works properly at present for ARC CEs that are configured correctly.
            # But for this we need the VO to be known - ask me (Raja) for the whole story if interested.
            cmd = 'ldapsearch -x -LLL -H ldap://%s:2135 -b mds-vo-name=resource,o=grid "(GlueVOViewLocalID=%s)"' % (
                self.ceHost,
                vo.lower(),
            )
            res = shellCall(0, cmd)
            if not res["OK"]:
                gLogger.debug("Could not query CE %s - is it down?" % self.ceHost)
                return res
            try:
                ldapValues = res["Value"][1].split("\n")
                running = [lValue for lValue in ldapValues if "GlueCEStateRunningJobs" in lValue]
                waiting = [lValue for lValue in ldapValues if "GlueCEStateWaitingJobs" in lValue]
                result["RunningJobs"] = int(running[0].split(":")[1])
                result["WaitingJobs"] = int(waiting[0].split(":")[1])
            except IndexError:
                res = S_ERROR("Unknown ldap failure for site %s" % self.ceHost)
                return res

        return result
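
The ldapsearch parsing shared by the CE status examples is easy to exercise on its own. A small sketch with fabricated GLUE attribute lines (the 'Name: value' format is what the split(":")[1] relies on):

def parseGlueJobCounts(ldapOutput):
    # mirror the examples: take the first matching attribute line and parse
    # the integer after the colon; IndexError means the CE did not publish it
    lines = ldapOutput.split("\n")
    running = [l for l in lines if 'GlueCEStateRunningJobs' in l]
    waiting = [l for l in lines if 'GlueCEStateWaitingJobs' in l]
    return int(running[0].split(":")[1]), int(waiting[0].split(":")[1])

# fabricated sample output, for illustration only
sample = "GlueCEStateRunningJobs: 12\nGlueCEStateWaitingJobs: 3"
assert parseGlueJobCounts(sample) == (12, 3)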
Example #9
    def createCatalog(self,
                      catalogName,
                      useProxy=False,
                      vo=None,
                      catalogConfig={}):
        """ Create a file catalog object from its name and CS description
    """
        if useProxy:
            catalog = FileCatalogProxyClient(catalogName)
            return S_OK(catalog)

        # get the CS description first
        catConfig = catalogConfig
        if not catConfig:
            if not vo:
                result = getVOfromProxyGroup()
                if not result['OK']:
                    return result
                vo = result['Value']
            reHelper = Resources(vo=vo)
            result = reHelper.getCatalogOptionsDict(catalogName)
            if not result['OK']:
                return result
            catConfig = result['Value']

        catalogType = catConfig.get('CatalogType', catalogName)
        catalogURL = catConfig.get('CatalogURL', '')

        self.log.verbose('Creating %s client' % catalogName)
        moduleRootPaths = getInstalledExtensions()
        for moduleRootPath in moduleRootPaths:
            gLogger.verbose("Trying to load from root path %s" %
                            moduleRootPath)
            #moduleFile = os.path.join( rootPath, moduleRootPath, "Resources", "Catalog", "%sClient.py" % catalogType )
            #gLogger.verbose( "Looking for file %s" % moduleFile )
            #if not os.path.isfile( moduleFile ):
            #  continue
            try:
                # This enforces the convention that the plug in must be named after the file catalog
                moduleName = "%sClient" % (catalogType)
                catalogModule = __import__(
                    '%s.Resources.Catalog.%s' % (moduleRootPath, moduleName),
                    globals(), locals(), [moduleName])
            except ImportError, x:
                if "No module" in str(x):
                    gLogger.debug('Catalog module %s not found in %s' %
                                  (catalogType, moduleRootPath))
                else:
                    errStr = "Failed attempt to import %s from the path %s: %s" % (
                        catalogType, moduleRootPath, x)
                    gLogger.error(errStr)
                continue
            except Exception, x:
                errStr = "Failed attempt to import %s from the path %s: %s" % (
                    catalogType, moduleRootPath, x)
                gLogger.error(errStr)
                continue
Example #10
    def createCatalog(self,
                      catalogName,
                      useProxy=False,
                      vo=None,
                      catalogConfig={}):
        """ Create a file catalog object from its name and CS description
    """
        if useProxy:
            catalog = FileCatalogProxyClient(catalogName)
            return S_OK(catalog)

        # get the CS description first
        catConfig = catalogConfig
        if not catConfig:
            if not vo:
                result = getVOfromProxyGroup()
                if not result['OK']:
                    return result
                vo = result['Value']
            reHelper = Resources(vo=vo)
            result = reHelper.getCatalogOptionsDict(catalogName)
            if not result['OK']:
                return result
            catConfig = result['Value']

        catalogType = catConfig.get('CatalogType', catalogName)
        catalogURL = catConfig.get('CatalogURL', '')

        self.log.verbose('Creating %s client' % catalogName)

        objectLoader = ObjectLoader()
        result = objectLoader.loadObject(
            'Resources.Catalog.%sClient' % catalogType, catalogType + 'Client')
        if not result['OK']:
            gLogger.error('Failed to load catalog object: %s' %
                          result['Message'])
            return result

        catalogClass = result['Value']

        try:
            if catalogType in ['LcgFileCatalogCombined', 'LcgFileCatalog']:
                # The LFC special case
                infoSys = catConfig.get('LcgGfalInfosys', '')
                host = catConfig.get('MasterHost', '')
                catalog = catalogClass(infoSys, host)
            else:
                if catalogURL:
                    catalog = catalogClass(url=catalogURL)
                else:
                    catalog = catalogClass()
            self.log.debug('Loaded module %sClient' % catalogType)
            return S_OK(catalog)
        except Exception, x:
            errStr = "Failed to instantiate %s()" % (catalogType)
            gLogger.exception(errStr, lException=x)
            return S_ERROR(errStr)
Example #11
def main():
    Script.setUsageMessage("\n".join([
        "Get VM nodes information",
        "Usage:",
        "%s [option]... [cfgfile]" % Script.scriptName,
        "Arguments:",
        " cfgfile: DIRAC Cfg with description of the configuration (optional)",
    ]))
    Script.registerSwitch("S:", "Site=", "Site Name", setSite)
    Script.registerSwitch("C:", "CE=", "Cloud Endpoint Name ", setCE)
    Script.registerSwitch("I:", "Image=", "Image Name", setImage)
    Script.registerSwitch("v:", "vo=", "VO name", setVO)
    Script.parseCommandLine(ignoreErrors=True)
    args = Script.getExtraCLICFGFiles()

    from DIRAC.WorkloadManagementSystem.Client.VMClient import VMClient
    from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
    from DIRAC.Core.Utilities.PrettyPrint import printTable

    siteList = None
    if site is not None:
        siteList = [s.strip() for s in site.split(",")]

    ceList = None
    if ce is not None:
        ceList = [c.strip() for c in ce.split(",")]

    voName = vo
    if voName is None:
        result = getVOfromProxyGroup()
        if result["OK"]:
            voName = result["Value"]

    records = []
    vmClient = VMClient()

    result = vmClient.getCEInstances(siteList, ceList, voName)
    if not result["OK"]:
        gLogger.error(result["Message"])
        DIRACExit(-1)

    for nodeID in result["Value"]:
        nodeDict = result["Value"][nodeID]
        record = [
            nodeDict["Site"],
            nodeDict["CEName"],
            nodeID,
            nodeDict["NodeName"],
            nodeDict["PublicIP"],
            nodeDict["State"],
        ]
        records.append(record)

    fields = ["Site", "Endpoint", "ID", "Name", "PublicIP", "State"]
    printTable(fields, records)
    DIRACExit(0)
Example #12
def constructUserLFNs(jobID, vo, owner, outputFiles, outputPath):
  """ This method is used to supplant the standard job wrapper output data policy
      for ILC.  The initial convention adopted for user output files is the following:
      If outputpath is not defined:
      <vo>/user/<initial e.g. s>/<owner e.g. sposs>/<yearMonth e.g. 2010_02>/<subdir>/<fileName>
      Otherwise:
      <vo>/user/<initial e.g. s>/<owner e.g. sposs>/<outputPath>/<fileName>
  """
  initial = owner[:1]
  subdir = str(jobID/1000)  
  timeTup = datetime.date.today().timetuple() 
  yearMonth = '%s_%s' % (timeTup[0], string.zfill(str(timeTup[1]), 2))
  outputLFNs = {}
  if not vo:
    #res = gConfig.getOption("/DIRAC/VirtualOrganization", "ilc")
    res = getVOfromProxyGroup()
    if not res['OK']:
      gLogger.error('Could not get VO from CS, assuming ilc')
      vo = 'ilc'
    else:
      vo = res['Value']
  ops = Operations(vo = vo)
  lfn_prefix = ops.getValue("LFNUserPrefix", "user")
      
  #Strip out any leading or trailing slashes but allow fine structure
  if outputPath:
    outputPathList = string.split(outputPath, os.sep)
    newPath = []
    for i in outputPathList:
      if i:
        newPath.append(i)
    outputPath = string.join(newPath, os.sep)
  
  if not type(outputFiles) == types.ListType:
    outputFiles = [outputFiles]
    
  for outputFile in outputFiles:
    #strip out any fine structure in the output file specified by the user, restrict to output file names
    #the output path field can be used to describe this    
    outputFile = outputFile.replace('LFN:', '')
    lfn = ''
    if outputPath:
      lfn = os.sep+os.path.join(vo, lfn_prefix, initial, owner, outputPath + os.sep + os.path.basename(outputFile))
    else:
      lfn = os.sep+os.path.join(vo, lfn_prefix, initial, owner, yearMonth, subdir, str(jobID)) + os.sep + os.path.basename(outputFile)
    outputLFNs[outputFile] = lfn
  
  outputData = outputLFNs.values()
  if outputData:
    gLogger.info('Created the following output data LFN(s):\n%s' % (string.join(outputData, '\n')))
  else:
    gLogger.info('No output LFN(s) constructed')
    
  return S_OK(outputData)
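
For concreteness, the user-LFN shape produced above, traced with invented inputs (jobID=1234567, owner='sposs', vo='ilc', the default 'user' prefix, no outputPath, run in February 2010):

import os

# initial = 'sposs'[:1] -> 's'; subdir = str(1234567 / 1000) -> '1234' (Python 2 division)
# yearMonth -> '2010_02'
lfn = (os.sep + os.path.join('ilc', 'user', 's', 'sposs', '2010_02', '1234', '1234567')
       + os.sep + 'histos.root')
# -> '/ilc/user/s/sposs/2010_02/1234/1234567/histos.root'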
Example #13
 def _getCatalogs( self ):
   
   # Get the eligible catalogs first:
   # look in the Operations section; if nothing is defined there,
   # fall back to /Resources for backward compatibility
   result = getVOfromProxyGroup()
   if not result['OK']:
     return result
   vo = result['Value']
   opHelper = Operations( vo = vo )
   result = opHelper.getSections( '/Services/FileCatalogs' )
   fileCatalogs = []
   operationsFlag = False
   if result['OK']:
     fileCatalogs = result['Value']
     operationsFlag = True
   else:   
     res = gConfig.getSections( self.rootConfigPath, listOrdered = True )
     if not res['OK']:
       errStr = "FileCatalog._getCatalogs: Failed to get file catalog configuration."
       gLogger.error( errStr, res['Message'] )
       return S_ERROR( errStr )
     fileCatalogs = res['Value']
   
   # Get the catalogs now    
   for catalogName in fileCatalogs:
     res = self._getCatalogConfigDetails( catalogName )
     if not res['OK']:
       return res
     catalogConfig = res['Value']
     if operationsFlag:
       result = opHelper.getOptionsDict( '/Services/FileCatalogs/%s' % catalogName )
       if not result['OK']:
         return result
       catalogConfig.update( result['Value'] )        
     if catalogConfig['Status'] == 'Active':
       res = self._generateCatalogObject( catalogName )
       if not res['OK']:
         return res
       oCatalog = res['Value']
       master = catalogConfig['Master']
       # If the catalog is read type
       if re.search( 'Read', catalogConfig['AccessType'] ):
         if master:
           self.readCatalogs.insert( 0, ( catalogName, oCatalog, master ) )
         else:
           self.readCatalogs.append( ( catalogName, oCatalog, master ) )
       # If the catalog is write type
       if re.search( 'Write', catalogConfig['AccessType'] ):
         if master:
           self.writeCatalogs.insert( 0, ( catalogName, oCatalog, master ) )
         else:
           self.writeCatalogs.append( ( catalogName, oCatalog, master ) )
   return S_OK()
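
The master/non-master handling above simply keeps master catalogs at the head of each list, so they are contacted first. Schematically (catalog names invented):

catalogs = []
for name, master in [('CatalogA', False), ('CatalogB', True)]:
    if master:
        catalogs.insert(0, name)  # masters go first, as in _getCatalogs
    else:
        catalogs.append(name)
assert catalogs == ['CatalogB', 'CatalogA']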
Example #14
  def getCEStatus(self):
    """ Method to return information on running and pending jobs.
        We hope to satisfy both instances that use robot proxies and those which use proper configurations.
    """

    result = self._prepareProxy()
    if not result['OK']:
      gLogger.error('ARCComputingElement: failed to set up proxy', result['Message'])
      return result
    self.usercfg.ProxyPath(os.environ['X509_USER_PROXY'])

    # Try to find out which VO we are running for.
    vo = ''
    res = getVOfromProxyGroup()
    if res['OK']:
      vo = res['Value']

    result = S_OK()
    result['SubmittedJobs'] = 0
    if not vo:
      # Presumably the really proper way forward once the infosys-discuss WG comes up with a solution
      # and it is implemented. Needed for DIRAC instances which use robot certificates for pilots.
      endpoints = [arc.Endpoint("ldap://" + self.ceHost + "/MDS-Vo-name=local,o=grid",
                                arc.Endpoint.COMPUTINGINFO, 'org.nordugrid.ldapng')]
      retriever = arc.ComputingServiceRetriever(self.usercfg, endpoints)
      retriever.wait()  # Takes a bit of time to get and parse the ldap information
      targets = retriever.GetExecutionTargets()
      ceStats = targets[0].ComputingShare
      gLogger.debug("Running jobs for CE %s : %s" % (self.ceHost, ceStats.RunningJobs))
      gLogger.debug("Waiting jobs for CE %s : %s" % (self.ceHost, ceStats.WaitingJobs))
      result['RunningJobs'] = ceStats.RunningJobs
      result['WaitingJobs'] = ceStats.WaitingJobs
    else:
      # The system which works properly at present for ARC CEs that are configured correctly.
      # But for this we need the VO to be known - ask me (Raja) for the whole story if interested.
      cmd = 'ldapsearch -x -LLL -H ldap://%s:2135 -b mds-vo-name=resource,o=grid "(GlueVOViewLocalID=%s)"' % (
          self.ceHost, vo.lower())
      res = shellCall(0, cmd)
      if not res['OK']:
        gLogger.debug("Could not query CE %s - is it down?" % self.ceHost)
        return res
      try:
        ldapValues = res['Value'][1].split("\n")
        running = [lValue for lValue in ldapValues if 'GlueCEStateRunningJobs' in lValue]
        waiting = [lValue for lValue in ldapValues if 'GlueCEStateWaitingJobs' in lValue]
        result['RunningJobs'] = int(running[0].split(":")[1])
        result['WaitingJobs'] = int(waiting[0].split(":")[1])
      except IndexError:
        res = S_ERROR('Unknown ldap failure for site %s' % self.ceHost)
        return res

    return result
Example #15
 def createCatalog( self, catalogName, useProxy = False, vo = None, catalogConfig = {} ):
   """ Create a file catalog object from its name and CS description
   """    
   if useProxy:
     catalog = FileCatalogProxyClient( catalogName )
     return S_OK( catalog )
   
   # get the CS description first
   catConfig = catalogConfig
   if not catConfig:
     if not vo:
       result = getVOfromProxyGroup()
       if not result['OK']:
         return result
       vo = result['Value']
     reHelper = Resources( vo = vo )
     result = reHelper.getCatalogOptionsDict( catalogName )
     if not result['OK']:
       return result
     catConfig = result['Value']
   
   catalogType = catConfig.get('CatalogType',catalogName)
   catalogURL = catConfig.get('CatalogURL','')
   
   self.log.verbose( 'Creating %s client' % catalogName )
   
   objectLoader = ObjectLoader()
   result = objectLoader.loadObject( 'Resources.Catalog.%sClient' % catalogType, catalogType+'Client' )
   if not result['OK']:
     gLogger.error( 'Failed to load catalog object: %s' % result['Message'] )
     return result
   
   catalogClass = result['Value']
    
   try:
     if catalogType in ['LcgFileCatalogCombined','LcgFileCatalog']:
       # The LFC special case
       infoSys = catConfig.get('LcgGfalInfosys','')
       host = catConfig.get('MasterHost','')
       catalog = catalogClass( infoSys, host )
     else:  
       if catalogURL:
         catalog = catalogClass( url = catalogURL )  
       else:  
         catalog = catalogClass()
     self.log.debug('Loaded module %sClient' % catalogType )
     return S_OK( catalog )
   except Exception, x:
     errStr = "Failed to instantiate %s()" % ( catalogType )
     gLogger.exception( errStr, lException = x )
     return S_ERROR( errStr )
Example #16
 def __init__(self, useProxy=False, vo=None):
   self.proxy = useProxy
   self.resourceStatus = ResourceStatus()
   self.vo = vo
   if self.vo is None:
     result = getVOfromProxyGroup()
     if result['OK']:
       self.vo = result['Value']
     else:
       raise RuntimeError("Can not get the current VO context")
   self.remotePlugins = []
   self.localPlugins = []
   self.name = ''
   self.options = {}
   self.protocols = {}
   self.storages = []
Example #17
 def __init__( self, useProxy = False, vo = None ):
   self.rootConfigPath = '/Resources/StorageElements'
   self.proxy = useProxy
   self.resourceStatus = ResourceStatus()
   self.vo = vo
   if self.vo is None:
     result = getVOfromProxyGroup()
     if result['OK']:
       self.vo = result['Value']
     else:
       raise RuntimeError( "Can not get the current VO context" )
   self.remotePlugins = []
   self.localPlugins = []
   self.name = ''
   self.options = {}
   self.protocolDetails = []
   self.storages = []
Example #18
  def __call__( self, name, plugins = None, vo = None, hideExceptions = False ):
    self.seCache.purgeExpired( expiredInSeconds = 60 )
    tId = threading.current_thread().ident

    if not vo:
      result = getVOfromProxyGroup()
      if not result['OK']:
        return
      vo = result['Value']

    argTuple = ( tId, name, plugins, vo )
    seObj = self.seCache.get( argTuple )

    if not seObj:
      seObj = StorageElementItem( name, plugins, vo, hideExceptions = hideExceptions )
      # Add the StorageElement to the cache for 1/2 hour
      self.seCache.add( argTuple, 1800, seObj )

    return seObj
Example #19
  def __call__( self, name, plugins = None, vo = None, hideExceptions = False ):
    self.seCache.purgeExpired( expiredInSeconds = 60 )
    tId = threading.current_thread().ident

    if not vo:
      result = getVOfromProxyGroup()
      if not result['OK']:
        return
      vo = result['Value']

    argTuple = ( tId, name, plugins, vo )
    seObj = self.seCache.get( argTuple )

    if not seObj:
      seObj = StorageElementItem( name, plugins, vo, hideExceptions = hideExceptions )
      # Add the StorageElement to the cache for 1/2 hour
      self.seCache.add( argTuple, 1800, seObj )

    return seObj
Example #20
 def __discoverSettings( self ):
   #Set the VO
   globalVO = CSGlobals.getVO()
   if globalVO:
     self.__vo = globalVO
   elif self.__uVO:
     self.__vo = self.__uVO
   elif self.__uGroup:
     self.__vo = Registry.getVOForGroup( self.__uGroup )
     if not self.__vo:
       self.__vo = False
   else:
     result = getVOfromProxyGroup()
     if result['OK']:
       self.__vo = result['Value']    
   #Set the setup
   self.__setup = False
   if self.__uSetup:
     self.__setup = self.__uSetup
   else:
     self.__setup = CSGlobals.getSetup()
Example #21
 def __discoverSettings(self):
     #Set the VO
     globalVO = CSGlobals.getVO()
     if globalVO:
         self.__vo = globalVO
     elif self.__uVO:
         self.__vo = self.__uVO
     elif self.__uGroup:
         self.__vo = Registry.getVOForGroup(self.__uGroup)
         if not self.__vo:
             self.__vo = False
     else:
         result = getVOfromProxyGroup()
         if result['OK']:
             self.__vo = result['Value']
     #Set the setup
     self.__setup = False
     if self.__uSetup:
         self.__setup = self.__uSetup
     else:
         self.__setup = CSGlobals.getSetup()
Example #22
 def __discoverSettings(self):
     """Discovers the vo and the setup"""
     # Set the VO
     globalVO = CSGlobals.getVO()
     if globalVO:
         self.__vo = globalVO
     elif self.__uVO:
         self.__vo = self.__uVO
     elif self.__uGroup:
         self.__vo = Registry.getVOForGroup(self.__uGroup)
         if not self.__vo:
             self.__vo = False
     else:
         result = getVOfromProxyGroup()
         if result["OK"]:
             self.__vo = result["Value"]
     # Set the setup
     self.__setup = False
     if self.__uSetup:
         self.__setup = self.__uSetup
     else:
         self.__setup = CSGlobals.getSetup()
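
The three __discoverSettings variants implement the same VO precedence. A condensed, hypothetical restatement (note that when a group is supplied but yields no VO, the code settles on False instead of falling through to the proxy):

def discoverVO(csGlobalVO, userVO, userGroup, voForGroup, proxyVO):
    if csGlobalVO:
        return csGlobalVO
    if userVO:
        return userVO
    if userGroup:
        return voForGroup or False  # no proxy fallback in this branch
    return proxyVO or False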
Example #23
 def getCEStatus( self ):
   """ Method to return information on running and pending jobs.
   """
   vo = ''
   result = getVOfromProxyGroup()
   if result['OK']:
     vo = result['Value']
   else: # A backup solution which may work
     vo = self.ceParameters['VO']
   voFilters = '(GlueCEAccessControlBaseRule=VOMS:/%s/*)' % vo
   voFilters += '(GlueCEAccessControlBaseRule=VOMS:/%s)' % vo
   voFilters += '(GlueCEAccessControlBaseRule=VO:%s)' % vo
   filt = '(&(GlueCEUniqueID=%s*)(|%s))' % ( self.ceHost, voFilters )
   result = ldapsearchBDII( filt, attr=None, host=None, base=None )
   ces = result['Value']
   filt = '(&(objectClass=GlueVOView)(|%s))' % ( voFilters )
   dn = ces[0]['dn']
   result = ldapsearchBDII( filt, attr=None, host=None, base = dn )
   stats = result['Value'][0]['attr']
   result['RunningJobs'] = int(stats["GlueCEStateRunningJobs"])
   result['WaitingJobs'] = int(stats["GlueCEStateTotalJobs"])
   result['SubmittedJobs'] = 0
   return result
Example #24
  def getReplicas(self, lfns, allStatus=False, timeout=120):
    """ Get the replicas of the given files
    """
    rpcClient = self._getRPC(timeout=timeout)
    result = rpcClient.getReplicas(lfns, allStatus)
    if not result['OK']:
      return result
    vo = getVOfromProxyGroup().get('Value', None)

    lfnDict = result['Value']
    seDict = result['Value'].get('SEPrefixes', {})
    for lfn in lfnDict['Successful']:
      for se in lfnDict['Successful'][lfn]:
        if not lfnDict['Successful'][lfn][se]:
          # The PFN was not returned, construct it on the fly
          # For some VOs the prefix can be non-standard
          voPrefix = seDict.get("VOPrefix", {}).get(se, {}).get(vo)
          sePrefix = seDict.get(se, '')
          prefix = voPrefix if voPrefix else sePrefix

          lfnDict['Successful'][lfn][se] = prefix + lfn

    return S_OK(lfnDict)
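
The prefix fallback in getReplicas can be tried in isolation with a fabricated SEPrefixes structure (SE and VO names below are invented):

seDict = {
    'SOME-SE': '/dpm/example.org/home',  # generic prefix for the SE
    'VOPrefix': {'SOME-SE': {'myvo': '/dpm/example.org/myvo'}},
}
vo, se, lfn = 'myvo', 'SOME-SE', '/myvo/user/f/file.root'
voPrefix = seDict.get('VOPrefix', {}).get(se, {}).get(vo)
prefix = voPrefix if voPrefix else seDict.get(se, '')  # VO-specific prefix wins
assert prefix + lfn == '/dpm/example.org/myvo/myvo/user/f/file.root'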
Example #25
    def getReplicas(self, lfns, allStatus=False, timeout=120):
        """ Get the replicas of the given files
    """
        rpcClient = self._getRPC(timeout=timeout)
        result = rpcClient.getReplicas(lfns, allStatus)
        if not result['OK']:
            return result
        vo = getVOfromProxyGroup().get('Value', None)

        lfnDict = result['Value']
        seDict = result['Value'].get('SEPrefixes', {})
        for lfn in lfnDict['Successful']:
            for se in lfnDict['Successful'][lfn]:
                if not lfnDict['Successful'][lfn][se]:
                    # The PFN was not returned, construct it on the fly
                    # For some VOs the prefix can be non-standard
                    voPrefix = seDict.get("VOPrefix", {}).get(se, {}).get(vo)
                    sePrefix = seDict.get(se, '')
                    prefix = voPrefix if voPrefix else sePrefix

                    lfnDict['Successful'][lfn][se] = prefix + lfn

        return S_OK(lfnDict)
Example #26
        ces_current_site = sites_ce[site]
        for ce in ces_current_site:
            if ce in vo_ces:
                curr_ces = final_dict[site]["CE"]
                curr_ces.append(ce)
                final_dict[site].update({"Tags":ldapTag(ce,vo),"CE":curr_ces})
                #final_dict[site]={"Tags":ldapTag(ce,vo),"CE":[ce]}
    ret_dict = {}
    for key in final_dict:
        if len(final_dict[key]['CE'])!=0:
            ret_dict[key]=final_dict[key]
    return ret_dict

if __name__ == "__main__":
    Script.parseCommandLine()
    vo = "glast.org"
    res = getVOfromProxyGroup()
    if not res['OK']:
        gLogger.error(res['Message'])
        gLogger.error('Could not get VO from CS, assuming glast.org')
        dexit(1)
    else:
        gLogger.info(res)
        vo = res['Value']
    d = main(vo)
    for key in d:
        print('Name of DIRAC site %s\nName of CEs: %s'%(key,str(d[key]["CE"])))
        for tag in d[key]["Tags"]:
            print('\t%s'%tag)
        print('\n')
Example #27
def main():

    from DIRAC import S_OK, gLogger, gConfig, exit as DIRACExit

    ceFlag = False
    seFlag = False
    voName = None

    def setCEFlag(args_):
        nonlocal ceFlag  # these flags are locals of main(), not module globals
        ceFlag = True

    def setSEFlag(args_):
        nonlocal seFlag
        seFlag = True

    def setVOName(args):
        nonlocal voName
        voName = args

    Script.registerSwitch("C", "ce", "Get CE info", setCEFlag)
    Script.registerSwitch("S", "se", "Get SE info", setSEFlag)
    Script.registerSwitch(
        "V:", "vo=",
        "Get resources for the given VO. If not set, taken from the proxy",
        setVOName)

    Script.parseCommandLine(ignoreErrors=True)

    from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
    from DIRAC.ConfigurationSystem.Client.Helpers import Resources
    from DIRAC.Core.Utilities.PrettyPrint import printTable
    from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
    from DIRAC.Resources.Storage.StorageElement import StorageElement
    from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
    from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus

    def printCEInfo(voName):

        resultQueues = Resources.getQueues(community=voName)
        if not resultQueues["OK"]:
            gLogger.error("Failed to get CE information")
            DIRACExit(-1)

        fields = ("Site", "CE", "CEType", "Queue", "Status")
        records = []

        # get list of usable sites within this cycle
        resultMask = SiteStatus().getUsableSites()
        if not resultMask["OK"]:
            return resultMask
        siteMaskList = resultMask.get("Value", [])

        rssClient = ResourceStatus()

        for site in resultQueues["Value"]:
            siteStatus = "Active" if site in siteMaskList else "InActive"
            siteNew = True
            for ce in resultQueues["Value"][site]:

                ceStatus = siteStatus
                if rssClient.rssFlag:
                    result = rssClient.getElementStatus(ce, "ComputingElement")
                    if result["OK"]:
                        ceStatus = result["Value"][ce]["all"]

                ceNew = True
                for queue in resultQueues["Value"][site][ce]["Queues"]:
                    pSite = site if siteNew else ""
                    pCE = ""
                    ceType = ""
                    if ceNew:
                        pCE = ce
                        ceType = resultQueues["Value"][site][ce]["CEType"]
                    records.append((pSite, pCE, ceType, queue, ceStatus))
                    ceNew = False
                    siteNew = False

        gLogger.notice(
            printTable(fields, records, printOut=False, columnSeparator="  "))
        return S_OK()

    def printSEInfo(voName):

        fields = ("SE", "Status", "Protocols", "Aliases")
        records = []

        for se in DMSHelpers(voName).getStorageElements(
        ):  # this will get the full list of SEs, not only the vo's ones.
            seObject = StorageElement(se)
            if not (seObject.vo and voName in seObject.vo.strip().split(",")
                    or not seObject.voName):
                continue

            result = seObject.status()
            status = []
            for statusType in ["Write", "Read"]:
                if result[statusType]:
                    status.append(statusType)

            if status:
                status = "/".join(status)
            else:
                status = "InActive"

            records.append((se, status, ",".join([
                seProtocol["Protocol"]
                for seProtocol in seObject.protocolOptions
            ])))

        gLogger.notice(
            printTable(fields, records, printOut=False, columnSeparator="  "))
        return S_OK()

    if not voName:
        # Get the current VO
        result = getVOfromProxyGroup()
        if not result["OK"]:
            gLogger.error("No proxy found, please login")
            DIRACExit(-1)
        voName = result["Value"]
    else:
        result = gConfig.getSections("/Registry/VO")
        if not result["OK"]:
            gLogger.error("Failed to contact the CS")
            DIRACExit(-1)
        if voName not in result["Value"]:
            gLogger.error("Invalid VO name")
            DIRACExit(-1)

    if not (ceFlag or seFlag):
        gLogger.error("Resource type is not specified")
        DIRACExit(-1)

    if ceFlag:
        result = printCEInfo(voName)
        if not result["OK"]:
            gLogger.error(result["Message"])
            DIRACExit(-1)
    if seFlag:
        result = printSEInfo(voName)
        if not result["OK"]:
            gLogger.error(result["Message"])
            DIRACExit(-1)

    DIRACExit(0)
Example #28
def constructProductionLFNs(paramDict):
  """ Used for local testing of a workflow, a temporary measure until
      LFN construction is tidied.  This works using the workflow commons for
      on the fly construction.
  """
  keys = ['PRODUCTION_ID', 'JOB_ID', 'JobType', 'outputList']
  for k in keys:
    if not paramDict.has_key(k):
      return S_ERROR('%s not defined' % k)

  productionID = paramDict['PRODUCTION_ID']
  jobID = paramDict['JOB_ID']
#  wfMode = paramDict['dataType']
  #wfLfnprefix=paramDict['lfnprefix']
  #wfLfnpostfix=paramDict['lfnpostfix']
  wfMask = ""
  # wfMask = paramDict['outputDataFileMask']
  if not type(wfMask) == type([]):
    wfMask = [i.lower().strip() for i in wfMask.split(';')]
  wfType = paramDict['JobType']
  outputList = paramDict['outputList']
  inputData = ''
  if paramDict.has_key('InputData'):
    inputData = paramDict['InputData']

  res = getVOfromProxyGroup()
  #res = gConfig.getOption("/DIRAC/VirtualOrganization", "ilc")
  if not res['OK']:
    gLogger.error('Could not get VO from CS, assuming ilc')
    vo = 'ilc'
  else:
    vo = res['Value']
  fileTupleList = []
  #gLogger.verbose('wfLfnprefix = %s, wfLfnpostfix = %s, wfMask = %s, wfType=%s' %(wfLfnprefix,wfLfnpostfix,wfMask,wfType))
  gLogger.verbose('outputList %s' % (outputList))
  for info in outputList:
    #Nasty check on whether the created code parameters were not updated e.g. when changing defaults in a workflow
    fileName = info['outputFile']
    #rename to take care of correct path
    fileName = getProdFilename(fileName, int(productionID), int(jobID))
    #index=0
    #if not re.search('^\d',fileName[index]):
    #  index+=1
    #if not fileName[index]==str(productionID).zfill(8):
    #  fileName[index]=str(productionID).zfill(8)
    #if not fileName[index+1]==str(jobID).zfill(8):
    #  fileName[index+1]=str(jobID).zfill(8)
    fileTupleList.append((info['outputPath'], fileName))

  lfnRoot = ''
  debugRoot = ''
  #if inputData:
  #  gLogger.verbose('Making LFN_ROOT for job with inputdata: %s' %(inputData))
  #  lfnRoot = _getLFNRoot(inputData,wfLfnpostfix)
  #  debugRoot= _getLFNRoot('','debug',wfLfnpostfix)   
  #else:
  #  lfnRoot = _getLFNRoot('',wfLfnprefix,wfLfnpostfix)
  #  gLogger.verbose('LFN_ROOT is: %s' %(lfnRoot))
  #  debugRoot= _getLFNRoot('','debug',wfLfnpostfix)
  #lfnRoot = 
  #gLogger.verbose('LFN_ROOT is: %s' %(lfnRoot))
  #if not lfnRoot:
  #  return S_ERROR('LFN root could not be constructed')

  #Get all LFN(s) to both output data and BK lists at this point (fine for BK)
  outputData = []
  #bkLFNs = []
  debugLFNs = []
  for fileTuple in fileTupleList:
    #lfn = _makeProductionLfn(str(jobID).zfill(8),lfnRoot,fileTuple,wfLfnprefix,str(productionID).zfill(8))
    lfn = fileTuple[0] + "/" + str(productionID).zfill(8) + "/" + str(int(jobID)/1000).zfill(3) + "/" + fileTuple[1]
    if lfn.count('//'):
      lfn = lfn.replace('//','/')
    outputData.append(lfn)
    #bkLFNs.append(lfn)
    if debugRoot:
      #debugLFNs.append(_makeProductionLfn(str(jobID).zfill(8),debugRoot,fileTuple,wfLfnprefix,str(productionID).zfill(8)))
      debugLFNs.append("/" + vo + "/prod/debug/" + str(productionID).zfill(8))
  #if debugRoot:
  # debugLFNs.append(_makeProductionLfn(str(jobID).zfill(8),debugRoot,('%s_core' % str(jobID).zfill(8) ,'core'),wfLfnprefix,str(productionID).zfill(8)))

  #Get log file path - unique for all modules
  #logPath = _makeProductionPath(str(jobID).zfill(8),lfnRoot,'LOG',wfLfnprefix,str(productionID).zfill(8),log=True)
  logPathtemp = fileTupleList[0][0].split("/")
  logPathroot = string.join(logPathtemp[0:len(logPathtemp)-1], "/")
  #TODO adjust for ILD
  logPath = logPathroot + "/LOG/" + str(productionID).zfill(8)
  logFilePath = ['%s/%s' % (logPath, str(int(jobID)/1000).zfill(3))]
  logTargetPath = ['%s/%s_%s.tar' % (logPath, str(productionID).zfill(8), str(int(jobID)).zfill(3))]
  #[ aside, why does makeProductionPath not append the jobID itself ????
  #  this is really only used in one place since the logTargetPath is just written to a text file (should be reviewed)... ]

  #Strip output data according to file mask
  if wfMask:
    newOutputData = []
    #newBKLFNs = []
    for od in outputData:
      for i in wfMask:
        if re.search('.%s$' % i, od):
          if not od in newOutputData:
            newOutputData.append(od)
            
    #for bk in bkLFNs:
    #  newBKLFNs.append(bk)
    outputData = newOutputData
    #bkLFNs = newBKLFNs

  if not outputData:
    gLogger.info('No output data LFN(s) constructed')
  else:
    gLogger.verbose('Created the following output data LFN(s):\n%s' % (string.join(outputData,'\n')))
  gLogger.verbose('Log file path is:\n%s' % logFilePath[0])
  gLogger.verbose('Log target path is:\n%s' % logTargetPath[0])
  #if bkLFNs:
  #  gLogger.verbose('BookkeepingLFN(s) are:\n%s' %(string.join(bkLFNs,'\n')))
  if debugLFNs:
    gLogger.verbose('DebugLFN(s) are:\n%s' % (string.join(debugLFNs, '\n')))
  jobOutputs = {'ProductionOutputData' : outputData, 'LogFilePath' : logFilePath,
                'LogTargetPath' : logTargetPath, 'DebugLFNs' : debugLFNs}
  return S_OK(jobOutputs)
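
Again with invented numbers, the production LFN built by the loop above (productionID=42, jobID=1234, and a made-up outputPath '/ilc/prod/sim'):

# str(42).zfill(8) -> '00000042'; str(int(1234) / 1000).zfill(3) -> '001' (Python 2 division)
lfn = '/ilc/prod/sim' + '/' + '00000042' + '/' + '001' + '/' + 'sim_42_1234.slcio'
# -> '/ilc/prod/sim/00000042/001/sim_42_1234.slcio'; any '//' would be collapsed to '/'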
Example #29
def main():
    Script.registerSwitch("", "FromSE=", "SE1[,SE2,...]")
    Script.registerSwitch("", "TargetSE=", "SE1[,SE2,...]")
    Script.registerSwitch(
        "", "OutputFile=",
        "CSV output file (default /tmp/protocol-matrix.csv)")
    Script.registerSwitch(
        "", "Bidirection",
        "If FromSE or TargetSE are specified, make a square matrix ")
    Script.registerSwitch("", "FTS", "Display the protocols sent to FTS")
    Script.registerSwitch("", "TPC",
                          "Display the protocols tried for interactive TPC")
    Script.registerSwitch("", "Multihop", "Display the intermediate hop")
    Script.registerSwitch("", "Full", "Do not factorize with base SE")
    Script.registerSwitch("", "ExcludeSE=",
                          "SEs to not take into account for the matrix")

    Script.parseCommandLine()
    from DIRAC import gConfig, gLogger, S_ERROR
    from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
    from DIRAC.DataManagementSystem.private.FTS3Utilities import getFTS3Plugin
    from DIRAC.Resources.Storage.StorageElement import StorageElement
    from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup

    fromSE = []
    targetSE = []
    excludeSE = []
    outputFile = "/tmp/protocol-matrix.csv"
    bidirection = False
    ftsTab = False
    tpcTab = False
    multihopTab = False
    fullOutput = False
    for switch in Script.getUnprocessedSwitches():
        if switch[0] == "FromSE":
            fromSE = switch[1].split(",")
        elif switch[0] == "TargetSE":
            targetSE = switch[1].split(",")
        elif switch[0] == "ExcludeSE":
            excludeSE = switch[1].split(",")
        elif switch[0] == "OutputFile":
            outputFile = switch[1]
        elif switch[0] == "Bidirection":
            bidirection = True
        elif switch[0] == "FTS":
            ftsTab = True
        elif switch[0] == "TPC":
            tpcTab = True
        elif switch[0] == "Multihop":
            multihopTab = True
        elif switch[0] == "Full":
            fullOutput = True

    if not any([ftsTab, tpcTab, multihopTab]):
        ftsTab = tpcTab = multihopTab = True

    fts3Plugin = getFTS3Plugin()
    thirdPartyProtocols = DMSHelpers().getThirdPartyProtocols()

    # List all the BaseSE
    seBases = gConfig.getSections("/Resources/StorageElementBases")["Value"]
    # construct a dict { baseSE : <inherited storages>}
    seForSeBases = {}

    allSEs = gConfig.getSections("/Resources/StorageElements/")["Value"]

    # Remove the SEs that we want to exclude
    allSEs = set(allSEs) - set(excludeSE)

    # We go through all the SEs and fill in the seForSEBases dict.
    # Basically, at the end of the loop, the dict will contain
    # for each baseSE an entry corresponding to one real storage (the first one)
    # and itself for each real non inherited SE
    for se in allSEs:
        baseSE = gConfig.getOption("/Resources/StorageElements/%s/BaseSE" %
                                   se).get("Value")
        if baseSE and not fullOutput:
            if baseSE not in seForSeBases:
                seForSeBases[baseSE] = se
        else:
            # If no baseSE, we put self
            seForSeBases[se] = se

    # Now let's take into account what source and destination we want.

    # If the user did not specify source or dest, generate everything
    if not fromSE and not targetSE:
        fromSE = list(seForSeBases)
        targetSE = list(seForSeBases)
    else:  # at least one of source or dest was specified

        # if bidirection, source and target should be the same
        if bidirection:
            if not fromSE and targetSE:  # we gave target, but no source
                fromSE = targetSE
            elif fromSE and not targetSE:  # we gave source but no target
                targetSE = fromSE
            elif fromSE and targetSE:  # we gave both
                fromSE = targetSE = list(set(fromSE + targetSE))

        else:  # no bidirection
            # only one side was given: default the other to all base SEs
            if not fromSE:
                fromSE = list(seForSeBases)
            elif not targetSE:
                targetSE = list(seForSeBases)

    fromSE = sorted(fromSE)
    targetSE = sorted(targetSE)

    gLogger.notice("Using sources: %s" % ",".join(fromSE))
    gLogger.notice("Using target: %s" % ",".join(targetSE))

    # Now we construct the SE object for each SE that we want to appear
    ses = {}
    for se in set(fromSE + targetSE):
        ses[se] = StorageElement(seForSeBases.get(se, se))

    ret = getVOfromProxyGroup()
    if not ret["OK"] or not ret.get("Value", ""):
        gLogger.error("Aborting, Bad Proxy:",
                      ret.get("Message", "Proxy does not belong to a VO!"))
        exit(1)
    vo = ret["Value"]
    gLogger.notice("Using the Virtual Organization:", vo)
    # dummy LFN, still has to follow lfn convention
    lfn = "/%s/toto.xml" % vo

    # Create a matrix of protocol src/dest

    tpMatrix = defaultdict(dict)
    ftsMatrix = defaultdict(dict)
    multihopMatrix = defaultdict(dict)

    # For each source and destination, generate the url pair, and the compatible third party protocols
    for src, dst in ((x, y) for x in fromSE for y in targetSE):

        if ftsTab:
            try:
                fts3TpcProto = fts3Plugin.selectTPCProtocols(
                    sourceSEName=ses[src].name, destSEName=ses[dst].name)
                res = ses[dst].generateTransferURLsBetweenSEs(
                    lfn, ses[src], fts3TpcProto)
            except ValueError as e:
                res = S_ERROR(str(e))
            if not res["OK"]:
                surls = "Error"
                gLogger.notice(
                    "Could not generate transfer URLS",
                    "src:%s, dst:%s, error:%s" % (src, dst, res["Message"]))
            else:
                # We only keep the protocol part of the url
                surls = "/".join(res["Value"]["Protocols"])
            ftsMatrix[src][dst] = "%s" % surls
            gLogger.verbose("%s -> %s: %s" % (src, dst, surls))

        # Add also the third party protocols
        if tpcTab:
            proto = ",".join(ses[dst].negociateProtocolWithOtherSE(
                ses[src], thirdPartyProtocols)["Value"])

            tpMatrix[src][dst] = "%s" % proto

            gLogger.verbose("%s -> %s: %s" % (src, dst, proto))

        if multihopTab:
            hop = fts3Plugin.findMultiHopSEToCoverUpForWLCGFailure(
                ses[src].name, ses[dst].name)
            multihopMatrix[src][dst] = hop

    # Write the matrix in the file
    with open(outputFile, "w") as csvfile:
        csvWriter = csv.writer(csvfile,
                               delimiter=";",
                               quoting=csv.QUOTE_MINIMAL)

        if tpcTab:
            csvWriter.writerow(["Direct TPC"])

            csvWriter.writerow(["src/dst"] + targetSE)

            for src in fromSE:
                srcRow = [src]
                for dst in targetSE:
                    srcRow.append(tpMatrix[src].get(dst, "NA"))
                csvWriter.writerow(srcRow)

            # make an empty line
            # csvWriter.writerow([""] * (len(targetSE) + 1))
            csvWriter.writerow([])
            csvWriter.writerow([])
            csvWriter.writerow([])

        if ftsTab:
            csvWriter.writerow(["FTS3 transfers"])

            csvWriter.writerow(["src/dst"] + targetSE)

            for src in fromSE:
                srcRow = [src]
                for dst in targetSE:
                    srcRow.append(ftsMatrix[src].get(dst, "NA"))
                csvWriter.writerow(srcRow)

            csvWriter.writerow([])
            csvWriter.writerow([])
            csvWriter.writerow([])

        if multihopTab:
            csvWriter.writerow(["Multihop"])

            csvWriter.writerow(["src/dst"] + targetSE)

            for src in fromSE:
                srcRow = [src]
                for dst in targetSE:
                    srcRow.append(multihopMatrix[src].get(dst, "NA"))
                csvWriter.writerow(srcRow)

            csvWriter.writerow([])
            csvWriter.writerow([])
            csvWriter.writerow([])
    gLogger.notice("Wrote Matrix to", outputFile)
Example #30
def main():
    global vo
    global noVOFlag
    global allVOsFlag

    Script.registerSwitch("V:", "vo=", "Virtual Organization", setVO)
    Script.registerSwitch("a", "all", "All Virtual Organizations flag",
                          setAllVO)
    Script.registerSwitch("n", "noVO",
                          "No Virtual Organizations assigned flag", setNoVO)

    Script.parseCommandLine()

    from DIRAC import gConfig, gLogger
    from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
    from DIRAC.Core.Utilities.PrettyPrint import printTable
    from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup

    storageCFGBase = "/Resources/StorageElements"

    res = gConfig.getSections(storageCFGBase, True)
    if not res['OK']:
        gLogger.error('Failed to get storage element info')
        gLogger.error(res['Message'])
        DIRACexit(1)

    gLogger.info("%s %s %s" %
                 ('Storage Element'.ljust(25), 'Read Status'.rjust(15),
                  'Write Status'.rjust(15)))

    seList = sorted(res['Value'])

    resourceStatus = ResourceStatus()

    res = resourceStatus.getElementStatus(seList, "StorageElement")
    if not res['OK']:
        gLogger.error("Failed to get StorageElement status for %s" %
                      str(seList))
        DIRACexit(1)

    fields = ['SE', 'ReadAccess', 'WriteAccess', 'RemoveAccess', 'CheckAccess']
    records = []

    if vo is None and not allVOsFlag:
        result = getVOfromProxyGroup()
        if not result['OK']:
            gLogger.error('Failed to determine the user VO')
            DIRACexit(1)
        vo = result['Value']

    for se, statusDict in res['Value'].items():

        # Check if the SE is allowed for the user VO
        if not allVOsFlag:
            voList = gConfig.getValue('/Resources/StorageElements/%s/VO' % se,
                                      [])
            if noVOFlag and voList:
                continue
            if voList and vo not in voList:
                continue

        record = [se]
        for status in fields[1:]:
            value = statusDict.get(status, 'Unknown')
            record.append(value)
        records.append(record)

    printTable(fields, records, numbering=False, sortField='SE')

    DIRACexit(0)
Example #31
gLogger.info( "%s %s %s" % ( 'Storage Element'.ljust( 25 ), 'Read Status'.rjust( 15 ), 'Write Status'.rjust( 15 ) ) )

seList = sorted( res[ 'Value' ] )

resourceStatus = ResourceStatus()

res = resourceStatus.getStorageElementStatus( seList )
if not res[ 'OK' ]:
  gLogger.error( "Failed to get StorageElement status for %s" % str( seList ) )
  DIRAC.exit( 1 )
  
fields = ['SE','ReadAccess','WriteAccess','RemoveAccess','CheckAccess']  
records = []

result = getVOfromProxyGroup()
if not result['OK']:
  gLogger.error( 'Failed to determine the user VO' )
  DIRAC.exit( -1 )
vo = result['Value']

for se, statusDict in res[ 'Value' ].items():

  # Check if the SE is allowed for the user VO
  voList = gConfig.getValue( '/Resources/StorageElements/%s/VO' % se, [] )
  if voList and not vo in voList:
    continue 
  
  record = [se]
  for status in fields[1:]:
    value = statusDict.get( status, 'Unknown' )
    record.append( value )
  records.append( record )

printTable( fields, records, numbering = False, sortField = 'SE' )

DIRAC.exit( 0 )
Example #32
def constructUserLFNs(jobID, vo, owner, outputFiles, outputPath):
    """ This method is used to supplant the standard job wrapper output data policy
  for ILC.  The initial convention adopted for user output files is the following:

  If outputpath is not defined:
   * <vo>/user/<initial e.g. s>/<owner e.g. sposs>/<yearMonth e.g. 2010_02>/<subdir>/<fileName>
  Otherwise:
   * <vo>/user/<initial e.g. s>/<owner e.g. sposs>/<outputPath>/<fileName>

  :param int jobID: the jobID
  :param string vo: the vo of the owners proxy
  :param string owner: the username
  :param list outputFiles: the list of outputfiles found for the job
  :param string outputPath: the outputpath defined for the job
  :returns: S_OK with list of output file lfns
  """
    initial = owner[:1]
    subdir = str(jobID / 1000)
    timeTup = datetime.date.today().timetuple()
    yearMonth = '%s_%s' % (timeTup[0], string.zfill(str(timeTup[1]), 2))
    outputLFNs = {}
    if not vo:
        res = getVOfromProxyGroup()
        if not res['OK']:
            gLogger.error('Could not get VO from CS, assuming ilc')
            vo = 'ilc'
        else:
            vo = res['Value']
    ops = Operations(vo=vo)
    lfn_prefix = ops.getValue("LFNUserPrefix", "user")

    #Strip out any leading or trailing slashes but allow fine structure
    if outputPath:
        outputPathList = string.split(outputPath, os.sep)
        newPath = []
        for i in outputPathList:
            if i:
                newPath.append(i)
        outputPath = string.join(newPath, os.sep)

    if not isinstance(outputFiles, list):
        outputFiles = [outputFiles]

    for outputFile in outputFiles:
        #strip out any fine structure in the output file specified by the user, restrict to output file names
        #the output path field can be used to describe this
        outputFile = outputFile.replace('LFN:', '')
        lfn = ''
        if outputPath:
            lfn = os.sep + os.path.join(
                vo, lfn_prefix, initial, owner,
                outputPath + os.sep + os.path.basename(outputFile))
        else:
            lfn = os.sep + os.path.join(
                vo, lfn_prefix, initial, owner, yearMonth, subdir,
                str(jobID)) + os.sep + os.path.basename(outputFile)
        outputLFNs[outputFile] = lfn

    outputData = outputLFNs.values()
    if outputData:
        gLogger.info('Created the following output data LFN(s):\n%s' %
                     (string.join(outputData, '\n')))
    else:
        gLogger.info('No output LFN(s) constructed')

    return S_OK(outputData)
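
Purely as illustration, a minimal sketch of how this helper behaves; the import path is hypothetical and the argument values are placeholders, not taken from this page:

# Hypothetical usage sketch; the import path below is an assumption.
# from ILCDIRAC.Core.Utilities.UserLFNs import constructUserLFNs

res = constructUserLFNs(12345, 'ilc', 'sposs', ['output.root'], '')
if res['OK']:
    # With no outputPath the LFN follows
    # /<vo>/user/<initial>/<owner>/<yearMonth>/<jobID/1000>/<jobID>/<file>,
    # e.g. /ilc/user/s/sposs/2010_02/12/12345/output.root
    print(res['Value'])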
Example #33
    def __init__(self, name, protocols=None, vo=None):
        """ c'tor

    :param str name: SE name
    :param list protocols: requested protocols
    """

        self.vo = vo
        if not vo:
            result = getVOfromProxyGroup()
            if not result["OK"]:
                return result
            self.vo = result["Value"]
        self.opHelper = Operations(vo=self.vo)
        useProxy = gConfig.getValue("/LocalSite/StorageElements/%s/UseProxy" % name, False)
        if not useProxy:
            useProxy = self.opHelper.getValue("/Services/StorageElements/%s/UseProxy" % name, False)

        self.valid = True
        if protocols is None:
            res = StorageFactory(useProxy).getStorages(name, protocolList=[])
        else:
            res = StorageFactory(useProxy).getStorages(name, protocolList=protocols)
        if not res["OK"]:
            self.valid = False
            self.name = name
            self.errorReason = res["Message"]
        else:
            factoryDict = res["Value"]
            self.name = factoryDict["StorageName"]
            self.options = factoryDict["StorageOptions"]
            self.localProtocols = factoryDict["LocalProtocols"]
            self.remoteProtocols = factoryDict["RemoteProtocols"]
            self.storages = factoryDict["StorageObjects"]
            self.protocolOptions = factoryDict["ProtocolOptions"]
            self.turlProtocols = factoryDict["TurlProtocols"]

        self.log = gLogger.getSubLogger("SE[%s]" % self.name)

        self.readMethods = [
            "getFile",
            "getAccessUrl",
            "getTransportURL",
            "prestageFile",
            "prestageFileStatus",
            "getDirectory",
        ]

        self.writeMethods = [
            "retransferOnlineFile",
            "putFile",
            "replicateFile",
            "pinFile",
            "releaseFile",
            "createDirectory",
            "putDirectory",
        ]

        self.removeMethods = ["removeFile", "removeDirectory"]

        self.checkMethods = [
            "exists",
            "getDirectoryMetadata",
            "getDirectorySize",
            "getFileSize",
            "getFileMetadata",
            "listDirectory",
            "isDirectory",
            "isFile",
        ]

        self.okMethods = [
            "getLocalProtocols",
            "getPfnForProtocol",
            "getPfnForLfn",
            "getPfnPath",
            "getProtocols",
            "getRemoteProtocols",
            "getStorageElementName",
            "getStorageElementOption",
            "getStorageParameters",
            "isLocalSE",
        ]
Example #34
    def __init__(self, name, plugins=None, vo=None, hideExceptions=False):
        """ c'tor

    :param str name: SE name
    :param list plugins: requested storage plugins
    :param vo: VO name
    """

        self.methodName = None

        if vo:
            self.vo = vo
        else:
            result = getVOfromProxyGroup()
            if not result['OK']:
                return
            self.vo = result['Value']
        self.opHelper = Operations(vo=self.vo)

        # These things will soon have to go as well. 'AccessProtocol.1' is all but flexible.
        proxiedProtocols = gConfig.getValue(
            '/LocalSite/StorageElements/ProxyProtocols', "").split(',')
        self.useProxy = (gConfig.getValue(
            "/Resources/StorageElements/%s/AccessProtocol.1/Protocol" % name,
            "UnknownProtocol") in proxiedProtocols)

        if not self.useProxy:
            self.useProxy = gConfig.getValue(
                '/LocalSite/StorageElements/%s/UseProxy' % name, False)
        if not self.useProxy:
            self.useProxy = self.opHelper.getValue(
                '/Services/StorageElements/%s/UseProxy' % name, False)

        self.valid = True
        if plugins is None:
            res = StorageFactory(useProxy=self.useProxy,
                                 vo=self.vo).getStorages(
                                     name,
                                     pluginList=[],
                                     hideExceptions=hideExceptions)
        else:
            res = StorageFactory(useProxy=self.useProxy,
                                 vo=self.vo).getStorages(
                                     name,
                                     pluginList=plugins,
                                     hideExceptions=hideExceptions)

        if not res['OK']:
            self.valid = False
            self.name = name
            self.errorReason = res['Message']
        else:
            factoryDict = res['Value']
            self.name = factoryDict['StorageName']
            self.options = factoryDict['StorageOptions']
            self.localPlugins = factoryDict['LocalPlugins']
            self.remotePlugins = factoryDict['RemotePlugins']
            self.storages = factoryDict['StorageObjects']
            self.protocolOptions = factoryDict['ProtocolOptions']
            self.turlProtocols = factoryDict['TurlProtocols']

            for storage in self.storages:

                storage.setStorageElement(self)

        self.log = gLogger.getSubLogger("SE[%s]" % self.name)

        if self.valid:

            self.useCatalogURL = gConfig.getValue(
                '/Resources/StorageElements/%s/UseCatalogURL' % self.name,
                False)
            self.log.debug("useCatalogURL: %s" % self.useCatalogURL)

            self.__dmsHelper = DMSHelpers(vo=vo)

            # Allow SE to overwrite general operation config
            accessProto = self.options.get('AccessProtocols')
            self.localAccessProtocolList = accessProto if accessProto else self.__dmsHelper.getAccessProtocols()
            self.log.debug("localAccessProtocolList %s" % self.localAccessProtocolList)

            writeProto = self.options.get('WriteProtocols')
            self.localWriteProtocolList = writeProto if writeProto else self.__dmsHelper.getWriteProtocols()
            self.log.debug("localWriteProtocolList %s" % self.localWriteProtocolList)

        #                         'getTransportURL',
        self.readMethods = [
            'getFile', 'prestageFile', 'prestageFileStatus', 'getDirectory'
        ]

        self.writeMethods = [
            'retransferOnlineFile', 'putFile', 'replicateFile', 'pinFile',
            'releaseFile', 'createDirectory', 'putDirectory'
        ]

        self.removeMethods = ['removeFile', 'removeDirectory']

        self.checkMethods = [
            'exists',
            'getDirectoryMetadata',
            'getDirectorySize',
            'getFileSize',
            'getFileMetadata',
            'listDirectory',
            'isDirectory',
            'isFile',
        ]

        self.okMethods = [
            'getLocalProtocols', 'getProtocols', 'getRemoteProtocols',
            'getStorageElementName', 'getStorageParameters', 'getTransportURL',
            'isLocalSE'
        ]

        self.__fileCatalog = None
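
A minimal usage sketch, assuming this constructor belongs to DIRAC's StorageElement class (the import matches the one used in Example #48 below); the SE name and VO are placeholders:

# Sketch only: 'SOME-SE' and 'lhcb' are placeholders; a configured DIRAC environment
# with a valid proxy is assumed so the VO and CS lookups can succeed.
from DIRAC.Resources.Storage.StorageElement import StorageElement

se = StorageElement('SOME-SE', vo='lhcb')
if se.valid:
    print(se.name, se.localPlugins, se.remotePlugins)
else:
    print('Could not build the SE:', se.errorReason)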
Example #35
        fromSE = list(seForSeBases)
      elif not targetSE:
        targetSE = list(seForSeBases)

  fromSE = sorted(fromSE)
  targetSE = sorted(targetSE)

  gLogger.notice("Using sources: %s" % ','.join(fromSE))
  gLogger.notice("Using target: %s" % ','.join(targetSE))

  # Now we construct the SE object for each SE that we want to appear
  ses = {}
  for se in set(fromSE + targetSE):
    ses[se] = StorageElement(seForSeBases[se])

  ret = getVOfromProxyGroup()
  if not ret['OK'] or not ret.get('Value', ''):
    gLogger.error('Aborting, Bad Proxy:', ret.get('Message', 'Proxy does not belong to a VO!'))
    exit(1)
  vo = ret['Value']
  gLogger.notice('Using the Virtual Organization:', vo)
  # dummy LFN, still has to follow lfn convention
  lfn = '/%s/toto.xml' % vo

  # Create a matrix of protocol src/dest

  tpMatrix = defaultdict(dict)

  # For each source and destination, generate the url pair, and the compatible third party protocols
  for src, dst in ((x, y) for x in fromSE for y in targetSE):
    res = ses[dst].generateTransferURLsBetweenSEs(lfn, ses[src], thirdPartyProtocols)
Example #36
    def execute(self):
        """ Main execution function.
        """
        #Have to work out if the module is part of the last step i.e. 
        #user jobs can have any number of steps and we only want 
        #to run the finalization once.
        currentStep = int(self.step_commons['STEP_NUMBER'])
        totalSteps = int(self.workflow_commons['TotalSteps'])
        if currentStep == totalSteps:
            self.lastStep = True
        else:
            self.log.verbose('Current step = %s, total steps of workflow = %s, UserJobFinalization '
                             'will enable itself only at the last workflow step.' % (currentStep, totalSteps))
            
        if not self.lastStep:
            #Not last step, do nothing, proceed happily.
            return S_OK()
        
        result = self.resolveInputVariables()
        if not result['OK']:
            self.log.error("Failed to resolve input parameters:", result['Message'])
            return result
        
        self.log.info('Initializing %s' % self.version)
        if not self.workflowStatus['OK'] or not self.stepStatus['OK']:
            ##Something went wrong in the step or the workflow, do nothing.
            self.log.verbose('Workflow status = %s, step status = %s' % (self.workflowStatus['OK'], 
                                                                       self.stepStatus['OK']))
            return S_OK('No output data upload attempted')
        
        self.request.RequestName = 'job_%d_request.xml' % int(self.jobID)
        self.request.JobID = self.jobID
        self.request.SourceComponent = "Job_%d" % int(self.jobID)
        
        if not self.userOutputData:
            self.log.info('No user output data is specified for this job, nothing to do')
            return S_OK('No output data to upload')
            
        #Determine the final list of possible output files for the
        #workflow and all the parameters needed to upload them.
        outputList = []
        possible_files= []
        for i in self.userOutputData:
            files = getGlobbedFiles(i)
            for possible_file in files:
                if possible_file in possible_files:
                    #Don't have twice the same file
                    continue
                outputList.append({'outputDataType' : i.split('.')[-1].upper(),#this would be used to sort the files in different dirs
                                   'outputDataSE' : self.userOutputSE,
                                   'outputFile' : os.path.basename(possible_file)})
                possible_files.append(os.path.basename(possible_file))
                
        self.log.info('Constructing user output LFN(s) for %s' % (', '.join(self.userOutputData)))
        if not self.jobID:
            self.jobID = 12345
        owner = ''
        if 'Owner' in self.workflow_commons:
            owner = self.workflow_commons['Owner']
        else:
            res = getCurrentOwner()
            if not res['OK']:
                self.log.error('Could not find proxy')
                return S_ERROR('Could not obtain owner from proxy')
            owner = res['Value']
        vo = ''
        if 'VO' in self.workflow_commons:
            vo = self.workflow_commons['VO']
        else:
            res = getVOfromProxyGroup()
            if not res['OK']:
                self.log.error('Failed finding the VO')
                return S_ERROR('Could not obtain VO from proxy')
            vo = res['Value']
        result = constructUserLFNs(int(self.jobID), vo, owner, 
                                   possible_files, self.userOutputPath)
        if not result['OK']:
            self.log.error('Could not create user LFNs', result['Message'])
            return result
        userOutputLFNs = result['Value']
        
        self.log.verbose('Calling getCandidateFiles( %s, %s)' % (outputList, userOutputLFNs))
        result = self.getCandidateFiles(outputList, userOutputLFNs)
        if not result['OK']:
            if not self.ignoreapperrors:
                self.log.error(result['Message'])
                self.setApplicationStatus(result['Message'])
                return S_OK()
        
        fileDict = result['Value']
        result = self.getFileMetadata(fileDict)
        if not result['OK']:
            if not self.ignoreapperrors:
                self.log.error(result['Message'])
                self.setApplicationStatus(result['Message'])
                return S_OK()
        
        if not result['Value']:
            if not self.ignoreapperrors:
                self.log.info('No output data files were determined to be uploaded for this workflow')
                self.setApplicationStatus('No Output Data Files To Upload')
                return S_OK()
        
        fileMetadata = result['Value']

        orderedSEs = self.userOutputSE
        
        
        self.log.info('Ordered list of output SEs is: %s' % (', '.join(orderedSEs)))    
        final = {}
        for fileName, metadata in fileMetadata.items():
            final[fileName] = metadata
            final[fileName]['resolvedSE'] = orderedSEs
        
        #At this point can exit and see exactly what the module will upload
        if not self.enable:
            self.log.info('Module is disabled by control flag, would have attempted \
to upload the following files %s' % ', '.join(final.keys()))
            for fileName, metadata in final.items():
                self.log.info('--------%s--------' % fileName)
                for n, v in metadata.items():
                    self.log.info('%s = %s' %(n, v))
            
            return S_OK('Module is disabled by control flag')
        
        #Instantiate the failover transfer client with the global request object
        failoverTransfer = FailoverTransfer(self.request)
        
        #One by one upload the files with failover if necessary
        replication = {}
        failover = {}
        uploaded = []
        if not self.failoverTest:
            for fileName, metadata in final.items():
                self.log.info("Attempting to store file %s to the following SE(s):\n%s" % (fileName, 
                                                                                           ', '.join(metadata['resolvedSE'])))
                replicateSE = ''
                result = failoverTransfer.transferAndRegisterFile(fileName, metadata['localpath'], metadata['lfn'],
                                                                  metadata['resolvedSE'], fileMetaDict = metadata, 
                                                                  fileCatalog = self.userFileCatalog)
                if not result['OK']:
                    self.log.error('Could not transfer and register %s with metadata:\n %s' % (fileName, metadata))
                    failover[fileName] = metadata
                else:
                    #Only attempt replication after successful upload
                    lfn = metadata['lfn']
                    uploaded.append(lfn)          
                    seList = metadata['resolvedSE']
                    
                    if 'uploadedSE' in result['Value']:
                        uploadedSE = result['Value']['uploadedSE']            
                        for se in seList:
                            if not se == uploadedSE:
                                replicateSE = se
                                break
                  
                if replicateSE and lfn:
                    self.log.info('Will attempt to replicate %s to %s' % (lfn, replicateSE))    
                    replication[lfn] = replicateSE            
        else:
            failover = final
        
        cleanUp = False
        for fileName, metadata in failover.items():
            random.shuffle(self.failoverSEs)
            targetSE = metadata['resolvedSE'][0]
            metadata['resolvedSE'] = self.failoverSEs
            result = failoverTransfer.transferAndRegisterFileFailover(fileName,
                                                                      metadata['localpath'],
                                                                      metadata['lfn'],
                                                                      targetSE,
                                                                      self.failoverSEs,
                                                                      fileMetaDict = metadata,
                                                                      fileCatalog = self.userFileCatalog)
            if not result['OK']:
                self.log.error('Could not transfer and register %s with metadata:\n %s' % (fileName, metadata))
                cleanUp = True
                continue  # for user jobs, continue even if one file completely fails
            else:
                lfn = metadata['lfn']
                uploaded.append(lfn)
        
        #For files correctly uploaded must report LFNs to job parameters
        if uploaded:
            report = ', '.join( uploaded )
            self.jobReport.setJobParameter( 'UploadedOutputData', report )
        
        self.request = failoverTransfer.request
        
        #If some or all of the files failed to be saved to failover
        if cleanUp:
            self.workflow_commons['Request'] = self.request
            #Leave any uploaded files just in case it is useful for the user
            #do not try to replicate any files.
            return S_ERROR('Failed To Upload Output Data')
        
        #If there is now at least one replica for uploaded files can trigger replication
        rm = ReplicaManager()
        self.log.info('Sleeping for 10 seconds before attempting replication of recently uploaded files')
        time.sleep(10)
        for lfn, repSE in replication.items():
            result = rm.replicateAndRegister(lfn, repSE, catalog = self.userFileCatalog)
            if not result['OK']:
                self.log.info('Replication failed with below error but file already exists in Grid storage '
                              'with at least one replica:\n%s' % (result))
        
        self.workflow_commons['Request'] = self.request
        self.generateFailoverFile()    
        
        self.setApplicationStatus('Job Finished Successfully')
        return S_OK('Output data uploaded')
Example #37
def main():
    global fullMatch
    global sites
    Script.registerSwitch("F", "full-match", "Check all the matching criteria",
                          setFullMatch)
    Script.registerSwitch(
        "S:", "site=", "Check matching for these sites (comma separated list)",
        setSites)
    Script.registerArgument("job_JDL: file with job JDL description")
    _, args = Script.parseCommandLine(ignoreErrors=True)

    from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
    from DIRAC.ConfigurationSystem.Client.Helpers import Resources
    from DIRAC.Core.Utilities.PrettyPrint import printTable
    from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
    from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
    from DIRAC.WorkloadManagementSystem.Utilities.QueueUtilities import getQueuesResolved, matchQueue

    with open(args[0]) as f:
        jdl = f.read()

    # Get the current VO
    result = getVOfromProxyGroup()
    if not result["OK"]:
        gLogger.error("No proxy found, please login")
        DIRACExit(-1)
    voName = result["Value"]

    resultQueues = Resources.getQueues(siteList=sites, community=voName)
    if not resultQueues["OK"]:
        gLogger.error("Failed to get CE information")
        DIRACExit(-1)
    siteDict = resultQueues["Value"]
    result = getQueuesResolved(siteDict, {}, checkPlatform=True)
    if not result["OK"]:
        gLogger.error("Failed to get CE information")
        DIRACExit(-1)
    queueDict = result["Value"]

    # get list of usable sites within this cycle
    resultMask = SiteStatus().getUsableSites()
    if not resultMask["OK"]:
        gLogger.error("Failed to get Site mask information")
        DIRACExit(-1)
    siteMaskList = resultMask.get("Value", [])

    rssClient = ResourceStatus()

    fields = ("Site", "CE", "Queue", "Status", "Match", "Reason")
    records = []

    for queue, queueInfo in queueDict.items():
        site = queueInfo["Site"]
        ce = queueInfo["CEName"]
        siteStatus = "Active" if site in siteMaskList else "InActive"
        ceStatus = siteStatus
        if rssClient.rssFlag:
            result = rssClient.getElementStatus(ce, "ComputingElement")
            if result["OK"]:
                ceStatus = result["Value"][ce]["all"]

        result = matchQueue(jdl,
                            queueInfo["ParametersDict"],
                            fullMatch=fullMatch)
        if not result["OK"]:
            gLogger.error("Failed in getting match data", result["Message"])
            DIRACExit(-1)
        status = "Active" if siteStatus == "Active" and ceStatus == "Active" else "Inactive"
        if result["Value"]["Match"]:
            records.append(
                (site, ce, queueInfo["QueueName"], status, "Yes", ""))
        else:
            records.append((site, ce, queueInfo["QueueName"], status, "No",
                            result["Value"]["Reason"]))

    gLogger.notice(
        printTable(fields,
                   records,
                   sortField="Site",
                   columnSeparator="  ",
                   printOut=False))
Example #38
Usage:
  %s [<options>]
""" % Script.scriptName)

Script.parseCommandLine()

import DIRAC
from DIRAC import gConfig, gLogger
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.Core.Utilities.List import sortList
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup

if __name__ == "__main__":

    result = getVOfromProxyGroup()
    if not result['OK']:
        gLogger.notice('Error:', result['Message'])
        DIRAC.exit(1)
    vo = result['Value']
    resources = Resources(vo=vo)
    result = resources.getEligibleStorageElements()
    if not result['OK']:
        gLogger.notice('Error:', result['Message'])
        DIRAC.exit(2)
    seList = sortList(result['Value'])

    resourceStatus = ResourceStatus()

    result = resourceStatus.getStorageStatus(seList)
    if not result['OK']:
Example #39
    def __init__(self, catalogs=None, vo=None):
        """Default constructor"""
        self.valid = True
        self.timeout = 180

        self.ro_methods = set()
        self.write_methods = set()
        self.no_lfn_methods = set()

        self.readCatalogs = []
        self.writeCatalogs = []
        self.rootConfigPath = "/Resources/FileCatalogs"
        self.vo = vo if vo else getVOfromProxyGroup().get("Value", None)
        self.log = gLogger.getSubLogger(self.__class__.__name__)

        self.opHelper = Operations(vo=self.vo)

        catalogList = []
        if isinstance(catalogs, six.string_types):
            catalogList = [catalogs]
        elif isinstance(catalogs, (list, tuple)):
            catalogList = list(catalogs)

        if catalogList:
            result = self._getEligibleCatalogs()
            if not result["OK"]:
                self.log.error("Failed to get eligible catalog")
                return
            eligibleFileCatalogs = result["Value"]
            catalogCheck = True
            for catalog in catalogList:
                if catalog not in eligibleFileCatalogs:
                    self.log.error("Specified catalog is not eligible", catalog)
                    catalogCheck = False
            if catalogCheck:
                result = self._getSelectedCatalogs(catalogList)
            else:
                result = S_ERROR("Specified catalog is not eligible")
        else:
            result = self._getCatalogs()
        if not result["OK"]:
            self.log.error("Failed to create catalog objects")
            self.valid = False
        elif (len(self.readCatalogs) == 0) and (len(self.writeCatalogs) == 0):
            self.log.error("No catalog object created")
            self.valid = False

        result = self.getMasterCatalogNames()
        masterCatalogs = result["Value"]
        # There cannot be more than one master catalog
        haveMaster = False
        if len(masterCatalogs) > 1:
            self.log.error("More than one master catalog created")
            self.valid = False
        elif len(masterCatalogs) == 1:
            haveMaster = True

        # Get the list of write methods
        if haveMaster:
            # All the write methods must be present in the master
            _catalogName, oCatalog, _master = self.writeCatalogs[0]
            _roList, writeList, nolfnList = oCatalog.getInterfaceMethods()
            self.write_methods.update(writeList)
            self.no_lfn_methods.update(nolfnList)
        else:
            for _catalogName, oCatalog, _master in self.writeCatalogs:
                _roList, writeList, nolfnList = oCatalog.getInterfaceMethods()
                self.write_methods.update(writeList)
                self.no_lfn_methods.update(nolfnList)

        # Get the list of read methods
        for _catalogName, oCatalog, _master in self.readCatalogs:
            roList, _writeList, nolfnList = oCatalog.getInterfaceMethods()
            self.ro_methods.update(roList)
            self.no_lfn_methods.update(nolfnList)

        self.condParser = FCConditionParser(vo=self.vo, ro_methods=self.ro_methods)
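
A short sketch of how this constructor is typically exercised, assuming it is DIRAC's FileCatalog; the catalog name is a placeholder and a valid proxy is assumed so the VO can be resolved:

# Sketch only: 'DIRACFileCatalog' is a placeholder catalog name.
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog

fc = FileCatalog(catalogs=['DIRACFileCatalog'])
if fc.valid:
    # ro_methods / write_methods were aggregated from the catalogs' interface methods
    print(sorted(fc.ro_methods)[:5], sorted(fc.write_methods)[:5])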
Example #40
def main():
    global fullMatch
    global sites
    Script.registerSwitch("F", "full-match", "Check all the matching criteria",
                          setFullMatch)
    Script.registerSwitch(
        "S:", "site=", "Check matching for these sites (comma separated list)",
        setSites)

    Script.parseCommandLine(ignoreErrors=True)
    args = Script.getPositionalArgs()

    if len(args) == 0:
        gLogger.error("Error: No job description provided")
        Script.showHelp(exitCode=1)

    from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
    from DIRAC.ConfigurationSystem.Client.Helpers import Resources
    from DIRAC.Core.Utilities.PrettyPrint import printTable
    from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
    from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
    from DIRAC.WorkloadManagementSystem.Utilities.QueueUtilities import getQueuesResolved, matchQueue

    with open(args[0]) as f:
        jdl = f.read()

    # Get the current VO
    result = getVOfromProxyGroup()
    if not result['OK']:
        gLogger.error('No proxy found, please login')
        DIRACExit(-1)
    voName = result['Value']

    resultQueues = Resources.getQueues(siteList=sites, community=voName)
    if not resultQueues['OK']:
        gLogger.error('Failed to get CE information')
        DIRACExit(-1)
    siteDict = resultQueues['Value']
    result = getQueuesResolved(siteDict)
    if not result['OK']:
        gLogger.error('Failed to get CE information')
        DIRACExit(-1)
    queueDict = result['Value']

    # get list of usable sites within this cycle
    resultMask = SiteStatus().getUsableSites()
    if not resultMask['OK']:
        gLogger.error('Failed to get Site mask information')
        DIRACExit(-1)
    siteMaskList = resultMask.get('Value', [])

    rssClient = ResourceStatus()

    fields = ('Site', 'CE', 'Queue', 'Status', 'Match', 'Reason')
    records = []

    for queue, queueInfo in queueDict.items():
        site = queueInfo['Site']
        ce = queueInfo['CEName']
        siteStatus = "Active" if site in siteMaskList else "InActive"
        ceStatus = siteStatus
        if rssClient.rssFlag:
            result = rssClient.getElementStatus(ce, "ComputingElement")
            if result['OK']:
                ceStatus = result['Value'][ce]['all']

        result = matchQueue(jdl, queueInfo, fullMatch=fullMatch)
        if not result['OK']:
            gLogger.error('Failed in getting match data', result['Message'])
            DIRACExit(-1)
        status = "Active" if siteStatus == "Active" and ceStatus == "Active" else "Inactive"
        if result['Value']['Match']:
            records.append((site, ce, queueInfo['Queue'], status, 'Yes', ''))
        else:
            records.append((site, ce, queueInfo['Queue'], status, 'No',
                            result['Value']['Reason']))

    gLogger.notice(
        printTable(fields,
                   records,
                   sortField='Site',
                   columnSeparator='  ',
                   printOut=False))
Example #41
    def __init__(self, **options):
        """
        Constructor. Takes options defined in Resources and Operations for this client.

        :param options: options dict
        """
        self.diracScopeAlg = options.get("DiracScopeAlg", "dirac")
        self.useDiracCS = False  # use a Rucio config file
        self.convertUnicode = True
        proxyInfo = {"OK": False}
        if os.getenv("RUCIO_AUTH_TYPE"
                     ) == "x509_proxy" and not os.getenv("X509_USER_PROXY"):
            proxyInfo = getProxyInfo(disableVOMS=True)
            if proxyInfo["OK"]:
                os.environ["X509_USER_PROXY"] = proxyInfo["Value"]["path"]
                sLog.debug("X509_USER_PROXY not defined. Using %s" %
                           proxyInfo["Value"]["path"])
        try:
            try:
                self._client = Client()
                self.account = self._client.account
            except (CannotAuthenticate, MissingClientParameter):
                if os.getenv("RUCIO_AUTH_TYPE") == "x509_proxy":
                    if not proxyInfo["OK"]:
                        proxyInfo = getProxyInfo(disableVOMS=True)
                    if proxyInfo["OK"]:
                        dn = proxyInfo["Value"]["identity"]
                        username = proxyInfo["Value"]["username"]
                        self.account = username
                        sLog.debug(
                            "Switching to account %s mapped to proxy %s" %
                            (username, dn))

            try:
                self._client = Client(account=self.account)
                self.scopes = self._client.list_scopes()
            except Exception as err:
                sLog.error(
                    "Cannot instantiate RucioFileCatalog interface using a config file",
                    "error : %s" % repr(err))
                sLog.info(("will try using Dirac CS"))

        except Exception as err:
            # instantiate the client w/o a config file
            sLog.debug(
                "instantiate the client w/o a config file -  take config params from the CS"
            )
            self.useDiracCS = True
            proxyInfo = getProxyInfo(disableVOMS=True)
            if proxyInfo["OK"]:
                proxyDict = proxyInfo["Value"]
                self.proxyPath = proxyDict.get("path", None)
                self.username = proxyDict.get("username", None)
            else:
                sLog.error("Cannot instantiate RucioFileCatalog interface",
                           proxyInfo["Message"])
                return
            self.VO = getVOfromProxyGroup()["Value"]
            self.rucioHost = options.get("RucioHost", None)
            self.authHost = options.get("AuthHost", None)
            self.caCertPath = Locations.getCAsLocation()
            try:
                sLog.info("Logging in with a proxy located at: %s" %
                          self.proxyPath)
                sLog.debug("account: ", self.username)
                sLog.debug("rucio host: ", self.rucioHost)
                sLog.debug("auth  host: ", self.authHost)
                sLog.debug("CA cert path: ", self.caCertPath)
                sLog.debug("VO: ", self.VO)

                self._client = Client(
                    account=self.username,
                    rucio_host=self.rucioHost,
                    auth_host=self.authHost,
                    ca_cert=self.caCertPath,
                    auth_type="x509_proxy",
                    creds={"client_proxy": self.proxyPath},
                    timeout=600,
                    user_agent="rucio-clients",
                    vo=self.VO,
                )

                sLog.debug(
                    "Rucio client instantiated successfully for VO %s and  account %s "
                    % (self.VO, self.username))
            except Exception as err:
                sLog.error("Cannot instantiate RucioFileCatalog interface",
                           "error : %s" % repr(err))
Example #42
  def __init__( self, catalogs = None, vo = None ):
    """ Default constructor
    """
    self.valid = True
    self.timeout = 180

    self.ro_methods = set()
    self.write_methods = set()
    self.no_lfn_methods = set()

    self.readCatalogs = []
    self.writeCatalogs = []
    self.rootConfigPath = '/Resources/FileCatalogs'
    self.vo = vo if vo else getVOfromProxyGroup().get( 'Value', None )
    self.log = gLogger.getSubLogger( "FileCatalog" )

    self.opHelper = Operations( vo = self.vo )

    catalogList = []
    if isinstance( catalogs, basestring ):
      catalogList = [catalogs]
    elif isinstance( catalogs, ( list, tuple ) ):
      catalogList = list( catalogs )

    if catalogList:
      result = self._getEligibleCatalogs()
      if not result['OK']:
        self.log.error( "Failed to get eligible catalog" )
        return
      eligibleFileCatalogs = result['Value']
      catalogCheck = True
      for catalog in catalogList:
        if catalog not in eligibleFileCatalogs:
          self.log.error( "Specified catalog is not eligible", catalog )
          catalogCheck = False
      if catalogCheck:
        result = self._getSelectedCatalogs( catalogList )
      else:
        result = S_ERROR( "Specified catalog is not eligible" )
    else:
      result = self._getCatalogs()
    if not result['OK']:
      self.log.error( "Failed to create catalog objects" )
      self.valid = False
    elif ( len( self.readCatalogs ) == 0 ) and ( len( self.writeCatalogs ) == 0 ):
      self.log.error( "No catalog object created" )
      self.valid = False

    result = self.getMasterCatalogNames()
    masterCatalogs = result['Value']
    # There cannot be more than one master catalog
    haveMaster = False
    if len( masterCatalogs ) > 1:
      self.log.error( "More than one master catalog created" )
      self.valid = False
    elif len( masterCatalogs ) == 1:
      haveMaster = True

    # Get the list of write methods
    if haveMaster:
      # All the write methods must be present in the master
      _catalogName, oCatalog, _master = self.writeCatalogs[0]
      _roList, writeList, nolfnList = oCatalog.getInterfaceMethods()
      self.write_methods.update( writeList )
      self.no_lfn_methods.update( nolfnList )
    else:
      for _catalogName, oCatalog, _master in self.writeCatalogs:
        _roList, writeList, nolfnList = oCatalog.getInterfaceMethods()
        self.write_methods.update( writeList )
        self.no_lfn_methods.update( nolfnList )

    # Get the list of read methods
    for _catalogName, oCatalog, _master in self.readCatalogs:
      roList, _writeList, nolfnList = oCatalog.getInterfaceMethods()
      self.ro_methods.update( roList )
      self.no_lfn_methods.update( nolfnList )

    self.condParser = FCConditionParser( vo = self.vo, ro_methods = self.ro_methods )
Example #43
  def __init__( self, name, protocols = None, vo = None ):
    """ c'tor

    :param str name: SE name
    :param list protocols: requested protocols
    """

    self.vo = vo
    if not vo:
      result = getVOfromProxyGroup()
      if not result['OK']:
        # returning a value from __init__ raises a TypeError; just return
        return
      self.vo = result['Value']
    self.opHelper = Operations( vo = self.vo )
    useProxy = gConfig.getValue( '/LocalSite/StorageElements/%s/UseProxy' % name, False )
    if not useProxy:
      useProxy = self.opHelper.getValue( '/Services/StorageElements/%s/UseProxy' % name, False )

    self.valid = True
    if protocols is None:
      res = StorageFactory( useProxy ).getStorages( name, protocolList = [] )
    else:
      res = StorageFactory( useProxy ).getStorages( name, protocolList = protocols )
    if not res['OK']:
      self.valid = False
      self.name = name
      self.errorReason = res['Message']
    else:
      factoryDict = res['Value']
      self.name = factoryDict['StorageName']
      self.options = factoryDict['StorageOptions']
      self.localProtocols = factoryDict['LocalProtocols']
      self.remoteProtocols = factoryDict['RemoteProtocols']
      self.storages = factoryDict['StorageObjects']
      self.protocolOptions = factoryDict['ProtocolOptions']
      self.turlProtocols = factoryDict['TurlProtocols']

    self.log = gLogger.getSubLogger( "SE[%s]" % self.name )

    self.readMethods = [ 'getFile',
                         'getAccessUrl',
                         'getTransportURL',
                         'prestageFile',
                         'prestageFileStatus',
                         'getDirectory']

    self.writeMethods = [ 'retransferOnlineFile',
                          'putFile',
                          'replicateFile',
                          'pinFile',
                          'releaseFile',
                          'createDirectory',
                          'putDirectory' ]

    self.removeMethods = [ 'removeFile', 'removeDirectory' ]

    self.checkMethods = [ 'exists',
                          'getDirectoryMetadata',
                          'getDirectorySize',
                          'getFileSize',
                          'getFileMetadata',
                          'getLocalProtocols',
                          'getPfnForProtocol',
                          'getPfnForLfn',
                          'getPfnPath',
                          'getProtocols',
                          'getRemoteProtocols',
                          'getStorageElementName',
                          'getStorageElementOption',
                          'getStorageParameters',
                          'listDirectory',
                          'isDirectory',
                          'isFile',
                          'isLocalSE' ]
Example #44
                final_dict[site].update({
                    "Tags": ldapTag(ce, vo),
                    "CE": curr_ces
                })
                #final_dict[site]={"Tags":ldapTag(ce,vo),"CE":[ce]}
    ret_dict = {}
    for key in final_dict:
        if len(final_dict[key]['CE']) != 0:
            ret_dict[key] = final_dict[key]
    return ret_dict


if __name__ == "__main__":
    Script.parseCommandLine()
    vo = "glast.org"
    res = getVOfromProxyGroup()
    if not res['OK']:
        gLogger.error(res['Message'])
        gLogger.error('Could not get VO from CS, assuming glast.org')
        dexit(1)
    else:
        gLogger.info(res)
        vo = res['Value']
    d = main(vo)
    for key in d:
        print('Name of DIRAC site %s\nName of CEs: %s' %
              (key, str(d[key]["CE"])))
        for tag in d[key]["Tags"]:
            print('\t%s' % tag)
        print('\n')
Example #45
  def getCEStatus(self):
    """ Method to return information on running and pending jobs.
        Aims to support both instances that use robot proxies and those with properly configured VOs.
    """

    result = self._prepareProxy()
    if not result['OK']:
      self.log.error('ARCComputingElement: failed to set up proxy', result['Message'])
      return result
    self.usercfg.ProxyPath(os.environ['X509_USER_PROXY'])

    # Try to find out which VO we are running for.
    vo = ''
    res = getVOfromProxyGroup()
    if res['OK']:
      vo = res['Value']

    result = S_OK()
    result['SubmittedJobs'] = 0
    if not vo:
      # Presumably the really proper way forward once the infosys-discuss WG comes up with a solution
      # and it is implemented. Needed for DIRAC instances which use robot certificates for pilots.
      endpoints = [arc.Endpoint(str("ldap://" + self.ceHost + "/MDS-Vo-name=local,o=grid"),
                                arc.Endpoint.COMPUTINGINFO, 'org.nordugrid.ldapng')]
      retriever = arc.ComputingServiceRetriever(self.usercfg, endpoints)
      retriever.wait()  # Takes a bit of time to get and parse the ldap information
      targets = retriever.GetExecutionTargets()
      ceStats = targets[0].ComputingShare
      self.log.debug("Running jobs for CE %s : %s" % (self.ceHost, ceStats.RunningJobs))
      self.log.debug("Waiting jobs for CE %s : %s" % (self.ceHost, ceStats.WaitingJobs))
      result['RunningJobs'] = ceStats.RunningJobs
      result['WaitingJobs'] = ceStats.WaitingJobs
    else:
      # The system which works properly at present for ARC CEs that are configured correctly.
      # But for this we need the VO to be known - ask me (Raja) for the whole story if interested.
      # cmd = 'ldapsearch -x -LLL -H ldap://%s:2135 -b mds-vo-name=resource,o=grid "(GlueVOViewLocalID=%s)"' % (
      #     self.ceHost, vo.lower())
      if not self.queue:
        self.log.error('ARCComputingElement: No queue ...')
        res = S_ERROR('Unknown queue (%s) failure for site %s' % (self.queue, self.ceHost))
        return res
      cmd1 = "ldapsearch -x -o ldif-wrap=no -LLL -h %s:2135  -b \'o=glue\' " % self.ceHost
      cmd2 = '"(&(objectClass=GLUE2MappingPolicy)(GLUE2PolicyRule=vo:%s))"' % vo.lower()
      cmd3 = ' | grep GLUE2MappingPolicyShareForeignKey | grep %s' % (self.queue.split("-")[-1])
      cmd4 = ' | sed \'s/GLUE2MappingPolicyShareForeignKey: /GLUE2ShareID=/\' '
      cmd5 = ' | xargs -L1 ldapsearch -x -o ldif-wrap=no -LLL -h %s:2135 -b \'o=glue\' ' % self.ceHost
      cmd6 = ' | egrep \'(ShareWaiting|ShareRunning)\''
      res = shellCall(0, cmd1 + cmd2 + cmd3 + cmd4 + cmd5 + cmd6)
      if not res['OK']:
        self.log.debug("Could not query CE %s - is it down?" % self.ceHost)
        return res
      try:
        ldapValues = res['Value'][1].split("\n")
        running = [lValue for lValue in ldapValues if 'GLUE2ComputingShareRunningJobs' in lValue]
        waiting = [lValue for lValue in ldapValues if 'GLUE2ComputingShareWaitingJobs' in lValue]
        result['RunningJobs'] = int(running[0].split(":")[1])
        result['WaitingJobs'] = int(waiting[0].split(":")[1])
      except IndexError:
        res = S_ERROR('Unknown ldap failure for site %s' % self.ceHost)
        return res

    return result
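
Whichever branch supplies the numbers (the ARC retriever or the GLUE2 LDAP query), callers consume the same dictionary; a sketch, assuming `ce` is an already-configured instance of this computing element class:

# 'ce' is assumed to be an instance of this ARC computing element, already configured.
res = ce.getCEStatus()
if res['OK']:
    print('Running: %s, Waiting: %s, Submitted: %s' %
          (res['RunningJobs'], res['WaitingJobs'], res['SubmittedJobs']))
else:
    print('Query failed: %s' % res['Message'])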
Example #46
  def __init__(self, name, plugins=None, vo=None, hideExceptions=False):
    """ c'tor

    :param str name: SE name
    :param list plugins: requested storage plugins
    :param vo: vo

    """

    self.methodName = None

    if vo:
      self.vo = vo
    else:
      result = getVOfromProxyGroup()
      if not result['OK']:
        return
      self.vo = result['Value']
    self.opHelper = Operations(vo=self.vo)

    # These things will soon have to go as well. 'AccessProtocol.1' is all but flexible.
    proxiedProtocols = gConfig.getValue('/LocalSite/StorageElements/ProxyProtocols', "").split(',')
    self.useProxy = (
        gConfig.getValue(
            "/Resources/StorageElements/%s/AccessProtocol.1/Protocol" %
            name, "UnknownProtocol") in proxiedProtocols)

    if not self.useProxy:
      self.useProxy = gConfig.getValue('/LocalSite/StorageElements/%s/UseProxy' % name, False)
    if not self.useProxy:
      self.useProxy = self.opHelper.getValue('/Services/StorageElements/%s/UseProxy' % name, False)

    self.valid = True
    if plugins is None:
      res = StorageFactory(
          useProxy=self.useProxy,
          vo=self.vo).getStorages(
              name,
              pluginList=[],
              hideExceptions=hideExceptions)
    else:
      res = StorageFactory(
          useProxy=self.useProxy,
          vo=self.vo).getStorages(
              name,
              pluginList=plugins,
              hideExceptions=hideExceptions)

    if not res['OK']:
      self.valid = False
      self.name = name
      self.errorReason = res['Message']
    else:
      factoryDict = res['Value']
      self.name = factoryDict['StorageName']
      self.options = factoryDict['StorageOptions']
      self.localPlugins = factoryDict['LocalPlugins']
      self.remotePlugins = factoryDict['RemotePlugins']
      self.storages = factoryDict['StorageObjects']
      self.protocolOptions = factoryDict['ProtocolOptions']
      self.turlProtocols = factoryDict['TurlProtocols']

      for storage in self.storages:

        storage.setStorageElement(self)

    self.log = gLogger.getSubLogger("SE[%s]" % self.name)

    if self.valid:

      self.useCatalogURL = gConfig.getValue(
          '/Resources/StorageElements/%s/UseCatalogURL' %
          self.name, False)
      self.log.debug("useCatalogURL: %s" % self.useCatalogURL)

      self.__dmsHelper = DMSHelpers(vo=vo)

      # Allow SE to overwrite general operation config
      accessProto = self.options.get('AccessProtocols')
      self.localAccessProtocolList = accessProto if accessProto else self.__dmsHelper.getAccessProtocols()
      self.log.debug("localAccessProtocolList %s" % self.localAccessProtocolList)

      writeProto = self.options.get('WriteProtocols')
      self.localWriteProtocolList = writeProto if writeProto else self.__dmsHelper.getWriteProtocols()
      self.log.debug("localWriteProtocolList %s" % self.localWriteProtocolList)

    #                         'getTransportURL',
    self.readMethods = ['getFile',
                        'prestageFile',
                        'prestageFileStatus',
                        'getDirectory']

    self.writeMethods = ['retransferOnlineFile',
                         'putFile',
                         'replicateFile',
                         'pinFile',
                         'releaseFile',
                         'createDirectory',
                         'putDirectory']

    self.removeMethods = ['removeFile',
                          'removeDirectory']

    self.checkMethods = ['exists',
                         'getDirectoryMetadata',
                         'getDirectorySize',
                         'getFileSize',
                         'getFileMetadata',
                         'listDirectory',
                         'isDirectory',
                         'isFile',
                         'getOccupancy'
                         ]

    self.okMethods = ['getLocalProtocols',
                      'getProtocols',
                      'getRemoteProtocols',
                      'storageElementName',
                      'getStorageParameters',
                      'getTransportURL',
                      'isLocalSE']

    self.__fileCatalog = None
Example #47
    def __init__(self, name, protocols=None, vo=None):
        """ c'tor

    :param str name: SE name
    :param list protocols: requested protocols
    :param vo: VO name
    """

        self.methodName = None

        if vo:
            self.vo = vo
        else:
            result = getVOfromProxyGroup()
            if not result['OK']:
                return
            self.vo = result['Value']
        self.opHelper = Operations(vo=self.vo)

        proxiedProtocols = gConfig.getValue(
            '/LocalSite/StorageElements/ProxyProtocols', "").split(',')
        useProxy = (gConfig.getValue(
            "/Resources/StorageElements/%s/AccessProtocol.1/Protocol" % name,
            "UnknownProtocol") in proxiedProtocols)

        if not useProxy:
            useProxy = gConfig.getValue(
                '/LocalSite/StorageElements/%s/UseProxy' % name, False)
        if not useProxy:
            useProxy = self.opHelper.getValue(
                '/Services/StorageElements/%s/UseProxy' % name, False)

        self.valid = True
        if protocols is None:
            res = StorageFactory(useProxy=useProxy,
                                 vo=self.vo).getStorages(name, protocolList=[])
        else:
            res = StorageFactory(useProxy=useProxy, vo=self.vo).getStorages(
                name, protocolList=protocols)
        if not res['OK']:
            self.valid = False
            self.name = name
            self.errorReason = res['Message']
        else:
            factoryDict = res['Value']
            self.name = factoryDict['StorageName']
            self.options = factoryDict['StorageOptions']
            self.localProtocols = factoryDict['LocalProtocols']
            self.remoteProtocols = factoryDict['RemoteProtocols']
            self.storages = factoryDict['StorageObjects']
            self.protocolOptions = factoryDict['ProtocolOptions']
            self.turlProtocols = factoryDict['TurlProtocols']

        self.log = gLogger.getSubLogger("SE[%s]" % self.name)

        self.readMethods = [
            'getFile', 'getAccessUrl', 'getTransportURL', 'prestageFile',
            'prestageFileStatus', 'getDirectory'
        ]

        self.writeMethods = [
            'retransferOnlineFile', 'putFile', 'replicateFile', 'pinFile',
            'releaseFile', 'createDirectory', 'putDirectory'
        ]

        self.removeMethods = ['removeFile', 'removeDirectory']

        self.checkMethods = [
            'exists',
            'getDirectoryMetadata',
            'getDirectorySize',
            'getFileSize',
            'getFileMetadata',
            'listDirectory',
            'isDirectory',
            'isFile',
        ]

        self.okMethods = [
            'getLocalProtocols', 'getPfnForProtocol', 'getPfnForLfn',
            'getPfnPath', 'getProtocols', 'getRemoteProtocols',
            'getStorageElementName', 'getStorageElementOption',
            'getStorageParameters', 'isLocalSE'
        ]
Example #48
def main():
    from DIRAC.Core.Base import Script
    Script.registerSwitch('', 'FromSE=', 'SE1[,SE2,...]')
    Script.registerSwitch('', 'TargetSE=', 'SE1[,SE2,...]')
    Script.registerSwitch(
        '', 'OutputFile=',
        'CSV output file (default /tmp/protocol-matrix.csv)')
    Script.registerSwitch(
        '', 'Bidirection',
        'If FromSE or TargetSE are specified, make a square matrix ')
    Script.registerSwitch('', 'FTSOnly',
                          'Only display the protocols sent to FTS')
    Script.registerSwitch('', 'ExcludeSE=',
                          'SEs to not take into account for the matrix')

    from DIRAC.Core.Base.Script import parseCommandLine
    parseCommandLine()
    from DIRAC import gConfig, gLogger
    from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
    from DIRAC.Resources.Storage.StorageElement import StorageElement
    from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup

    fromSE = []
    targetSE = []
    excludeSE = []
    outputFile = '/tmp/protocol-matrix.csv'
    bidirection = False
    ftsOnly = False
    for switch in Script.getUnprocessedSwitches():
        if switch[0] == 'FromSE':
            fromSE = switch[1].split(',')
        elif switch[0] == 'TargetSE':
            targetSE = switch[1].split(',')
        elif switch[0] == 'ExcludeSE':
            excludeSE = switch[1].split(',')
        elif switch[0] == 'OutputFile':
            outputFile = switch[1]
        elif switch[0] == 'Bidirection':
            bidirection = True
        elif switch[0] == 'FTSOnly':
            ftsOnly = True

    thirdPartyProtocols = DMSHelpers().getThirdPartyProtocols()

    # List all the BaseSE
    seBases = gConfig.getSections('/Resources/StorageElementBases')['Value']
    # construct a dict { baseSE : <inherited storages>}
    seForSeBases = {}

    allSEs = gConfig.getSections('/Resources/StorageElements/')['Value']

    # Remove the SEs that we want to exclude
    allSEs = set(allSEs) - set(excludeSE)

    # We go through all the SEs and fill in the seForSEBases dict.
    # Basically, at the end of the loop, the dict will contain
    # for each baseSE an entry corresponding to one real storage (the first one)
    # and itself for each real non inherited SE
    for se in allSEs:
        baseSE = gConfig.getOption('/Resources/StorageElements/%s/BaseSE' %
                                   se).get('Value')
        if baseSE:
            if baseSE not in seForSeBases:
                seForSeBases[baseSE] = se
        else:
            # If no baseSE, we put self
            seForSeBases[se] = se

    # Now let's take into account what source and destination we want.

    # If the user did not specify source or dest, generate everything
    if not fromSE and not targetSE:
        fromSE = list(seForSeBases)
        targetSE = list(seForSeBases)
    else:  # at least one of source or dest was specified

        # if bidirection, source and target should be the same
        if bidirection:
            if not fromSE and targetSE:  # we gave target, but no source
                fromSE = targetSE
            elif fromSE and not targetSE:  # we gave source but no target
                targetSE = fromSE
            elif fromSE and targetSE:  # we gave both
                fromSE = targetSE = list(set(fromSE + targetSE))

        else:  # no bidirection
            # he specified a targetSE
            if not fromSE:
                fromSE = list(seForSeBases)
            elif not targetSE:
                targetSE = list(seForSeBases)

    fromSE = sorted(fromSE)
    targetSE = sorted(targetSE)

    gLogger.notice("Using sources: %s" % ','.join(fromSE))
    gLogger.notice("Using target: %s" % ','.join(targetSE))

    # Now we construct the SE object for each SE that we want to appear
    ses = {}
    for se in set(fromSE + targetSE):
        ses[se] = StorageElement(seForSeBases.get(se, se))

    ret = getVOfromProxyGroup()
    if not ret['OK'] or not ret.get('Value', ''):
        gLogger.error('Aborting, Bad Proxy:',
                      ret.get('Message', 'Proxy does not belong to a VO!'))
        exit(1)
    vo = ret['Value']
    gLogger.notice('Using the Virtual Organization:', vo)
    # dummy LFN, still has to follow lfn convention
    lfn = '/%s/toto.xml' % vo

    # Create a matrix of protocol src/dest

    tpMatrix = defaultdict(dict)

    # For each source and destination, generate the url pair, and the compatible third party protocols
    for src, dst in ((x, y) for x in fromSE for y in targetSE):
        res = ses[dst].generateTransferURLsBetweenSEs(lfn, ses[src],
                                                      thirdPartyProtocols)
        if not res['OK']:
            surls = 'Error'
            gLogger.notice(
                "Could not generate transfer URLS",
                "src:%s, dst:%s, error:%s" % (src, dst, res['Message']))
        else:
            # We only keep the protocol part of the url
            surls = '/'.join(res['Value']['Protocols'])

        # Add also the third party protocols
        proto = ','.join(ses[dst].negociateProtocolWithOtherSE(
            ses[src], thirdPartyProtocols)['Value'])
        if ftsOnly:
            tpMatrix[src][dst] = '%s' % surls
        else:
            tpMatrix[src][dst] = '%s (%s)' % (surls, proto)
        gLogger.verbose("%s -> %s: %s" % (src, dst, surls))
        gLogger.verbose("%s -> %s: %s" % (src, dst, proto))

    # Write the matrix in the file
    with open(outputFile, 'wb') as csvfile:
        csvWriter = csv.writer(csvfile,
                               delimiter=';',
                               quoting=csv.QUOTE_MINIMAL)

        csvWriter.writerow(['src/dst'] + targetSE)

        for src in fromSE:
            srcRow = [src]
            for dst in targetSE:
                srcRow.append(tpMatrix[src].get(dst, 'NA'))
            csvWriter.writerow(srcRow)
    gLogger.notice('Wrote Matrix to', outputFile)
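
To sanity-check the result, the matrix can be read back with the csv module; a minimal sketch using the script's default output path:

import csv

# Read back the protocol matrix written above; the first row is 'src/dst' plus the target SEs,
# and each following row is a source SE plus one cell per destination.
with open('/tmp/protocol-matrix.csv') as csvfile:
    for row in csv.reader(csvfile, delimiter=';'):
        print(row)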
Example #49
  def __init__( self, name, plugins = None, vo = None, hideExceptions = False ):
    """ c'tor

    :param str name: SE name
    :param list plugins: requested storage plugins
    :param vo: VO name
    """

    self.methodName = None

    if vo:
      self.vo = vo
    else:
      result = getVOfromProxyGroup()
      if not result['OK']:
        return
      self.vo = result['Value']
    self.opHelper = Operations( vo = self.vo )

    proxiedProtocols = gConfig.getValue( '/LocalSite/StorageElements/ProxyProtocols', "" ).split( ',' )
    useProxy = ( gConfig.getValue( "/Resources/StorageElements/%s/AccessProtocol.1/Protocol" % name, "UnknownProtocol" )
                in proxiedProtocols )

    if not useProxy:
      useProxy = gConfig.getValue( '/LocalSite/StorageElements/%s/UseProxy' % name, False )
    if not useProxy:
      useProxy = self.opHelper.getValue( '/Services/StorageElements/%s/UseProxy' % name, False )

    self.valid = True
    pluginList = plugins if plugins is not None else []
    res = StorageFactory( useProxy = useProxy, vo = self.vo ).getStorages( name, pluginList = pluginList, hideExceptions = hideExceptions )

    if not res['OK']:
      self.valid = False
      self.name = name
      self.errorReason = res['Message']
    else:
      factoryDict = res['Value']
      self.name = factoryDict['StorageName']
      self.options = factoryDict['StorageOptions']
      self.localPlugins = factoryDict['LocalPlugins']
      self.remotePlugins = factoryDict['RemotePlugins']
      self.storages = factoryDict['StorageObjects']
      self.protocolOptions = factoryDict['ProtocolOptions']
      self.turlProtocols = factoryDict['TurlProtocols']
      for storage in self.storages:
        storage.setStorageElement( self )

    self.log = gLogger.getSubLogger( "SE[%s]" % self.name )
    self.useCatalogURL = gConfig.getValue( '/Resources/StorageElements/%s/UseCatalogURL' % self.name, False )

    # 'getTransportURL' is not a read method here; it is listed in okMethods below
    self.readMethods = [ 'getFile',
                         'prestageFile',
                         'prestageFileStatus',
                         'getDirectory']

    self.writeMethods = [ 'retransferOnlineFile',
                          'putFile',
                          'replicateFile',
                          'pinFile',
                          'releaseFile',
                          'createDirectory',
                          'putDirectory' ]

    self.removeMethods = [ 'removeFile', 'removeDirectory' ]

    self.checkMethods = [ 'exists',
                          'getDirectoryMetadata',
                          'getDirectorySize',
                          'getFileSize',
                          'getFileMetadata',
                          'listDirectory',
                          'isDirectory',
                          'isFile',
                           ]

    self.okMethods = [ 'getLocalProtocols',
                       'getProtocols',
                       'getRemoteProtocols',
                       'getStorageElementName',
                       'getStorageParameters',
                       'getTransportURL',
                       'isLocalSE' ]

    self.__fileCatalog = None
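
A hedged usage sketch of the constructor above; the SE name and LFN are
hypothetical placeholders (real names come from the DIRAC configuration):

from DIRAC.Resources.Storage.StorageElement import StorageElement

se = StorageElement('SOME-SE')  # hypothetical SE name
if not se.valid:
    print('SE could not be initialised: %s' % se.errorReason)
else:
    # getFileSize is one of the checkMethods listed above
    res = se.getFileSize('/myvo/user/t/toto/file.txt')  # hypothetical LFN
    if res['OK']:
        print(res['Value'])  # {'Successful': {lfn: size}, 'Failed': {...}}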
Example No. 50
    def execute(self):
        """ Main execution function.
        """
        #Work out whether the module is part of the last step: user jobs can
        #have any number of steps and we only want to run the finalization once.
        currentStep = int(self.step_commons['STEP_NUMBER'])
        totalSteps = int(self.workflow_commons['TotalSteps'])
        if currentStep == totalSteps:
            self.lastStep = True
        else:
            self.log.verbose(
                'Current step = %s, total steps of workflow = %s; '
                'UserJobFinalization will enable itself only at the last '
                'workflow step.' % (currentStep, totalSteps))

        if not self.lastStep:
            #Not last step, do nothing, proceed happily.
            return S_OK()

        result = self.resolveInputVariables()
        if not result['OK']:
            self.log.error("Failed to resolve input parameters:",
                           result['Message'])
            return result

        self.log.info('Initializing %s' % self.version)
        if not self.workflowStatus['OK'] or not self.stepStatus['OK']:
            ##Something went wrong in the step or the workflow, do nothing.
            self.log.verbose(
                'Workflow status = %s, step status = %s' %
                (self.workflowStatus['OK'], self.stepStatus['OK']))
            return S_OK('No output data upload attempted')

        self.request.RequestName = 'job_%d_request.xml' % int(self.jobID)
        self.request.JobID = self.jobID
        self.request.SourceComponent = "Job_%d" % int(self.jobID)

        if not self.userOutputData:
            self.log.info(
                'No user output data is specified for this job, nothing to do')
            return S_OK('No output data to upload')

        #Determine the final list of possible output files for the
        #workflow and all the parameters needed to upload them.
        outputList = []
        possible_files = []
        for i in self.userOutputData:
            files = getGlobbedFiles(i)
            for possible_file in files:
                if possible_file in possible_files:
                    #Don't have twice the same file
                    continue
                outputList.append({
                    #the extension is used to sort the files into different dirs
                    'outputDataType': i.split('.')[-1].upper(),
                    'outputDataSE': self.userOutputSE,
                    'outputFile': os.path.basename(possible_file)
                })
                possible_files.append(os.path.basename(possible_file))

        self.log.info('Constructing user output LFN(s) for %s' %
                      (', '.join(self.userOutputData)))
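        #Presumably a placeholder job ID for local runs outside the WMS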
        if not self.jobID:
            self.jobID = 12345
        owner = ''
        if 'Owner' in self.workflow_commons:
            owner = self.workflow_commons['Owner']
        else:
            res = getCurrentOwner()
            if not res['OK']:
                self.log.error('Could not find proxy')
                return S_ERROR('Could not obtain owner from proxy')
            owner = res['Value']
        vo = ''
        if 'VO' in self.workflow_commons:
            vo = self.workflow_commons['VO']
        else:
            res = getVOfromProxyGroup()
            if not res['OK']:
                self.log.error('Failed finding the VO')
                return S_ERROR('Could not obtain VO from proxy')
            vo = res['Value']
        result = constructUserLFNs(int(self.jobID), vo, owner, possible_files,
                                   self.userOutputPath)
        if not result['OK']:
            self.log.error('Could not create user LFNs', result['Message'])
            return result
        userOutputLFNs = result['Value']

        self.log.verbose('Calling getCandidateFiles( %s, %s)' %
                         (outputList, userOutputLFNs))
        result = self.getCandidateFiles(outputList, userOutputLFNs)
        if not result['OK']:
            if not self.ignoreapperrors:
                self.log.error(result['Message'])
                self.setApplicationStatus(result['Message'])
                return S_OK()

        fileDict = result['Value']
        result = self.getFileMetadata(fileDict)
        if not result['OK']:
            if not self.ignoreapperrors:
                self.log.error(result['Message'])
                self.setApplicationStatus(result['Message'])
                return S_OK()

        if not result['Value']:
            if not self.ignoreapperrors:
                self.log.info(
                    'No output data files were determined to be uploaded for this workflow'
                )
                self.setApplicationStatus('No Output Data Files To Upload')
                return S_OK()

        fileMetadata = result['Value']

        orderedSEs = self.userOutputSE

        self.log.info('Ordered list of output SEs is: %s' %
                      (', '.join(orderedSEs)))
        final = {}
        for fileName, metadata in fileMetadata.items():
            final[fileName] = metadata
            final[fileName]['resolvedSE'] = orderedSEs

        #At this point we can exit and see exactly what the module would upload
        if not self.enable:
            self.log.info(
                'Module is disabled by control flag, would have attempted '
                'to upload the following files %s' % ', '.join(final.keys()))
            for fileName, metadata in final.items():
                self.log.info('--------%s--------' % fileName)
                for n, v in metadata.items():
                    self.log.info('%s = %s' % (n, v))

            return S_OK('Module is disabled by control flag')

        #Instantiate the failover transfer client with the global request object
        failoverTransfer = FailoverTransfer(self.request)

        #One by one upload the files with failover if necessary
        replication = {}
        failover = {}
        uploaded = []
        if not self.failoverTest:
            for fileName, metadata in final.items():
                self.log.info(
                    "Attempting to store file %s to the following SE(s):\n%s" %
                    (fileName, ', '.join(metadata['resolvedSE'])))
                replicateSE = ''
                result = failoverTransfer.transferAndRegisterFile(
                    fileName,
                    metadata['localpath'],
                    metadata['lfn'],
                    metadata['resolvedSE'],
                    fileMetaDict=metadata,
                    fileCatalog=self.userFileCatalog)
                if not result['OK']:
                    self.log.error(
                        'Could not transfer and register %s with metadata:\n %s'
                        % (fileName, metadata))
                    failover[fileName] = metadata
                else:
                    #Only attempt replication after successful upload
                    lfn = metadata['lfn']
                    uploaded.append(lfn)
                    seList = metadata['resolvedSE']

                    if 'uploadedSE' in result['Value']:
                        uploadedSE = result['Value']['uploadedSE']
                        for se in seList:
                            if not se == uploadedSE:
                                replicateSE = se
                                break

                if replicateSE and lfn:
                    self.log.info('Will attempt to replicate %s to %s' %
                                  (lfn, replicateSE))
                    replication[lfn] = replicateSE
        else:
            failover = final

        cleanUp = False
        for fileName, metadata in failover.items():
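            #Randomise the failover SE order for each file to spread the load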
            random.shuffle(self.failoverSEs)
            targetSE = metadata['resolvedSE'][0]
            metadata['resolvedSE'] = self.failoverSEs
            result = failoverTransfer.transferAndRegisterFileFailover(
                fileName,
                metadata['localpath'],
                metadata['lfn'],
                targetSE,
                self.failoverSEs,
                fileMetaDict=metadata,
                fileCatalog=self.userFileCatalog)
            if not result['OK']:
                self.log.error(
                    'Could not transfer and register %s with metadata:\n %s' %
                    (fileName, metadata))
                cleanUp = True
                continue  #for user jobs we can continue even if one file completely fails
            else:
                lfn = metadata['lfn']
                uploaded.append(lfn)

        #For correctly uploaded files, report the LFNs to the job parameters
        if uploaded:
            report = ', '.join(uploaded)
            self.jobReport.setJobParameter('UploadedOutputData', report)

        self.request = failoverTransfer.request

        #If some or all of the files failed to be saved to failover
        if cleanUp:
            self.workflow_commons['Request'] = self.request
            #Leave any uploaded files just in case it is useful for the user
            #do not try to replicate any files.
            return S_ERROR('Failed To Upload Output Data')

        #Now that every uploaded file has at least one replica, replication can be triggered
        rm = ReplicaManager()
        self.log.info(
            'Sleeping for 10 seconds before attempting replication of recently uploaded files'
        )
        time.sleep(10)
        for lfn, repSE in replication.items():
            result = rm.replicateAndRegister(lfn,
                                             repSE,
                                             catalog=self.userFileCatalog)
            if not result['OK']:
                self.log.info(
                    'Replication failed with the error below, but the file '
                    'already exists in Grid storage with at least one '
                    'replica:\n%s' % result)

        self.workflow_commons['Request'] = self.request
        self.generateFailoverFile()

        self.setApplicationStatus('Job Finished Successfully')
        return S_OK('Output data uploaded')
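
A stripped-down sketch of a single upload using the same FailoverTransfer call
signature as the module above; the import paths follow the standard DIRAC
layout, while the file name, LFN, SE, and metadata are hypothetical
placeholders:

from DIRAC.DataManagementSystem.Client.FailoverTransfer import FailoverTransfer
from DIRAC.RequestManagementSystem.Client.Request import Request

ft = FailoverTransfer(Request())
res = ft.transferAndRegisterFile(
    'histos.root',                          # file name (placeholder)
    '/tmp/histos.root',                     # local path (placeholder)
    '/myvo/user/j/jdoe/12345/histos.root',  # LFN (placeholder)
    ['SOME-USER-SE'],                       # ordered destination SEs (placeholder)
    fileMetaDict={'Size': 0, 'GUID': None}, # placeholder metadata
    fileCatalog='FileCatalog')
if res['OK'] and 'uploadedSE' in res['Value']:
    print('stored at %s' % res['Value']['uploadedSE'])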
Example No. 51
    def fts3Transfer(self):
        """replicate and register using FTS3"""

        self.log.info("scheduling files in FTS3...")

        # Check first if we do not have ongoing transfers

        res = self._checkExistingFTS3Operations()
        if not res["OK"]:
            return res

        # if res['Value'] is False
        # it means that there are ongoing transfers
        # and we should stop here
        if res["Value"] is False:
            # return S_OK such that the request is put back
            return S_OK()

        fts3Files = []
        toSchedule = {}

        # Dict which maps the FileID to the object
        rmsFilesIds = {}

        if self.rmsMonitoring:
            self.rmsMonitoringReporter.addRecord(
                self.createRMSRecord("Attempted",
                                     len(self.getWaitingFilesList())))

        for opFile in self.getWaitingFilesList():
            rmsFilesIds[opFile.FileID] = opFile

            opFile.Error = ""
            if not self.rmsMonitoring:
                gMonitor.addMark("FTSScheduleAtt")
            # # check replicas
            replicas = self._filterReplicas(opFile)
            if not replicas["OK"]:
                continue
            replicas = replicas["Value"]

            validReplicas = replicas["Valid"]
            noMetaReplicas = replicas["NoMetadata"]
            noReplicas = replicas["NoReplicas"]
            badReplicas = replicas["Bad"]
            noPFN = replicas["NoPFN"]

            if validReplicas:
                validTargets = list(
                    set(self.operation.targetSEList) - set(validReplicas))
                if not validTargets:
                    self.log.info("file %s is already present at all targets" %
                                  opFile.LFN)
                    opFile.Status = "Done"
                else:
                    toSchedule[opFile.LFN] = [opFile, validTargets]

            else:
                if self.rmsMonitoring:
                    self.rmsMonitoringReporter.addRecord(
                        self.createRMSRecord("Failed", 1))
                else:
                    gMonitor.addMark("FTSScheduleFail")
                if noMetaReplicas:
                    self.log.warn(
                        "unable to schedule file",
                        "'%s': couldn't get metadata at %s" %
                        (opFile.LFN, ",".join(noMetaReplicas)),
                    )
                    opFile.Error = "Couldn't get metadata"
                elif noReplicas:
                    self.log.error(
                        "Unable to schedule transfer",
                        "File %s doesn't exist at %s" %
                        (opFile.LFN, ",".join(noReplicas)),
                    )
                    opFile.Error = "No replicas found"
                    opFile.Status = "Failed"
                elif badReplicas:
                    self.log.error(
                        "Unable to schedule transfer",
                        "File %s, all replicas have a bad checksum at %s" %
                        (opFile.LFN, ",".join(badReplicas)),
                    )
                    opFile.Error = "All replicas have a bad checksum"
                    opFile.Status = "Failed"
                elif noPFN:
                    self.log.warn(
                        "unable to schedule %s, could not get a PFN at %s" %
                        (opFile.LFN, ",".join(noPFN)))

        if self.rmsMonitoring:
            self.rmsMonitoringReporter.commit()

        res = self._addMetadataToFiles(toSchedule)
        if not res["OK"]:
            return res
        else:
            filesToSchedule = res["Value"]

            for lfn in filesToSchedule:
                opFile = filesToSchedule[lfn]
                validTargets = toSchedule[lfn][1]
                for targetSE in validTargets:
                    ftsFile = FTS3File.fromRMSFile(opFile, targetSE)
                    fts3Files.append(ftsFile)

        if fts3Files:
            res = Registry.getUsernameForDN(self.request.OwnerDN)
            if not res["OK"]:
                self.log.error(
                    "Cannot get username for DN",
                    "%s %s" % (self.request.OwnerDN, res["Message"]))
                return res

            username = res["Value"]
            fts3Operation = FTS3TransferOperation.fromRMSObjects(
                self.request, self.operation, username)
            fts3Operation.ftsFiles = fts3Files

            try:
                if not fts3Operation.activity:
                    vo = getVOfromProxyGroup().get("Value")
                    fts3Plugin = getFTS3Plugin(vo=vo)
                    fts3Operation.activity = fts3Plugin.inferFTSActivity(
                        fts3Operation, self.request, self.operation)
            except Exception:
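                # the FTS3 activity is optional metadata; failing to infer it is not fatal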
                pass

            ftsSchedule = FTS3Client().persistOperation(fts3Operation)
            if not ftsSchedule["OK"]:
                self.log.error("Completely failed to schedule to FTS3:",
                               ftsSchedule["Message"])
                return ftsSchedule

            # might have nothing to schedule
            ftsSchedule = ftsSchedule["Value"]
            self.log.info("Scheduled with FTS3Operation id %s" % ftsSchedule)

            self.log.info("%d files have been scheduled to FTS3" %
                          len(fts3Files))

            if self.rmsMonitoring:
                self.rmsMonitoringReporter.addRecord(
                    self.createRMSRecord("Successful", len(fts3Files)))

            for ftsFile in fts3Files:
                opFile = rmsFilesIds[ftsFile.rmsFileID]
                if not self.rmsMonitoring:
                    gMonitor.addMark("FTSScheduleOK", 1)
                opFile.Status = "Scheduled"
                self.log.debug("%s has been scheduled for FTS" % opFile.LFN)
        else:
            self.log.info("No files to schedule after metadata checks")

        if self.rmsMonitoring:
            self.rmsMonitoringReporter.commit()

        # Just in case some transfers could not be scheduled, try them with RM
        return self.dmTransfer(fromFTS=True)
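
The examples above all follow the same DIRAC return convention: every call
yields a dict with 'OK', plus 'Value' on success or 'Message' on failure. A
small helper (a sketch, not part of DIRAC) condenses the repeated checks:

from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOfromProxyGroup

def unwrap(res, context='DIRAC call'):
    """Return res['Value'], or raise if the S_OK/S_ERROR dict reports failure."""
    if not res['OK']:
        raise RuntimeError('%s failed: %s' % (context, res['Message']))
    return res['Value']

# e.g. the VO lookup used throughout the examples:
# vo = unwrap(getVOfromProxyGroup(), 'getVOfromProxyGroup')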