Example no. 1
def removeLibc(libraryPath):
  """ Remove libraries that can be problematic, like libc.so

  :param string libraryPath: path in which to look for libraries to remove
  :returns: True on Success, False in case of error
  """

  gLogger.debug("RemoveLibC: Trying to remove these libraries:")
  gLogger.debug("RemoveLibC - "+ "\nRemoveLibC - ".join(FILES_TO_REMOVE) )

  curdir = os.getcwd()
  try:
    os.chdir(libraryPath)
  except OSError:
    return True
  listlibs = os.listdir(os.getcwd())
  for lib in listlibs:
    for lib_to_remove in FILES_TO_REMOVE:
      if lib.count(lib_to_remove):
        try:
          libraryPath = os.getcwd() + os.sep + lib
          gLogger.info("RemoveLibC: Trying to remove: %s" % libraryPath)
          os.remove(libraryPath)
        except OSError:
          gLogger.error("RemoveLibC: Could not remove", lib)
          os.chdir(curdir)
          return False
  os.chdir(curdir)
  return True
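
A side note on the matching: lib.count(lib_to_remove) is used as a truthy substring test, so any file whose name contains a blacklisted token is removed. A minimal standalone sketch of the same pattern without the chdir dance (the FILES_TO_REMOVE contents below are hypothetical):

import os

FILES_TO_REMOVE = ['libc.so', 'libm.so', 'libpthread.so']  # hypothetical token list

def removeMatchingLibs(libraryPath):
  """ Remove any file under libraryPath whose name contains a blacklisted token """
  try:
    entries = os.listdir(libraryPath)
  except OSError:
    return True  # as in removeLibc above: an inaccessible directory is treated as success
  for name in entries:
    if any(token in name for token in FILES_TO_REMOVE):
      try:
        os.remove(os.path.join(libraryPath, name))
      except OSError:
        return False
  return True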
Example no. 2
def jobexec(jobxml, wfParameters):
  jobfile = os.path.abspath(jobxml)
  if not os.path.exists(jobfile):
    gLogger.warn('Path to specified workflow %s does not exist' % (jobfile))
    sys.exit(1)
  workflow = fromXMLFile(jobfile)
  gLogger.debug(workflow)
  code = workflow.createCode()
  gLogger.debug(code)
  jobID = 0
  if 'JOBID' in os.environ:
    jobID = os.environ['JOBID']
    gLogger.info('DIRAC JobID %s is running at site %s' % (jobID, DIRAC.siteName()))

  workflow.addTool('JobReport', JobReport(jobID))
  workflow.addTool('AccountingReport', DataStoreClient())
  workflow.addTool('Request', Request())

  # Propagate the command line parameters to the workflow if any
  for pName, pValue in wfParameters.items():
    workflow.setValue(pName, pValue)

  # Propagate the command line parameters to the workflow module instances of each step
  for stepdefinition in workflow.step_definitions.itervalues():
    for moduleInstance in stepdefinition.module_instances:
      for pName, pValue in wfParameters.iteritems():
        if moduleInstance.parameters.find(pName):
          moduleInstance.parameters.setValue(pName, pValue)

  return workflow.execute()
Example no. 3
  def __irodsClient( self , user = None ):

    global IRODS_USER
    password = None

    cfgPath = self.serviceInfoDict[ 'serviceSectionPath' ]
    gLogger.debug( "cfgPath: %s" % cfgPath )

    if not user:
      credentials = self.getRemoteCredentials()
      if credentials and ( "username" in credentials ):
        IRODS_USER = credentials[ "username" ]
        ## TODO: should get user password somehow
    elif user == "r":
      IRODS_USER = gConfig.getValue( "%s/read" % cfgPath , IRODS_USER )
    elif user == "w":
      IRODS_USER = gConfig.getValue( "%s/write" % cfgPath , IRODS_USER )

    if not IRODS_USER:
      return False , "Failed to get iRods user"
    gLogger.debug( "iRods user: %s" % IRODS_USER )

    password = gConfig.getValue( "%s/%s" % ( cfgPath , IRODS_USER ) , password )

    conn , errMsg = rcConnect( IRODS_HOST , IRODS_PORT , IRODS_USER , IRODS_ZONE )

    status = clientLoginWithPassword( conn , password )

    if status != 0:
      return False , "Failed to authenticate user '%s'" % IRODS_USER

    return conn , errMsg
Example no. 4
  def __getStageSubmittedReplicas( self ):
    """ This obtains the StageSubmitted replicas from the Replicas table and the RequestID from the StageRequests table """
    res = self.storageDB.getCacheReplicas( {'Status':'StageSubmitted'} )
    if not res['OK']:
      gLogger.error( "StageRequest.__getStageSubmittedReplicas: Failed to get replicas with StageSubmitted status.", res['Message'] )
      return res
    if not res['Value']:
      gLogger.debug( "StageRequest.__getStageSubmittedReplicas: No StageSubmitted replicas found to process." )
      return S_OK()
    else:
      gLogger.debug( "StageRequest.__getStageSubmittedReplicas: Obtained %s StageSubmitted replicas(s) to process." % len( res['Value'] ) )

    seReplicas = {}
    replicaIDs = res['Value']
    for replicaID, info in replicaIDs.items():
      storageElement = info['SE']
      if not seReplicas.has_key( storageElement ):
        seReplicas[storageElement] = []
      seReplicas[storageElement].append( replicaID )

    # RequestID was missing from replicaIDs dictionary BUGGY?
    res = self.storageDB.getStageRequests( {'ReplicaID':replicaIDs.keys()} )
    if not res['OK']:
      return res
    if not res['Value']:
      return S_ERROR( 'Could not obtain request IDs for replicas %s from StageRequests table' % ( replicaIDs.keys() ) )

    for replicaID, info in res['Value'].items():
      reqID = info['RequestID']
      replicaIDs[replicaID]['RequestID'] = reqID

    return S_OK( {'SEReplicas':seReplicas, 'ReplicaIDs':replicaIDs} )
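
The has_key/append grouping above is a common pattern; for reference, an equivalent and more compact sketch using collections.defaultdict (the sample data is made up):

from collections import defaultdict

replicaIDs = {1: {'SE': 'CERN-disk'}, 2: {'SE': 'CERN-disk'}, 3: {'SE': 'PIC-disk'}}  # sample
seReplicas = defaultdict(list)
for replicaID, info in replicaIDs.items():
  seReplicas[info['SE']].append(replicaID)
# seReplicas now holds {'CERN-disk': [1, 2], 'PIC-disk': [3]}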
Example no. 5
  def __query( self, queryType, tableName, parameters ):
    '''
      This method is a rather important one: it formats the input for the DB
      queries instead of doing it in a decorator. Two dictionaries must be passed
      to the DB. The first one contains 'columnName' : value pairs, with keys in
      lower camel case. The second one must have, at least, a key named 'table'
      with the right table name.
    '''
    # Functions we can call, just a light safety measure.
    _gateFunctions = [ 'insert', 'update', 'select', 'delete', 'addOrModify', 'addIfNotThere'  ] 
    if not queryType in _gateFunctions:
      return S_ERROR( '"%s" is not a proper gate call' % queryType )
    
    gateFunction = getattr( self.gate, queryType )
    
    # If meta is None, we set it to {}
    meta = parameters.pop( 'meta' ) or {}
    # Remove self, added by locals()
    del parameters[ 'self' ]

    meta[ 'table' ] = tableName
    
    gLogger.debug( 'Calling %s, with \n params %s \n meta %s' % ( queryType, parameters, meta ) )  
    return gateFunction( parameters, meta )
    
    
Example no. 6
  def export_update( self, params, meta ):
    '''
    This method is a bridge to access :class:`ResourceManagementDB` remotely. It
    adds neither processing nor validation. If you need to know more about this
    method, consult the database documentation.

    :Parameters:
      **params** - `dict`
        arguments for the mysql query ( must match table columns ! ).

      **meta** - `dict`
        metadata for the mysql query. It must contain, at least, the `table` key
        with the proper table name.

    :return: S_OK() || S_ERROR()
    '''

    gLogger.info( 'update: %s %s' % ( params, meta ) )
    
    try:
      res = db.update( params, meta )
      gLogger.debug( 'update %s' % res )
    except Exception, e:
      _msg = 'Exception calling db.update: \n %s' % e
      gLogger.exception( _msg )
      res = S_ERROR( _msg )

    return res
Example no. 7
  def transfer_toClient( self, fileID, token, fileHelper ):
    """ Method to send files to clients.
        fileID is the local file name in the SE.
        token is used for access rights confirmation.
    """

    conn , error = self.__irodsClient( "r" )
    if not conn:
      return S_ERROR( error )

    file_path = self.__resolveFileID( fileID )
    file_path = IRODS_HOME + file_path
    gLogger.debug( "file_path to read: %s" % file_path )

    fd = iRodsOpen( conn , file_path , "r" )
    if not fd:
      rcDisconnect( conn )
      gLogger.error( "Failed to get file object" )
      return S_ERROR( "Failed to get file object" )

    result = fileHelper.FileToNetwork( fd )
    fd.close()

    rcDisconnect( conn )
    if not result[ "OK" ]:
      gLogger.error( "Failed to get file " + fileID )
      return S_ERROR( "Failed to get file " + fileID )
    else:
      return result
Example no. 8
  def export_rescheduleJob( self, jobIDs ):
    """ Reschedule the given jobs. jobIDs can be a single job ID or a list of them.
    """

    jobList = self.__get_job_list( jobIDs )
    if not jobList:
      return S_ERROR( 'Invalid job specification: ' + str( jobIDs ) )

    validJobList, invalidJobList, nonauthJobList, ownerJobList = self.__evaluate_rights( jobList,
                                                                        RIGHT_RESCHEDULE )
    for jobID in validJobList:
      gtaskQueueDB.deleteJob( jobID )
      #gJobDB.deleteJobFromQueue(jobID)
      result = gJobDB.rescheduleJob( jobID )
      gLogger.debug( str( result ) )
      if not result['OK']:
        return result
      gJobLoggingDB.addLoggingRecord( result['JobID'], result['Status'], result['MinorStatus'],
                                      application = 'Unknown', source = 'JobManager' )

    if invalidJobList or nonauthJobList:
      result = S_ERROR( 'Some jobs failed rescheduling' )
      if invalidJobList:
        result['InvalidJobIDs'] = invalidJobList
      if nonauthJobList:
        result['NonauthorizedJobIDs'] = nonauthJobList
      return result

    result = S_OK( validJobList )
    result[ 'requireProxyUpload' ] = len( ownerJobList ) > 0 and self.__checkIfProxyUploadIsRequired()
    self.__sendNewJobsToMind( validJobList )
    return result
Example no. 9
    def run(self):
        """ The main watchdog execution method
        """

        result = self.initialize()
        if not result['OK']:
            gLogger.always('Cannot start watchdog for the following reason')
            gLogger.always(result['Message'])
            return result

        try:
            while True:
                gLogger.debug('Starting watchdog loop # %d' % self.count)
                start_cycle_time = time.time()
                result = self.execute()
                exec_cycle_time = time.time() - start_cycle_time
                if not result['OK']:
                    gLogger.error("Watchdog error during execution",
                                  result['Message'])
                    break
                elif result['Value'] == "Ended":
                    break
                self.count += 1
                if exec_cycle_time < self.pollingTime:
                    time.sleep(self.pollingTime - exec_cycle_time)
            return S_OK()
        except Exception, x:
            gLogger.exception()
            return S_ERROR('Exception')
Example no. 10
  def aftermath(self, actionSuccess, actionFailed, action, prefix):
    """ Build a JSON-style summary of which actions succeeded and which failed """

    success = ", ".join(actionSuccess)
    failure = "\n".join(actionFailed)

    if len(actionSuccess) > 1:
      sText = prefix + "s"
    else:
      sText = prefix
      
    if len(actionFailed) > 1:
      fText = prefix + "s"
    else:
      fText = prefix

    if len(success) > 0 and len(failure) > 0:
      sMessage = "%s %sed successfully: %s" % (sText , action , success)
      fMessage = "Failed to %s %s:\n%s" % (action , fText , failure)
      result = sMessage + "\n\n" + fMessage
      return { "success" : "true" , "result" : result }
    elif len(success) > 0 and len(failure) < 1:
      result = "%s %sed successfully: %s" % (sText , action , success)
      return { "success" : "true" , "result" : result }
    elif len(success) < 1 and len(failure) > 0:
      result = "Failed to %s %s:\n%s" % (action , fText , failure)
      gLogger.always(result)
      return { "success" : "false" , "error" : result }
    else:
      result = "No action has been performed due to a technical failure. Please check the logs"
      gLogger.debug(result)
      return { "success" : "false" , "error" : result }
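
For illustration, two hypothetical calls and the dictionaries aftermath would build (the pluralisation of the prefix is driven by the list lengths):

self.aftermath(['Bookkeeping'], [], 'restart', 'component')
# -> {'success': 'true', 'result': 'component restarted successfully: Bookkeeping'}

self.aftermath([], ['host1: timeout', 'host2: timeout'], 'stop', 'component')
# -> {'success': 'false', 'error': 'Failed to stop components:\nhost1: timeout\nhost2: timeout'}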
Example no. 11
  def web_getHostData(self):
    """
    Returns a flattened list of components (services, agents) installed on the hosts
    returned by the getHosts function
    """

    # checkUserCredentials()
    userData = self.getSessionData()
    
    DN = str(userData["user"]["DN"])
    group = str(userData["user"]["group"])

    callback = list()
    
    if not (self.request.arguments.has_key("hostname") and len(self.request.arguments["hostname"][0]) > 0):
      self.finish({ "success" : "false" , "error" : "Name of the host is absent" })
      return
    
    host = self.request.arguments["hostname"][0]
    client = SystemAdministratorClient(host , None , delegatedDN=DN ,
                                          delegatedGroup=group)
    result = yield self.threadTask(client.getOverallStatus)
    gLogger.debug("Result of getOverallStatus(): %s" % result)

    if not result[ "OK" ]:
      self.finish({ "success" : "false" , "error" : result[ "Message" ] })
      return
    
    overall = result[ "Value" ]
   
    for record in self.flatten(overall):
      record[ "Host" ] = host
      callback.append(record)

    self.finish({ "success" : "true" , "result" : callback })
Example no. 12
 def getDirectoryMetadata( self, path ):
   """ Get the metadata for the directory
   """
   res = self.__checkArgumentFormat( path )
   if not res['OK']:
     return res
   urls = res['Value']
   gLogger.debug( "RFIOStorage.getDirectoryMetadata: Attempting to get metadata for %s directories." % len( urls ) )
   res = self.isDirectory( urls )
   if not res['OK']:
     return res
   successful = {}
   failed = res['Value']['Failed']
   directories = []
   for url, isDirectory in res['Value']['Successful'].items():
     if isDirectory:
       directories.append( url )
     else:
       errStr = "RFIOStorage.getDirectoryMetadata: Directory does not exist."
       gLogger.error( errStr, url )
       failed[url] = errStr
   res = self.__getPathMetadata( directories )
   if not res['OK']:
     return res
   else:
     failed.update( res['Value']['Failed'] )
     successful = res['Value']['Successful']
   resDict = {'Failed':failed, 'Successful':successful}
   return S_OK( resDict )
Example no. 13
  def web_getHostErrors(self):
    """ Return the component log check summary for the given host """

    userData = self.getSessionData()
    
    DN = str(userData["user"]["DN"])
    group = str(userData["user"]["group"])
    
    if not "host" in self.request.arguments:
      self.finish({ "success" : "false" , "error" : "Name of the host is missing or not defined" })
      return
    
    host = str(self.request.arguments[ "host" ][0])

    client = SystemAdministratorClient(host , None , delegatedDN=DN , delegatedGroup=group)

    result = yield self.threadTask(client.checkComponentLog, "*")
    
    gLogger.debug(result)
    if not result[ "OK" ]:
      self.finish({ "success" : "false" , "error" : result[ "Message" ] })
      return
    result = result[ "Value" ]
    
    callback = list()
    for key, value in result.items():
      system, component = key.split("/")
      value[ "System" ] = system
      value[ "Name" ] = component
      value[ "Host" ] = host
      callback.append(value)
    total = len(callback)

    self.finish({ "success" : "true" , "result" : callback , "total" : total })
Example no. 14
 def isFile( self, path ):
   """Check if the given path exists and it is a file
   """
   res = self.__checkArgumentFormat( path )
   if not res['OK']:
     return res
   urls = res['Value']
   gLogger.debug( "RFIOStorage.isFile: Determining whether %s paths are files." % len( urls ) )
   successful = {}
   failed = {}
   comm = "nsls -ld"
   for url in urls:
     comm = " %s %s" % ( comm, url )
   res = shellCall( self.timeout, comm )
   if not res['OK']:
     return res
   returncode, stdout, stderr = res['Value']
   if returncode in [0, 1]:
     for line in stdout.splitlines():
       permissions, _subdirs, _owner, _group, _size, _month, _date, _timeYear, pfn = line.split()
       if permissions[0] != 'd':
         successful[pfn] = True
       else:
         successful[pfn] = False
     for line in stderr.splitlines():
       pfn, error = line.split( ': ' )
       url = pfn.strip()
       failed[url] = error
   else:
     errStr = "RFIOStorage.isFile: Completely failed to determine whether path is file."
     gLogger.error( errStr, "%s %s" % ( self.name, stderr ) )
     return S_ERROR( errStr )
   resDict = {'Failed':failed, 'Successful':successful}
   return S_OK( resDict )
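
The stdout parsing relies on nsls -ld producing ls -l style lines with nine whitespace-separated fields; a quick illustration with a made-up line:

line = '-rw-r--r-- 1 stage st 1024 Jan 01 12:00 /castor/cern.ch/user/file.dst'
permissions, _subdirs, _owner, _group, _size, _month, _date, _timeYear, pfn = line.split()
isFile = permissions[0] != 'd'  # True here; a leading 'd' would mark a directory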
Example no. 15
 def removeDirectory( self, path, recursive = False ):
   """Remove a directory on the physical storage together with all its files and
      subdirectories.
   """
   res = self.__checkArgumentFormat( path )
   if not res['OK']:
     return res
   urls = res['Value']
   gLogger.debug( "RFIOStorage.removeDirectory: Attempting to remove %s directories." % len( urls ) )
   successful = {}
   failed = {}
   for url in urls:
     comm = "nsrm -r %s" % url
     res = shellCall( 100, comm )
     if res['OK']:
       returncode, _stdout, stderr = res['Value']
       if returncode == 0:
         successful[url] = {'FilesRemoved':0, 'SizeRemoved':0}
       elif returncode == 1:
         successful[url] = {'FilesRemoved':0, 'SizeRemoved':0}
       else:
         failed[url] = stderr
     else:
       errStr = "RFIOStorage.removeDirectory: Completely failed to remove directory."
       gLogger.error( errStr, "%s %s" % ( url, res['Message'] ) )
       failed[url] = res['Message']
   resDict = {'Failed':failed, 'Successful':successful}
   return S_OK( resDict )
Example no. 16
  def getDirectory( self, path, localPath = False ):
    """ Get locally a directory from the physical storage together with all its files and subdirectories.
    """
    res = self.__checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']

    successful = {}
    failed = {}
    gLogger.debug( "RFIOStorage.getDirectory: Attempting to get local copies of %s directories." % len( urls ) )
    for src_directory in urls:
      dirName = os.path.basename( src_directory )
      if localPath:
        dest_dir = "%s/%s" % ( localPath, dirName )
      else:
        dest_dir = "%s/%s" % ( os.getcwd(), dirName )
      res = self.__getDir( src_directory, dest_dir )
      if res['OK']:
        if res['Value']['AllGot']:
          gLogger.debug( "RFIOStorage.getDirectory: Successfully got local copy of %s" % src_directory )
          successful[src_directory] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
        else:
          gLogger.error( "RFIOStorage.getDirectory: Failed to get entire directory.", src_directory )
          failed[src_directory] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
      else:
        gLogger.error( "RFIOStorage.getDirectory: Completely failed to get local copy of directory.", src_directory )
        failed[src_directory] = {'Files':0, 'Size':0}
    resDict = {'Failed':failed, 'Successful':successful}
    return S_OK( resDict )
Example no. 17
  def putDirectory( self, path ):
    """ Put a local directory to the physical storage together with all its files and subdirectories.
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']

    successful = {}
    failed = {}
    gLogger.debug( "RFIOStorage.putDirectory: Attempting to put %s directories to remote storage." % len( urls ) )
    for destDir, sourceDir in urls.items():
      res = self.__putDir( sourceDir, destDir )
      if res['OK']:
        if res['Value']['AllPut']:
          gLogger.debug( "RFIOStorage.putDirectory: Successfully put directory to remote storage: %s" % destDir )
          successful[destDir] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
        else:
          gLogger.error( "RFIOStorage.putDirectory: Failed to put entire directory to remote storage.", destDir )
          failed[destDir] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
      else:
        gLogger.error( "RFIOStorage.putDirectory: Completely failed to put directory to remote storage.", destDir )
        failed[destDir] = {'Files':0, 'Size':0}
    resDict = {'Failed':failed, 'Successful':successful}
    return S_OK( resDict )
Example no. 18
 def exists( self, path ):
   """ Check if the given path exists. The 'path' variable can be a string or a list of strings.
   """
   res = self.__checkArgumentFormat( path )
   if not res['OK']:
     return res
   urls = res['Value']
   gLogger.debug( "RFIOStorage.exists: Determining the existence of %s files." % len( urls ) )
   comm = "nsls -d"
   for url in urls:
     comm = " %s %s" % ( comm, url )
   res = shellCall( self.timeout, comm )
   successful = {}
   failed = {}
   if res['OK']:
     returncode, stdout, stderr = res['Value']
     if returncode in [0, 1]:
       for line in stdout.splitlines():
         url = line.strip()
         successful[url] = True
       for line in stderr.splitlines():
         pfn, _ = line.split( ': ' )
         url = pfn.strip()
         successful[url] = False
     else:
       errStr = "RFIOStorage.exists: Completely failed to determine the existence of files."
       gLogger.error( errStr, "%s %s" % ( self.name, stderr ) )
       return S_ERROR( errStr )
   else:
     errStr = "RFIOStorage.exists: Completely failed to determine the existence of files."
     gLogger.error( errStr, "%s %s" % ( self.name, res['Message'] ) )
     return S_ERROR( errStr )
   resDict = {'Failed':failed, 'Successful':successful}
   return S_OK( resDict )
Example no. 19
 def isDirectory( self, path ):
   """Check if the given path exists and it is a directory
   """
   res = self.__checkArgumentFormat( path )
   if not res['OK']:
     return res
   urls = res['Value']
   gLogger.debug( "RFIOStorage.isDirectory: Determining whether %s paths are directories." % len( urls ) )
   res = self.__getPathMetadata( urls )
   if not res['OK']:
     return res
   failed = {}
   successful = {}
   for pfn, error in res['Value']['Failed'].items():
     if error == 'No such file or directory':
       failed[pfn] = 'Directory does not exist'
     else:
       failed[pfn] = error
   for pfn, pfnDict in res['Value']['Successful'].items():
     if pfnDict['Type'] == 'Directory':
       successful[pfn] = True
     else:
       successful[pfn] = False
   resDict = {'Failed':failed, 'Successful':successful}
   return S_OK( resDict )
Example no. 20
 def getFileMetadata( self, path ):
   """  Get metadata associated to the file
   """
   res = self.__checkArgumentFormat( path )
   if not res['OK']:
     return res
   urls = res['Value']
   gLogger.debug( "RFIOStorage.getFileMetadata: Obtaining metadata for %s files." % len( urls ) )
   res = self.__getPathMetadata( urls )
   if not res['OK']:
     return res
   failed = {}
   successful = {}
   for pfn, error in res['Value']['Failed'].items():
     if error == 'No such file or directory':
       failed[pfn] = 'File does not exist'
     else:
       failed[pfn] = error
   files = []
   for pfn, pfnDict in res['Value']['Successful'].items():
     if pfnDict['Type'] == 'Directory':
       failed[pfn] = "Supplied path is not a file"
     else:
       successful[pfn] = res['Value']['Successful'][pfn]
       files.append( pfn )
   if files:
     res = self.__getFileMetadata( files )
     if not res['OK']:
       return res
     for pfn, pfnDict in res['Value']['Successful'].items():
       successful[pfn].update( pfnDict )
   resDict = {'Failed':failed, 'Successful':successful}
   return S_OK( resDict )
Example no. 21
 def getFileSize( self, path ):
   """Get the physical size of the given file
   """
   res = self.__checkArgumentFormat( path )
   if not res['OK']:
     return res
   urls = res['Value']
   gLogger.debug( "RFIOStorage.getFileSize: Determining the sizes for %s files." % len( urls ) )
   res = self.__getPathMetadata( urls )
   if not res['OK']:
     return res
   failed = {}
   successful = {}
   for pfn, error in res['Value']['Failed'].items():
     if error == 'No such file or directory':
       failed[pfn] = 'File does not exist'
     else:
       failed[pfn] = error
   for pfn, pfnDict in res['Value']['Successful'].items():
     if pfnDict['Type'] == 'Directory':
       failed[pfn] = "Supplied path is not a file"
     else:
       successful[pfn] = res['Value']['Successful'][pfn]['Size']
   resDict = {'Failed':failed, 'Successful':successful}
   return S_OK( resDict )
Example no. 22
  def killJob( self, jobIDList ):
    """ Kill the specified jobs
    """

    result = self._prepareProxy()
    if not result['OK']:
      gLogger.error( 'ARCComputingElement: failed to set up proxy', result['Message'] )
      return result
    self.usercfg.ProxyPath(os.environ['X509_USER_PROXY'])

    js = arc.compute.JobSupervisor(self.usercfg)

    jobList = list( jobIDList )
    if isinstance( jobIDList, basestring ):
      jobList = [ jobIDList ]

    for jobID in jobList:
      job = self.__getARCJob( jobID )
      js.AddJob( job )

    result = js.Cancel() # Cancel all jobs at once

    if not result:
      gLogger.debug("Failed to kill jobs %s. CE(?) not reachable?" % jobIDList)
      return S_ERROR( 'Failed to kill the job(s)' )
    else:
      gLogger.debug("Killed jobs %s" % jobIDList)


    return S_OK()
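
The str-or-list normalisation at the top of killJob is a recurring idiom in these methods; a small shared helper could capture it (the name is hypothetical, and basestring keeps the Python 2 register of the snippet):

def ensureJobList(jobIDList):
  """ Accept a single job ID string or any iterable of IDs, always return a list """
  if isinstance(jobIDList, basestring):
    return [jobIDList]
  return list(jobIDList)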
Example no. 23
def getSiteCEMapping( gridName = '' ):
  """ Returns a dictionary of all sites and their CEs as a list, e.g.
      {'LCG.CERN.ch':['ce101.cern.ch',...]}
      If gridName is specified, result is restricted to that Grid type.
  """
  siteCEMapping = {}
  gridTypes = gConfig.getSections( 'Resources/Sites/', [] )
  if not gridTypes['OK']:
    gLogger.warn( 'Problem retrieving sections in /Resources/Sites' )
    return gridTypes

  gridTypes = gridTypes['Value']
  if gridName:
    if not gridName in gridTypes:
      return S_ERROR( 'Could not get sections for /Resources/Sites/%s' % gridName )
    gridTypes = [gridName]

  gLogger.debug( 'Grid Types are: %s' % ( ', '.join( gridTypes ) ) )
  for grid in gridTypes:
    sites = gConfig.getSections( '/Resources/Sites/%s' % grid, [] )
    if not sites['OK']:
      gLogger.warn( 'Problem retrieving /Resources/Sites/%s section' % grid )
      return sites
    for candidate in sites['Value']:
      candidateCEs = gConfig.getValue( '/Resources/Sites/%s/%s/CE' % ( grid, candidate ), [] )
      if candidateCEs:
        siteCEMapping[candidate] = candidateCEs
      else:
        gLogger.debug( 'No CEs defined for site %s' % candidate )

  return S_OK( siteCEMapping )
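
To make the expected CS layout concrete: with hypothetical entries such as

/Resources/Sites/LCG/LCG.CERN.ch/CE = ce101.cern.ch, ce102.cern.ch
/Resources/Sites/LCG/LCG.PIC.es/CE = ce01.pic.es

getSiteCEMapping() would return S_OK({'LCG.CERN.ch': ['ce101.cern.ch', 'ce102.cern.ch'], 'LCG.PIC.es': ['ce01.pic.es']}).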
Example no. 24
def _getPoolCatalogs( directory = '' ):

  patterns = ['*.xml', '*.xml*gz']
  omissions = ['\.bak$'] # to be ignored for production files

  #First obtain valid list of unpacked catalog files in directory
  poolCatalogList = []

  for pattern in patterns:
    fileList = glob.glob( os.path.join( directory, pattern ) )
    for fname in fileList:
      if fname.endswith( '.bak' ):
        gLogger.verbose( 'Ignoring BAK file: %s' % fname )
      elif tarfile.is_tarfile( fname ):
        try:
          gLogger.debug( 'Unpacking catalog XML file %s' % ( os.path.join( directory, fname ) ) )
          tarFile = tarfile.open( os.path.join( directory, fname ), 'r' )
          for member in tarFile.getmembers():
            tarFile.extract( member, directory )
            poolCatalogList.append( os.path.join( directory, member.name ) )
        except Exception, x :
          gLogger.error( 'Could not untar with exception', 
                         ' %s: %s' % ( fname, str( x ) ) )
      else:
        poolCatalogList.append( fname )
Example no. 26
  def insertCSSection( self, path, pardict ):
    """ insert a section and values (or subsections) into the CS

    :param str path: full path of the new section
    :param str pardict: dictionary of key values in the new section, values can also be dictionaries
    :return: S_OK(), S_ERROR()
    """
    from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
    if self.csapi is None:
      self.csapi = CSAPI()

    for key, value in pardict.iteritems():
      newSectionPath = os.path.join(path,key)
      gLogger.debug( "Adding to cs %s : %s " % ( newSectionPath, value ) )
      self.csapi.createSection( path )
      if isinstance( value, dict ):
        res = self.insertCSSection( newSectionPath, value )
      else:
        res = self.csapi.setOption( newSectionPath, value )

      if not res['OK']:
        return res
      else:
        gLogger.notice( "Added to CS: %s " % res['Value'] )

    return S_OK("Added all things to cs")
Example no. 27
 def _receiveAndCheckProposal( self, trid ):
   clientTransport = self._transportPool.get( trid )
   #Get the peer credentials
   credDict = clientTransport.getConnectingCredentials()
   #Receive the action proposal
   retVal = clientTransport.receiveData( 1024 )
   if not retVal[ 'OK' ]:
     gLogger.error( "Invalid action proposal", "%s %s" % ( self._createIdentityString( credDict,
                                                                                       clientTransport ),
                                                           retVal[ 'Message' ] ) )
     return S_ERROR( "Invalid action proposal" )
   proposalTuple = retVal[ 'Value' ]
   gLogger.debug( "Received action from client", "/".join( list( proposalTuple[1] ) ) )
   #Check if there are extra credentials
   if proposalTuple[2]:
     clientTransport.setExtraCredentials( proposalTuple[2] )
   #Check if this is the requested service
   requestedService = proposalTuple[0][0]
   if requestedService not in self._validNames:
     return S_ERROR( "%s is not up in this server" % requestedService )
   #Check if the action is valid
   requestedActionType = proposalTuple[1][0]
   if requestedActionType not in Service.SVC_VALID_ACTIONS:
     return S_ERROR( "%s is not a known action type" % requestedActionType )
   #Check if it's authorized
   result = self._authorizeProposal( proposalTuple[1], trid, credDict )
   if not result[ 'OK' ]:
     return result
   #Proposal is OK
   return S_OK( proposalTuple )
Example no. 28
def getCESiteMapping( gridName = '' ):
  """ Returns a dictionary of all CEs and their associated site, e.g.
      {'ce101.cern.ch':'LCG.CERN.ch', ...}
      Assumes CS structure of: /Resources/Sites/<GRIDNAME>/<SITENAME>
  """
  ceSiteMapping = {}
  gridTypes = gConfig.getSections( '/Resources/Sites/', [] )
  if not gridTypes['OK']:
    gLogger.warn( 'Problem retrieving sections in /Resources/Sites' )
    return gridTypes

  gridTypes = gridTypes['Value']
  if gridName:
    if not gridName in gridTypes:
      return S_ERROR( 'Could not get sections for /Resources/Sites/%s' % gridName )
    gridTypes = [gridName]

  gLogger.debug( 'Grid Types are: %s' % ( ', '.join( gridTypes ) ) )
  for grid in gridTypes:
    sites = gConfig.getSections( '/Resources/Sites/%s' % grid, [] )
    if not sites['OK']: #gConfig returns S_ERROR for empty sections until version
      gLogger.warn( 'Problem retrieving /Resources/Sites/%s section' % grid )
      return sites
    if sites:
      for candidate in sites['Value']:
        siteCEs = gConfig.getValue( '/Resources/Sites/%s/%s/CE' % ( grid, candidate ), [] )
        for ce in siteCEs:
          if ceSiteMapping.has_key( ce ):
            current = ceSiteMapping[ce]
            gLogger.warn( 'CE %s already has a defined site %s but it is also defined for %s' % ( ce, current, candidate ) )
          else:
            ceSiteMapping[ce] = candidate

  return S_OK( ceSiteMapping )
Example no. 29
def cleanUpLFNPath( lfn ):
  """ Normalise LFNs
  """
  gLogger.debug("LFN before Cleanup", lfn)
  lfn = posixpath.normpath(lfn)
  gLogger.verbose("LFN after Cleanup", lfn)
  return lfn
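
A quick worked example of what posixpath.normpath does here (the LFN is made up):

cleanUpLFNPath('/lhcb/user//a/./b/../data.dst')  # -> '/lhcb/user/a/data.dst'

Note that normpath collapses '..' purely textually, which is fine for LFNs since they are logical names rather than symlinked filesystem paths.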
Example no. 30
    def _getDirectoryContent(directory):
      """ Inner function: recursively scan a directory, returns list of LFNs
      """
      filesInDirectory = {}

      gLogger.debug("Examining %s" % directory)

      res = self.fileCatalog.listDirectory(directory)
      if not res['OK']:
        gLogger.error('Failed to get directory contents', res['Message'])
        return res
      if directory in res['Value']['Failed']:
        gLogger.error('Failed to get directory content', '%s %s' %
                      (directory, res['Value']['Failed'][directory]))
        return S_ERROR('Failed to get directory content')
      if directory not in res['Value']['Successful']:
        return S_ERROR('Directory not existing?')

      # first, adding the files found in the current directory
      gLogger.debug("Files in %s: %d" % (directory, len(
          res['Value']['Successful'][directory]['Files'])))
      filesInDirectory.update(res['Value']['Successful'][directory]['Files'])

      # then, looking for subDirectories content
      if res['Value']['Successful'][directory]['SubDirs']:
        for l_dir in res['Value']['Successful'][directory]['SubDirs']:
          # recursion here
          subDirContent = _getDirectoryContent(l_dir)
          if not subDirContent['OK']:
            return subDirContent
          else:
            filesInDirectory.update(subDirContent['Value'])

      return S_OK(filesInDirectory)
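
The recursive helper above can equally be written with an explicit stack, which sidesteps Python's recursion limit on very deep trees. A sketch under the same assumptions (DIRAC's S_OK/S_ERROR conventions and the listDirectory return shape used above; the method name is hypothetical):

from DIRAC import S_OK, S_ERROR, gLogger

def _getDirectoryContentIterative(self, topDirectory):
  """ Same contract as _getDirectoryContent, but loop-based """
  filesFound = {}
  toScan = [topDirectory]
  while toScan:
    directory = toScan.pop()
    gLogger.debug("Examining %s" % directory)
    res = self.fileCatalog.listDirectory(directory)
    if not res['OK']:
      gLogger.error('Failed to get directory contents', res['Message'])
      return res
    if directory in res['Value']['Failed']:
      return S_ERROR('Failed to get directory content')
    content = res['Value']['Successful'][directory]
    filesFound.update(content['Files'])
    # SubDirs iterates over subdirectory paths, exactly as the recursion above does
    toScan.extend(content['SubDirs'])
  return S_OK(filesFound)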
Example no. 31
    def __request_file(self):
        req = {"selection": {}, "path": "/"}

        separator = ":::"

        result = self.fc.getMetadataFields()
        gLogger.debug("request: %s" % result)

        if not result["OK"]:
            gLogger.error("request: %s" % result["Message"])
            return req
        result = result["Value"]

        if not result.has_key("FileMetaFields"):
            error = "Service response has no FileMetaFields key. Return empty dict"
            gLogger.error("request: %s" % error)
            return req

        if not result.has_key("DirectoryMetaFields"):
            error = "Service response has no DirectoryMetaFields key. Return empty dict"
            gLogger.error("request: %s" % error)
            return req

        filemeta = result["FileMetaFields"]
        dirmeta = result["DirectoryMetaFields"]

        meta = []
        for key, value in dirmeta.items():
            meta.append(key)

        gLogger.always("request: metafields: %s " % meta)

        selectionElems = self.request.arguments["selection"][0].split("<|>")

        gLogger.always("request: selection argument: %s " %
                       self.request.arguments["selection"][0])

        for param in selectionElems:

            tmp = str(param).split('|')

            if len(tmp) != 4:
                continue

            name = tmp[0]
            logic = tmp[1]

            if not logic in ["in", "nin", "=", "!=", ">=", "<=", ">", "<"]:
                gLogger.always("Operand '%s' is not supported " % logic)
                continue

            if name in meta:
                #check existence of the 'name' section
                if not req["selection"].has_key(name):
                    req["selection"][name] = dict()

                #check existence of the 'sign' section
                if not req["selection"][name].has_key(logic):
                    if tmp[2] == "v":
                        req["selection"][name][logic] = ""
                    elif tmp[2] == "s":
                        req["selection"][name][logic] = []

                if tmp[2] == "v":
                    req["selection"][name][logic] = tmp[3]
                elif tmp[2] == "s":
                    req["selection"][name][logic] += tmp[3].split(":::")
        if self.request.arguments.has_key("path"):
            req["path"] = self.request.arguments["path"][0]
        gLogger.always("REQ: ", req)
        return req
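
Each selection element above is a 'name|logic|type|value' string, where type 'v' marks a scalar and 's' a ':::'-separated list. A tiny standalone sketch of just that parsing step (the function name is hypothetical):

def parseSelectionElem(param):
    """ Return (name, logic, value) for a well-formed element, None otherwise """
    tmp = str(param).split('|')
    if len(tmp) != 4:
        return None
    name, logic, kind, raw = tmp
    if logic not in ["in", "nin", "=", "!=", ">=", "<=", ">", "<"]:
        return None
    value = raw.split(":::") if kind == "s" else raw
    return (name, logic, value)

parseSelectionElem("run|in|s|100:::101")  # -> ('run', 'in', ['100', '101'])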
Example no. 32
    def web_getLaunchpadOpts(self):

        defaultParams = {
            "JobName": [1, 'DIRAC'],
            "Executable": [1, "/bin/ls"],
            "Arguments": [1, "-ltrA"],
            "OutputSandbox": [1, "std.out, std.err"],
            "InputData": [0, ""],
            "OutputData": [0, ""],
            "OutputSE": [0, "DIRAC-USER"],
            "OutputPath": [0, ""],
            "CPUTime": [0, "86400"],
            "Site": [0, ""],
            "BannedSite": [0, ""],
            "Platform": [0, "Linux_x86_64_glibc-2.5"],
            "Priority": [0, "5"],
            "StdError": [0, "std.err"],
            "StdOutput": [0, "std.out"],
            "Parameters": [0, "0"],
            "ParameterStart": [0, "0"],
            "ParameterStep": [0, "1"]
        }

        delimiter = gConfig.getValue("/Website/Launchpad/ListSeparator", ',')
        options = self.__getOptionsFromCS(delimiter=delimiter)
        #     platform = self.__getPlatform()
        #     if platform and options:
        #       if not options.has_key("Platform"):
        #         options[ "Platform" ] = platform
        #       else:
        #         csPlatform = list(options[ "Platform" ])
        #         allPlatforms = csPlatform + platform
        #         platform = uniqueElements(allPlatforms)
        #         options[ "Platform" ] = platform
        gLogger.debug("Combined options from CS: %s" % options)
        override = gConfig.getValue("/Website/Launchpad/OptionsOverride",
                                    False)
        gLogger.info("end __getLaunchpadOpts")

        #    Updating the default values from OptionsOverride configuration branch

        for key in options:
            if key not in defaultParams:
                defaultParams[key] = [0, ""]
            defaultParams[key][1] = options[key][0]


        #    Reading of the predefined sets of launchpad parameters values

        obj = Operations()
        predefinedSets = {}

        launchpadSections = obj.getSections("Launchpad")
        import pprint
        if launchpadSections['OK']:
            for section in launchpadSections["Value"]:
                predefinedSets[section] = {}
                sectionOptions = obj.getOptionsDict("Launchpad/" + section)
                pprint.pprint(sectionOptions)
                if sectionOptions['OK']:
                    predefinedSets[section] = sectionOptions["Value"]

        self.write({
            "success": "true",
            "result": defaultParams,
            "predefinedSets": predefinedSets
        })
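
To illustrate the merge loop above: if the CS returned options = {'CPUTime': ['3600'], 'Destination': ['LCG.CERN.ch']} (made-up values), 'CPUTime' would keep its mandatory-flag 0 but take the value '3600', while the unknown 'Destination' key would be added as [0, 'LCG.CERN.ch'].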
Example no. 33
    def web_getLaunchpadSetupWithLFNs(self):
        #on the fly file catalog for advanced launchpad
        if not hasattr(self, 'fc'):
            userData = self.getSessionData()
            group = str(userData["user"]["group"])
            vo = getVOForGroup(group)
            self.fc = FileCatalog(vo=vo)

        self.set_header('Content-type', 'text/plain')
        lfnList = []
        arguments = self.request.arguments
        gLogger.always(
            "submit: incoming arguments %s to getLaunchpadSetupWithLFNs" %
            arguments)
        lfnStr = str(arguments['path'][0])
        lfnList = lfnStr.split(',')
        #checks if the experiments folder in lfn list has a rtg_def.m file at some subfolder
        gLogger.always("submit: checking if some rtg_def.m exists in %s" % arguments)
        processed = []
        metaDict = {'type': 'info'}
        for lfn in lfnStr.split(','):
            pos_relative = lfn.find("/")
            pos_relative = lfn.find("/", pos_relative + 1)
            pos_relative = lfn.find("/", pos_relative + 1)
            pos_relative = lfn.find("/", pos_relative + 1)
            pos_relative = lfn.find("/", pos_relative + 1)
            experiment_lfn = lfn[0:pos_relative]
            if experiment_lfn in processed:
                continue
            processed.append(experiment_lfn)
            gLogger.always("checking rtg_def.m in %s" % experiment_lfn)
            result = self.fc.findFilesByMetadata(metaDict,
                                                 path=str(experiment_lfn))
            gLogger.debug("findFilesByMetadata result: %s" % str(result))
            if not result['OK']:
                gLogger.error("Failed to get type info from %s, %s" %
                              (experiment_lfn, result["Message"]))
                continue
            if not result['Value']:
                continue
            for candidate_lfn in result['Value']:
                if candidate_lfn.find('rtg_def.m') > 0:
                    lfnList.append(candidate_lfn)

        ptlfn = ', '.join(lfnList)

        defaultParams = {
            "JobName": [1, 'Eiscat'],
            "Executable": [1, "/bin/ls"],
            "Arguments": [1, "-ltrA"],
            "OutputSandbox": [1, "std.out, std.err"],
            "InputData": [1, ptlfn],
            "OutputData": [0, ""],
            "OutputSE": [1, "EISCAT-disk"],
            "OutputPath": [0, ""],
            "CPUTime": [0, "86400"],
            "Site": [0, ""],
            "BannedSite": [0, ""],
            "Platform": [0, "Linux_x86_64_glibc-2.5"],
            "Priority": [0, "5"],
            "StdError": [0, "std.err"],
            "StdOutput": [0, "std.out"],
            "Parameters": [0, "0"],
            "ParameterStart": [0, "0"],
            "ParameterStep": [0, "1"]
        }

        delimiter = gConfig.getValue("/Website/Launchpad/ListSeparator", ',')
        options = self.__getOptionsFromCS(delimiter=delimiter)
        #     platform = self.__getPlatform()
        #     if platform and options:
        #       if not options.has_key("Platform"):
        #         options[ "Platform" ] = platform
        #       else:
        #         csPlatform = list(options[ "Platform" ])
        #         allPlatforms = csPlatform + platform
        #         platform = uniqueElements(allPlatforms)
        #         options[ "Platform" ] = platform
        gLogger.debug("Options from CS: %s" % options)
        override = gConfig.getValue("/Website/Launchpad/OptionsOverride",
                                    False)
        gLogger.info("end __getLaunchpadOpts")

        #    Updating the default values from OptionsOverride configuration branch

        for key in options:
            if key not in defaultParams:
                defaultParams[key] = [0, ""]
            defaultParams[key][1] = options[key][0]
        gLogger.info(
            "Default params + override from /Website/Launchpad/OptionsOverride -> %s"
            % defaultParams)

        #    Reading of the predefined sets of launchpad parameters values

        obj = Operations()
        predefinedSets = {}

        launchpadSections = obj.getSections("Launchpad")
        import pprint
        if launchpadSections['OK']:
            for section in launchpadSections["Value"]:
                predefinedSets[section] = {}
                sectionOptions = obj.getOptionsDict("Launchpad/" + section)
                pprint.pprint(sectionOptions)
                if sectionOptions['OK']:
                    predefinedSets[section] = sectionOptions["Value"]

        self.write({
            "success": "true",
            "result": defaultParams,
            "predefinedSets": predefinedSets
        })
Example no. 34
    def listDirectory(self, path):
        """ List the supplied path. First checks whether the path is a directory then gets the contents.
    """
        res = self.__checkArgumentFormat(path)
        if not res['OK']:
            return res
        urls = res['Value']
        gLogger.debug(
            "RFIOStorage.listDirectory: Attempting to list %s directories." %
            len(urls))
        res = self.isDirectory(urls)
        if not res['OK']:
            return res
        successful = {}
        failed = res['Value']['Failed']
        directories = []
        for url, isDirectory in res['Value']['Successful'].items():
            if isDirectory:
                directories.append(url)
            else:
                errStr = "RFIOStorage.listDirectory: Directory does not exist."
                gLogger.error(errStr, url)
                failed[url] = errStr

        for directory in directories:
            comm = "nsls -l %s" % directory
            res = shellCall(self.timeout, comm)
            if res['OK']:
                returncode, stdout, stderr = res['Value']
                if not returncode == 0:
                    errStr = "RFIOStorage.listDirectory: Failed to list directory."
                    gLogger.error(errStr, "%s %s" % (directory, stderr))
                    failed[directory] = errStr
                else:
                    subDirs = {}
                    files = {}
                    successful[directory] = {}
                    for line in stdout.splitlines():
                        permissions, _subdirs, _owner, _group, size, _month, _date, _timeYear, pfn = line.split()
                        if not pfn == 'dirac_directory':
                            path = "%s/%s" % (directory, pfn)
                            if permissions[0] == 'd':
                                # If the subpath is a directory
                                subDirs[path] = True
                            elif permissions[0] == 'm':
                                # In the case that the path is a migrated file
                                files[path] = {
                                    'Size': int(size),
                                    'Migrated': 1
                                }
                            else:
                                # In the case that the path is not migrated file
                                files[path] = {
                                    'Size': int(size),
                                    'Migrated': 0
                                }
                    successful[directory]['SubDirs'] = subDirs
                    successful[directory]['Files'] = files
            else:
                errStr = "RFIOStorage.listDirectory: Completely failed to list directory."
                gLogger.error(errStr, "%s %s" % (directory, res['Message']))
                return S_ERROR(errStr)
        resDict = {'Failed': failed, 'Successful': successful}
        return S_OK(resDict)
Example no. 35
    def __putFile(self, src_file, dest_url, sourceSize):
        """Put a copy of the local file to the current directory on the physical storage
    """
        # Pre-transfer check
        res = self.__executeOperation(dest_url, 'exists')
        if not res['OK']:
            gLogger.debug(
                "RFIOStorage.__putFile: Failed to find pre-existence of destination file."
            )
            return res
        if res['Value']:
            res = self.__executeOperation(dest_url, 'removeFile')
            if not res['OK']:
                gLogger.debug(
                    "RFIOStorage.__putFile: Failed to remove remote file %s." %
                    dest_url)
            else:
                gLogger.debug(
                    "RFIOStorage.__putFile: Removed remote file %s." %
                    dest_url)
        if not os.path.exists(src_file):
            errStr = "RFIOStorage.__putFile: The source local file does not exist."
            gLogger.error(errStr, src_file)
            return S_ERROR(errStr)
        sourceSize = getSize(src_file)
        if sourceSize == -1:
            errStr = "RFIOStorage.__putFile: Failed to get file size."
            gLogger.error(errStr, src_file)
            return S_ERROR(errStr)

        res = self.__getTransportURL(dest_url)
        if not res['OK']:
            gLogger.debug(
                "RFIOStorage.__putFile: Failed to get transport URL for file.")
            return res
        turl = res['Value']

        MIN_BANDWIDTH = 1024 * 100  # 100 KB/s
        timeout = sourceSize / MIN_BANDWIDTH + 300
        gLogger.debug("RFIOStorage.putFile: Executing transfer of %s to %s" %
                      (src_file, turl))
        comm = "rfcp %s '%s'" % (src_file, turl)
        res = shellCall(timeout, comm)
        if res['OK']:
            returncode, _stdout, stderr = res['Value']
            if returncode == 0:
                gLogger.debug(
                    'RFIOStorage.putFile: Put file to storage, performing post transfer check.'
                )
                res = self.__executeOperation(dest_url, 'getFileSize')
                if res['OK']:
                    destinationSize = res['Value']
                    if sourceSize == destinationSize:
                        gLogger.debug(
                            "RFIOStorage.__putFile: Post transfer check successful."
                        )
                        return S_OK(destinationSize)
                errorMessage = "RFIOStorage.__putFile: Source and destination file sizes do not match."
                gLogger.error(errorMessage, dest_url)
            else:
                errStr = "RFIOStorage.__putFile: Failed to put file to remote storage."
                gLogger.error(errStr, stderr)
                errorMessage = "%s %s" % (errStr, stderr)
        else:
            errStr = "RFIOStorage.__putFile: Failed to put file to remote storage."
            gLogger.error(errStr, res['Message'])
            errorMessage = "%s %s" % (errStr, res['Message'])
        res = self.__executeOperation(dest_url, 'removeFile')
        if res['OK']:
            gLogger.debug(
                "RFIOStorage.__putFile: Removed remote file remnant %s." %
                dest_url)
        else:
            gLogger.debug(
                "RFIOStorage.__putFile: Unable to remove remote file remnant %s."
                % dest_url)
        return S_ERROR(errorMessage)
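
To make the timeout rule concrete: with MIN_BANDWIDTH at 100 KB/s, a 1 GB source file gives sourceSize / MIN_BANDWIDTH + 300 = 1024**3 / (1024 * 100) + 300, roughly 10800 seconds, so the rfcp call is allowed about three hours before shellCall gives up.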
Example no. 36
  def __componentAction( self , action = None ):

    """
    Actions which should be done on components. The only parameter is the action
    to perform.
    Returns a standard JSON response structure with the service response
    or error messages
    """

    DN = getUserDN()
    group = getSelectedGroup()

    if ( not action ) or ( not len( action ) > 0 ):
      error = "Action is not defined or has zero length"
      gLogger.debug( error )
      return { "success" : "false" , "error" : error }

    if action not in [ "restart" , "start" , "stop" , "uninstall" ]:
      error = "The request parameters action '%s' is unknown" % action
      gLogger.debug( error )
      return { "success" : "false" , "error" : error }
    self.action = action

    result = dict()
    for i in request.params:
      if i == "action":
        continue

      target = i.split( " @ " , 1 )
      if not len( target ) == 2:
        continue

      system = request.params[ i ]
      gLogger.always( "System: %s" % system )
      host = target[ 1 ]
      gLogger.always( "Host: %s" % host )
      component = target[ 0 ]
      gLogger.always( "Component: %s" % component )
      if not host in result:
        result[ host ] = list()
      result[ host ].append( [ system , component ] )

    if not len( result ) > 0:
      error = "Failed to get component(s) for %s" % action
      gLogger.debug( error )
      return { "success" : "false" , "error" : error }
      
    gLogger.always( result )
    self.actionSuccess = list()
    self.actionFailed = list()

    for hostname in result.keys():

      if not len( result[ hostname ] ) > 0:
        continue

      client = SystemAdministratorClient( hostname , None , delegatedDN=DN ,
                                          delegatedGroup=group )

      for i in result[ hostname ]:

        system = i[ 0 ]
        component = i[ 1 ]

        try:
          if action == "restart":
            result = client.restartComponent( system , component )
          elif action == "start":
            result = client.startComponent( system , component )
          elif action == "stop":
            result = client.stopComponent( system , component )
          elif action == "uninstall":
            result = client.uninstallComponent( system , component )
          else:
            # A plain dict mimicking the S_ERROR structure checked below
            result = { "OK" : False, "Message" : "Action %s is not valid" % action }
        except Exception, x:
          result = { "OK" : False, "Message" : "Exception: %s" % str( x ) }
        gLogger.debug( "Result: %s" % result )

        if not result[ "OK" ]:
          error = hostname + ": " + result[ "Message" ]
          self.actionFailed.append( error )
          gLogger.error( "Failure during component %s: %s" % ( action , error ) )
        else:
          gLogger.always( "Successfully %s component %s" % ( action , component ) )
          self.actionSuccess.append( component )
Example no. 37
    def _getCatalogDirectoryContents(self, lfnDirs):
        """ Obtain the contents of the supplied directory, recursively
    """
        def _getDirectoryContent(directory):
            """ Inner function: recursively scan a directory, returns list of LFNs
      """
            filesInDirectory = {}

            gLogger.debug("Examining %s" % directory)

            res = self.fc.listDirectory(directory)
            if not res['OK']:
                gLogger.error('Failed to get directory contents',
                              res['Message'])
                return res
            if directory in res['Value']['Failed']:
                gLogger.error(
                    'Failed to get directory content',
                    '%s %s' % (directory, res['Value']['Failed'][directory]))
                return S_ERROR('Failed to get directory content')
            if directory not in res['Value']['Successful']:
                return S_ERROR('Directory not existing?')

            # first, adding the files found in the current directory
            gLogger.debug(
                "Files in %s: %d" %
                (directory, len(
                    res['Value']['Successful'][directory]['Files'])))
            filesInDirectory.update(
                res['Value']['Successful'][directory]['Files'])

            #then, looking for subDirectories content
            if res['Value']['Successful'][directory]['SubDirs']:
                for l_dir in res['Value']['Successful'][directory]['SubDirs']:
                    #recursion here
                    subDirContent = _getDirectoryContent(l_dir)
                    if not subDirContent['OK']:
                        return subDirContent
                    else:
                        filesInDirectory.update(subDirContent['Value'])

            return S_OK(filesInDirectory)

        gLogger.info('Obtaining the catalog contents for %d directories' %
                     len(lfnDirs))

        allFiles = {}
        for lfnDir in lfnDirs:
            dirContent = _getDirectoryContent(lfnDir)
            if not dirContent['OK']:
                return dirContent
            else:
                gLogger.debug("Content of directory %s: %d files" %
                              (lfnDir, len(dirContent['Value'])))
                allFiles.update(dirContent['Value'])

        gLogger.debug("Content of directories examined: %d files" %
                      len(allFiles))

        replicas = self.fc.getReplicas(list(allFiles))
        if not replicas['OK']:
            return replicas
        if replicas['Value']['Failed']:
            return S_ERROR("Failures in replicas discovery")

        return S_OK({
            'Metadata': allFiles,
            'Replicas': replicas['Value']['Successful']
        })
Example no. 38
    def web_getFilesData(self):
        req = self.__request()
        gLogger.always(req)
        gLogger.debug("submit: incoming request %s" % req)
        result = yield self.threadTask(self.fc.findFilesByMetadataWeb,
                                       req["selection"], req["path"],
                                       self.S_NUMBER, self.L_NUMBER)
        gLogger.debug("submit: result of findFilesByMetadataDetailed %s" %
                      result)
        if not result["OK"]:
            gLogger.error("submit: %s" % result["Message"])
            self.finish({"success": "false", "error": result["Message"]})
            return
        result = result["Value"]

        if not len(result) > 0:
            self.finish({
                "success": "true",
                "result": [],
                "total": 0,
                "date": "-"
            })
            return

        total = result["TotalRecords"]
        result = result["Records"]

        callback = list()
        for key, value in result.items():

            size = ""
            if "Size" in value:
                size = value["Size"]

            date = ""
            if "CreationDate" in value:
                date = str(value["CreationDate"])

            meta = ""
            if "Metadata" in value:
                m = value["Metadata"]
                meta = '; '.join(['%s: %s' % (i, j) for (i, j) in m.items()])

            dirnameList = key.split("/")
            dirname = "/".join(dirnameList[:-1])
            filename = dirnameList[-1]

            callback.append({
                "fullfilename": key,
                "dirname": dirname,
                "filename": filename,
                "date": date,
                "size": size,
                "metadata": meta
            })
        timestamp = Time.dateTime().strftime("%Y-%m-%d %H:%M [UTC]")
        self.finish({
            "success": "true",
            "result": callback,
            "total": total,
            "date": timestamp
        })
Example no. 39
0
    def setUpClass(cls):
        cls.failed = False

        # Add configuration
        cfg = CFG()
        cfg.loadFromBuffer(diracTestCACFG)
        gConfig.loadCFG(cfg)
        cfg.loadFromBuffer(userCFG)
        gConfig.loadCFG(cfg)

        # Prepare CA
        lines = []
        cfgDict = {}
        cls.caPath = os.path.join(certsPath, "ca")
        cls.caConfigFile = os.path.join(cls.caPath, "openssl_config_ca.cnf")
        # Save original configuration file
        shutil.copyfile(cls.caConfigFile, cls.caConfigFile + "bak")
        # Parse
        fields = [
            "dir", "database", "serial", "new_certs_dir", "private_key",
            "certificate"
        ]
        with open(cls.caConfigFile, "r") as caCFG:
            for line in caCFG:
                if re.findall("=", re.sub(r"#.*", "", line)):
                    field = re.sub(r"#.*", "",
                                   line).replace(" ",
                                                 "").rstrip().split("=")[0]
                    line = "dir = %s #PUT THE RIGHT DIR HERE!\n" % (
                        cls.caPath) if field == "dir" else line
                    val = re.sub(r"#.*", "",
                                 line).replace(" ", "").rstrip().split("=")[1]
                    if field in fields:
                        for i in fields:
                            if cfgDict.get(i):
                                val = val.replace("$%s" % i, cfgDict[i])
                        cfgDict[field] = val
                        if not cfgDict[field]:
                            cls.failed = "%s has an empty value in %s" % (
                                field, cls.caConfigFile)
                lines.append(line)
        with open(cls.caConfigFile, "w") as caCFG:
            caCFG.writelines(lines)
        for field in fields:
            if field not in cfgDict.keys():
                cls.failed = "%s value is absent in %s" % (field,
                                                           cls.caConfigFile)
        cls.hostCert = os.path.join(certsPath, "host/hostcert.pem")
        cls.hostKey = os.path.join(certsPath, "host/hostkey.pem")
        cls.caCert = cfgDict["certificate"]
        cls.caKey = cfgDict["private_key"]
        os.chmod(cls.caKey, stat.S_IREAD)
        # Check directory for new certificates
        cls.newCertDir = cfgDict["new_certs_dir"]
        if not os.path.exists(cls.newCertDir):
            os.makedirs(cls.newCertDir)
        for f in os.listdir(cls.newCertDir):
            os.remove(os.path.join(cls.newCertDir, f))
        # Empty the certificate database
        cls.index = cfgDict["database"]
        with open(cls.index, "w") as indx:
            indx.write("")
        # Write down serial
        cls.serial = cfgDict["serial"]
        with open(cls.serial, "w") as serialFile:
            serialFile.write("1000")

        # Create a temporary directory for user certificates
        cls.userDir = tempfile.mkdtemp(dir=certsPath)

        # Create user certificates
        for userName in ["no_user", "user", "user_1", "user_2", "user_3"]:
            userConf = """[ req ]
        default_bits           = 4096
        encrypt_key            = yes
        distinguished_name     = req_dn
        prompt                 = no
        req_extensions         = v3_req
        [ req_dn ]
        C                      = CC
        O                      = DN
        0.O                    = DIRAC
        CN                     = %s
        [ v3_req ]
        # Extensions for client certificates (`man x509v3_config`).
        nsComment = "OpenSSL Generated Client Certificate"
        keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
        extendedKeyUsage = clientAuth
        """ % (userName)
            userConfFile = os.path.join(cls.userDir, userName + ".cnf")
            userReqFile = os.path.join(cls.userDir, userName + ".req")
            userKeyFile = os.path.join(cls.userDir, userName + ".key.pem")
            userCertFile = os.path.join(cls.userDir, userName + ".cert.pem")
            with open(userConfFile, "w") as f:
                f.write(userConf)
            status, output = commands.getstatusoutput(
                "openssl genrsa -out %s" % userKeyFile)
            if status:
                gLogger.error(output)
                exit()
            gLogger.debug(output)
            os.chmod(userKeyFile, stat.S_IREAD)
            status, output = commands.getstatusoutput(
                "openssl req -config %s -key %s -new -out %s" %
                (userConfFile, userKeyFile, userReqFile))
            if status:
                gLogger.error(output)
                exit()
            gLogger.debug(output)
            cmd = "openssl ca -config %s -extensions usr_cert -batch -days 375 -in %s -out %s"
            cmd = cmd % (cls.caConfigFile, userReqFile, userCertFile)
            status, output = commands.getstatusoutput(cmd)
            if status:
                gLogger.error(output)
                exit()
            gLogger.debug(output)

        # Result
        status, output = commands.getstatusoutput("ls -al %s" % cls.userDir)
        if status:
            gLogger.error(output)
            exit()
        gLogger.debug("User certificates:\n", output)
Example no. 40
0
    def __request(self):
        req = {"selection": {}, "path": "/"}

        self.L_NUMBER = 25
        if self.request.arguments.has_key("limit") and len(
                self.request.arguments["limit"][0]) > 0:
            self.L_NUMBER = int(self.request.arguments["limit"][0])

        self.S_NUMBER = 0
        if self.request.arguments.has_key("start") and len(
                self.request.arguments["start"][0]) > 0:
            self.S_NUMBER = int(self.request.arguments["start"][0])

        result = gConfig.getOption("/Website/ListSeparator")
        if result["OK"]:
            separator = result["Value"]
        else:
            separator = ":::"

        result = self.fc.getMetadataFields()
        gLogger.debug("request: %s" % result)

        if not result["OK"]:
            gLogger.error("request: %s" % result["Message"])
            return req
        result = result["Value"]

        if not result.has_key("FileMetaFields"):
            error = "Service response has no FileMetaFields key. Return empty dict"
            gLogger.error("request: %s" % error)
            return req

        if not result.has_key("DirectoryMetaFields"):
            error = "Service response has no DirectoryMetaFields key. Return empty dict"
            gLogger.error("request: %s" % error)
            return req

        filemeta = result["FileMetaFields"]
        dirmeta = result["DirectoryMetaFields"]

        meta = []
        for key, value in dirmeta.items():
            meta.append(key)

        gLogger.always("request: metafields: %s " % meta)

        for param in self.request.arguments:

            tmp = str(param).split('.')

            if len(tmp) != 3:
                continue

            name = tmp[1]
            logic = tmp[2]
            value = self.request.arguments[param][0].split("|")

            if logic not in ["in", "nin", "=", "!=", ">=", "<=", ">", "<"]:
                gLogger.always("Operand '%s' is not supported" % logic)
                continue

            if name in meta:
                # check existence of the 'name' section
                if not req["selection"].has_key(name):
                    req["selection"][name] = dict()

                # check existence of the 'sign' section
                if not req["selection"][name].has_key(logic):
                    if value[0] == "v":
                        req["selection"][name][logic] = ""
                    elif value[0] == "s":
                        req["selection"][name][logic] = []

                if value[0] == "v":
                    req["selection"][name][logic] = value[1]
                elif value[0] == "s":
                    req["selection"][name][logic] += value[1].split(":::")
        if self.request.arguments.has_key("path"):
            req["path"] = self.request.arguments["path"][0]
        gLogger.always("REQ: ", req)
        return req
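
The argument convention handled above can be condensed into a standalone sketch; the input dictionary is hypothetical, and only the "<prefix>.<metaName>.<operator>" parsing is shown:

# Each argument named "<prefix>.<metaName>.<operator>" carries either a single
# value ("v|<value>") or a ":::"-separated list ("s|<v1>:::<v2>").
arguments = {'cb.EventType.in': ['s|90000000:::90000001'],
             'cb.RunNumber.>=': ['v|154030']}

selection = {}
for param, values in arguments.items():
    parts = param.split('.')
    if len(parts) != 3:
        continue
    _, name, logic = parts
    kind, _, payload = values[0].partition('|')
    if kind == 'v':
        selection.setdefault(name, {})[logic] = payload
    elif kind == 's':
        selection.setdefault(name, {}).setdefault(logic, []).extend(payload.split(':::'))

print(selection)
# {'EventType': {'in': ['90000000', '90000001']}, 'RunNumber': {'>=': '154030'}}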
Example no. 41
0
    def bulk_index(self,
                   indexprefix,
                   doc_type,
                   data,
                   mapping=None,
                   period=None):
        """
    :param str indexPrefix: it is the index name.
    :param str doc_type: the type of the document
    :param dict data: contains a list of dictionary
    :paran dict mapping: the mapping used by elasticsearch
    :param str period: We can specify, which kind of indexes will be created. Currently only daily and monthly indexes are supported.
    """
        gLogger.info("%d records will be insert to %s" % (len(data), doc_type))
        if mapping is None:
            mapping = {}

        indexName = generateFullIndexName(indexprefix, period)
        gLogger.debug("inserting datat to %s index" % indexName)
        if not self.exists(indexName):
            retVal = self.createIndex(indexprefix, mapping, period)
            if not retVal['OK']:
                return retVal
        docs = []
        for row in data:
            body = {'_index': indexName, '_type': doc_type, '_source': {}}
            body['_source'] = row

            if 'timestamp' not in row:
                gLogger.warn(
                    "timestamp is not given! Note: the current UTC time is used!")

            timestamp = row.get(
                'timestamp', int(Time.toEpoch())
            )  # if the timestamp is not provided, we use the current utc time.
            try:
                if isinstance(timestamp, datetime):
                    body['_source']['timestamp'] = int(
                        timestamp.strftime('%s')) * 1000
                elif isinstance(timestamp, basestring):
                    timeobj = datetime.strptime(timestamp,
                                                '%Y-%m-%d %H:%M:%S.%f')
                    body['_source']['timestamp'] = int(
                        timeobj.strftime('%s')) * 1000
                else:  # we assume  the timestamp is an unix epoch time (integer).
                    body['_source']['timestamp'] = timestamp * 1000
            except (TypeError, ValueError) as e:
                # in case we are not able to convert the timestamp to epoch time....
                gLogger.error("Wrong timestamp", e)
                body['_source']['timestamp'] = int(Time.toEpoch()) * 1000
            docs += [body]
        try:
            res = bulk(self.__client, docs, chunk_size=self.__chunk_size)
        except BulkIndexError as e:
            return S_ERROR(e)

        if res[0] == len(docs):
            # we have inserted all documents...
            return S_OK(len(docs))
        else:
            return S_ERROR(res)
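
The timestamp handling above can be isolated into a small standalone sketch; time.mktime stands in for the non-portable datetime.strftime('%s') used in the snippet, and the accepted input formats are assumptions taken from the code:

import time
from datetime import datetime

def toEpochMilliseconds(timestamp):
    # datetime objects: convert via the local time tuple
    if isinstance(timestamp, datetime):
        return int(time.mktime(timestamp.timetuple())) * 1000
    # strings: parse the '%Y-%m-%d %H:%M:%S.%f' format used above
    if isinstance(timestamp, str):
        timeobj = datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S.%f')
        return int(time.mktime(timeobj.timetuple())) * 1000
    # otherwise assume an epoch time in seconds
    return int(timestamp) * 1000

print(toEpochMilliseconds('2016-09-20 10:30:00.000'))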
Example no. 42
0
                                  (os.path.join(directory, fname)))
                    tarFile = tarfile.open(os.path.join(directory, fname), 'r')
                    for member in tarFile.getmembers():
                        tarFile.extract(member, directory)
                        poolCatalogList.append(
                            os.path.join(directory, member.name))
                except Exception, x:
                    gLogger.error('Could not untar %s with exception %s' %
                                  (fname, str(x)))
            else:
                poolCatalogList.append(fname)

    poolCatalogList = uniqueElements(poolCatalogList)

    # Now we have a list of all XML files, but some may not be Pool XML catalogs...
    finalCatList = []
    for possibleCat in poolCatalogList:
        try:
            # attempt to parse the file; failure means it is not a Pool XML catalog
            PoolXMLCatalog(possibleCat)
            finalCatList.append(possibleCat)
        except Exception:
            gLogger.debug('Ignoring non-POOL catalogue file %s' % possibleCat)

    gLogger.debug('Final list of catalog files are: %s' %
                  string.join(finalCatList, ', '))

    return finalCatList


#############################################################################
Example no. 43
0
  def retrieveAggregatedData( self, typeName, startTime, endTime, interval, selectFields, condDict, grouping, metainfo ):
    """
    Get data from the DB using simple aggregations. Note: this method is equivalent to retrieveBucketedData.
    The different is the dynamic bucketing. We do not perform dynamic bucketing on the raw data.

    :param str typeName name of the monitoring type
    :param int startTime  epoch objects.
    :param int endtime epoch objects.
    :param dict condDict -> conditions for the query
                  key -> name of the field
                  value -> list of possible values

    """
#    {'query': {'bool': {'filter': [{'bool': {'must': [{'range': {'timestamp': {'gte': 1474271462000, 'lte': 1474357862000}}}]}}]}}, 'aggs': {'end_data': {'date_histogram': {'field': 'timestamp', 'interval': '30m'}, 'aggs': {'tt': {'terms': {'field': 'component', 'size': 0}, 'aggs': {'m1': {'avg': {'
#    field': 'threads'}}}}}}}}
#
#     query = [Q( 'range',timestamp = {'lte':1474357862000,'gte': 1474271462000} )]
#
#     a = A('terms', field = 'component', size = 0 )
#     a.metric('m1', 'avg', field = 'threads' )
#
#     s = Search(using=cl, index = 'lhcb-certification_componentmonitoring-index-*')
#
#     s = s.filter( 'bool', must = query )
#     s = s.aggs.bucket('end_data', 'date_histogram', field='timestamp', interval='30m').metric( 'tt', a )

    retVal = self.getIndexName( typeName )
    if not retVal['OK']:
      return retVal

    # default is average
    aggregator = metainfo.get( 'metric', 'avg' )

    indexName = "%s*" % ( retVal['Value'] )
    q = [self._Q( 'range',
                  timestamp = {'lte':endTime * 1000,
                               'gte': startTime * 1000} )]
    for cond in condDict:
      query = None
      for condValue in condDict[cond]:
        kwargs = {cond: condValue}
        if query:
          query = query | self._Q( 'match', **kwargs )
        else:
          query = self._Q( 'match', **kwargs )
      q += [query]

    a1 = self._A( 'terms', field = grouping, size = 0 )
    a1.metric( 'm1', aggregator, field = selectFields[0] )

    s = self._Search( indexName )
    s = s.filter( 'bool', must = q )
    s.aggs.bucket( 'end_data',
                   'date_histogram',
                   field = 'timestamp',
                   interval = interval ).metric( 'tt', a1 )

    #s.fields( ['timestamp'] + selectFields )
    s = s.extra( size = 0 )  # do not get the hits!

    gLogger.debug( 'Query:', s.to_dict() )
    retVal = s.execute()

    result = {}
    for bucket in retVal.aggregations['end_data'].buckets:
      # each bucket key is a time (unix epoch in milliseconds)
      bucketTime = bucket.key / 1000
      for value in bucket['tt'].buckets:
        # each bucket contains an aggregation called tt, which holds the sum/avg of the metric
        if value.key not in result:
          result[value.key] = {bucketTime: value.m1.value if value.m1.value else 0}  # TODO: this is kind of a hack;
          # we could use a default value for the pipeline aggregation. ES promised default values
          # for simple aggregations as well; to be checked later.
        else:
          result[value.key].update( {bucketTime: value.m1.value if value.m1.value else 0} )
    # the result format is { grouping: {timestamp: value, ...}, ... }, for example:
    # {u'Bookkeeping_BookkeepingManager': {1474300800: 4.0, 1474344000: 4.0, ...},
    #  u'Framework_SystemAdministrator': {1474300800: 8.0, 1474344000: 8.0, ...}}

    return S_OK( result )
Example no. 44
0
readBanned = []
writeBanned = []
storageCFGBase = "/Resources/StorageElements"
for se in ses:
    res = gConfig.getOptionsDict("%s/%s" % (storageCFGBase, se))
    if not res['OK']:
        gLogger.error("Storage Element %s does not exist" % se)
        continue
    existingOptions = res['Value']
    if read and existingOptions.get('ReadAccess') == "Active":
        res = csAPI.setOption("%s/%s/ReadAccess" % (storageCFGBase, se),
                              "InActive")
        if not res['OK']:
            gLogger.error("Failed to update %s read access to InActive" % se)
        else:
            gLogger.debug("Successfully updated %s read access to InActive" %
                          se)
            readBanned.append(se)
    if write and existingOptions.get('WriteAccess') == "Active":
        res = csAPI.setOption("%s/%s/WriteAccess" % (storageCFGBase, se),
                              "InActive")
        if not res['OK']:
            gLogger.error("Failed to update %s write access to InActive" % se)
        else:
            gLogger.debug("Successfully updated %s write access to InActive" %
                          se)
            writeBanned.append(se)
res = csAPI.commitChanges()
if not res['OK']:
    gLogger.error("Failed to commit changes to CS", res['Message'])
    DIRAC.exit(-1)
Example no. 45
0
def getPoliciesThatApply(decisionParams):
    """
    Method that sanitizes the input parameters and returns the policies that
    match them. Matches the input dictionary with the policies configuration in
    the CS. It returns a list of policy dictionaries that matched.
  """

    # InfoGetter is being called from SiteInspector Agent

    decisionParams = _sanitizedecisionParams(decisionParams)
    gLogger.debug("Sanitized decisionParams: %s" % str(decisionParams))

    policiesThatApply = []

    # Get policies configuration metadata from CS.
    policiesConfig = RssConfiguration.getPolicies()
    if not policiesConfig['OK']:
        return policiesConfig
    policiesConfig = policiesConfig['Value']
    gLogger.debug("All policies: %s" % str(policiesConfig))

    # Each policy, has the following format
    # <policyName>
    # \
    #  policyType = <policyType>
    #  matchParams
    #  \
    #   ...
    #  configParams
    #  \
    #   ...

    # Get policies that match the given decisionParameters
    for policyName, policySetup in policiesConfig.items():

        # The parameter policyType replaces policyName, so if it is not present,
        # we pick policyName
        try:
            policyType = policySetup['policyType'][0]
        except KeyError:
            policyType = policyName
            # continue

        # The section matchParams is not mandatory, so we set {} as default.
        policyMatchParams = policySetup.get('matchParams', {})
        gLogger.debug("matchParams of %s: %s" %
                      (policyName, str(policyMatchParams)))

        # FIXME: make sure the values in the policyConfigParams dictionary are typed !!
        policyConfigParams = {}
        # policyConfigParams = policySetup.get( 'configParams', {} )
        policyMatch = Utils.configMatch(decisionParams, policyMatchParams)
        gLogger.debug("PolicyMatch for decisionParams %s: %s" %
                      (decisionParams, str(policyMatch)))

        # WARNING: we need an additional filtering function when the matching
        # is not straightforward (e.g. when the policy specify a 'domain', while
        # the decisionParams has only the name of the element)
        if policyMatch and _filterPolicies(decisionParams, policyMatchParams):
            policiesThatApply.append(
                (policyName, policyType, policyConfigParams))

    gLogger.debug("policies that apply (before post-processing): %s" %
                  str(policiesThatApply))
    policiesThatApply = postProcessingPolicyList(policiesThatApply)
    gLogger.debug("policies that apply (after post-processing): %s" %
                  str(policiesThatApply))

    policiesToBeLoaded = []
    # Gets policies parameters from code.
    for policyName, policyType, _policyConfigParams in policiesThatApply:

        try:
            configModule = Utils.voimport(
                'DIRAC.ResourceStatusSystem.Policy.Configurations')
            policies = copy.deepcopy(configModule.POLICIESMETA)
            policyMeta = policies[policyType]
        except KeyError:
            continue

        # We are not going to use name / type anymore, but we keep them for debugging
        # and future usage.
        policyDict = {'name': policyName, 'type': policyType, 'args': {}}

        # args is one of the parameters we are going to use on the policies. We copy
        # the defaults and then we update if with whatever comes from the CS.
        policyDict.update(policyMeta)

        policiesToBeLoaded.append(policyDict)

    return S_OK(policiesToBeLoaded)
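
A simplified illustration of the matching idea behind Utils.configMatch, not the DIRAC implementation: every key constrained by matchParams must be satisfied by the corresponding decision parameter. All names and values here are illustrative:

def simpleConfigMatch(decisionParams, matchParams):
    for key, allowedValues in matchParams.items():
        candidate = decisionParams.get(key)
        if candidate is None:
            return False
        if isinstance(allowedValues, (list, tuple)):
            if candidate not in allowedValues:
                return False
        elif candidate != allowedValues:
            return False
    return True

print(simpleConfigMatch({'elementType': 'StorageElement', 'name': 'SOME-SE'},
                        {'elementType': ['StorageElement']}))  # True
print(simpleConfigMatch({'elementType': 'Site'},
                        {'elementType': ['StorageElement']}))  # False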
Example no. 46
0
 def submit(self):
     pagestart = time()
     RPC = getRPCClient("ResourceStatus/ResourceStatus")
     client = ResourceStatusClient(serviceIn=RPC)
     if not request.params.has_key("mode") or not len(
             request.params["mode"]) > 0:
         gLogger.error("The parameter 'mode' is absent")
         return {
             "success": "false",
             "error": "The parameter 'mode' is absent"
         }
     mode = request.params["mode"]
     gLogger.verbose("Requested mode is %s" % mode)
     if not mode in MODELIST:
         gLogger.error(
             "Parameter 'mode': %s is wrong. Should be one of the list %s" %
             (mode, MODELIST))
         return {"success": "false", "error": "Parameter 'mode' is wrong"}
     if mode in STORELIST:
         mode = 'StorageElement'
     gLogger.verbose("Selected mode is %s" % mode)
     req = self.__request()
     gLogger.info("getMonitoredsStatusWeb(%s,%s,%s,%s)" %
                  (mode, req, P_NUMBER, R_NUMBER))
     result = client.getMonitoredsStatusWeb(mode, req, P_NUMBER, R_NUMBER)
     gLogger.debug("Call result: %s" % result)
     if not result["OK"]:
         error = result["Message"]
         gLogger.error(error)
         return {"success": "false", "error": error}
     result = result["Value"]
     if not result.has_key(
             "TotalRecords") or not result["TotalRecords"] > 0:
         return {
             "success": "false",
             "error": "There were no data matching your selection"
         }
     if not result.has_key("ParameterNames") or not result.has_key(
             "Records"):
         return {"success": "false", "error": "Data structure is corrupted"}
     if not len(result["ParameterNames"]) > 0:
         return {
             "success": "false",
             "error": "ParameterNames field is undefined"
         }
     if not len(result["Records"]) > 0:
         return {
             "success": "false",
             "error": "There are no data to display"
         }
     c.result = []
     records = result["Records"]
     head = result["ParameterNames"]
     headLength = len(head)
     countryCode = self.__countries()
     for i in records:
         tmp = {}
         for j in range(0, headLength):
             tmp[head[j]] = i[j]
             if mode == "Resource":
                 if countryCode.has_key(i[4]):
                     tmp["FullCountry"] = countryCode[i[4]]
                 else:
                     tmp["Country"] = "Unknown"
                     tmp["FullCountry"] = "Unknown"
             else:
                 if countryCode.has_key(i[3]):
                     tmp["FullCountry"] = countryCode[i[3]]
                 else:
                     tmp["Country"] = "Unknown"
                     tmp["FullCountry"] = "Unknown"
         c.result.append(tmp)
     total = result["TotalRecords"]
     if result.has_key("Extras"):
         extra = result["Extras"]
         c.result = {
             "success": "true",
             "result": c.result,
             "total": total,
             "extra": extra
         }
     else:
         c.result = {"success": "true", "result": c.result, "total": total}
     return c.result
Example no. 47
0
  def _ByJobType( self ):
    """ By default, all sites are allowed to do every job. The actual rules are freely specified in the Operation JobTypeMapping section.
        The content of the section may look like this:

        User
        {
          Exclude = PAK
          Exclude += Ferrara
          Exclude += Bologna
          Exclude += Paris
          Exclude += CERN
          Exclude += IN2P3
          Allow
          {
            Paris = IN2P3
            CERN = CERN
            IN2P3 = IN2P3
          }
        }
        DataReconstruction
        {
          Exclude = PAK
          Exclude += Ferrara
          Exclude += CERN
          Exclude += IN2P3
          Allow
          {
            Ferrara = CERN
            CERN = CERN
            IN2P3 = IN2P3
            IN2P3 += CERN
          }
        }
        Merge
        {
          Exclude = ALL
          Allow
          {
            CERN = CERN
            IN2P3 = IN2P3
          }
        }

        The sites in the exclusion list will be removed.
        The Allow section specifies which sites each site may help: the key site
        may run jobs whose destination is one of the sites in its value list.

    """
    # 1. get sites list
    res = getSites()
    if not res['OK']:
      gLogger.error( "Could not get the list of sites", res['Message'] )
      return res
    destSites = set( res['Value'] )

    # 2. get JobTypeMapping "Exclude" value (and add autoAddedSites)
    gLogger.debug( "Getting JobTypeMapping 'Exclude' value (and add autoAddedSites)" )
    jobType = self.params['JobType']
    if not jobType:
      raise RuntimeError( "No jobType specified" )
    excludedSites = self.opsH.getValue( 'JobTypeMapping/%s/Exclude' % jobType, [] )
    gLogger.debug( "Explicitly excluded sites for %s task: %s" % ( jobType, ','.join( excludedSites ) ) )
    excludedSites += self.opsH.getValue( 'JobTypeMapping/AutoAddedSites', [] )
    gLogger.debug( "Full list of excluded sites for %s task: %s" % ( jobType, ','.join( excludedSites ) ) )

    # 3. removing sites in Exclude
    if not excludedSites:
      pass
    elif 'ALL' in excludedSites:
      destSites = set()
    else:
      destSites = destSites.difference( set( excludedSites ) )

    # 4. get JobTypeMapping "Allow" section
    res = self.opsH.getOptionsDict( 'JobTypeMapping/%s/Allow' % jobType )
    if not res['OK']:
      gLogger.verbose( res['Message'] )
      allowed = {}
    else:
      allowed = res['Value']
      for site in allowed:
        allowed[site] = fromChar( allowed[site] )

    # 5. add autoAddedSites, if requested
    autoAddedSites = self.opsH.getValue( 'JobTypeMapping/AutoAddedSites', [] )
    for autoAddedSite in autoAddedSites:
      # each auto-added site must at least be allowed to run its own jobs
      allowed.setdefault( autoAddedSite, [] )
      if autoAddedSite not in allowed[autoAddedSite]:
        allowed[autoAddedSite] = [autoAddedSite] + allowed[autoAddedSite]
    gLogger.debug( "Allowed sites for %s task: %s" % ( jobType, ','.join( allowed ) ) )

    # 6. Allowing sites that should be allowed
    if not self.params['TargetSE'] or self.params['TargetSE'] == 'Unknown':
      gLogger.warn( "TargetSE is not set: the destination sites list will be incomplete" )
    taskSiteDestination = self._BySE()

    for destSite, fromSites in allowed.iteritems():
      for fromSite in fromSites:
        if taskSiteDestination:
          if fromSite in taskSiteDestination:
            destSites.add( destSite )
        else:
          destSites.add( destSite )

    gLogger.verbose( "Computed list of destination sites for %s task with TargetSE %s: %s" % ( jobType,
                                                                                               self.params['TargetSE'],
                                                                                               ','.join( destSites ) ) )
    return destSites
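
A self-contained sketch of the Exclude/Allow rules described in the docstring above, using the 'Merge' mapping as input; plain literals stand in for the CS data and the opsHelper, and the site names are illustrative:

allSites = {'LCG.CERN.ch', 'LCG.IN2P3.fr', 'LCG.Ferrara.it'}
excludedSites = ['ALL']
allowed = {'LCG.CERN.ch': ['LCG.CERN.ch'], 'LCG.IN2P3.fr': ['LCG.IN2P3.fr']}
taskSiteDestination = {'LCG.CERN.ch'}  # sites derived from the TargetSE

# exclusion: 'ALL' empties the initial candidate set
destSites = set() if 'ALL' in excludedSites else allSites.difference(excludedSites)

# allow: destSite may help any of its fromSites; add it if one of them holds the data
for destSite, fromSites in allowed.items():
    if any(fromSite in taskSiteDestination for fromSite in fromSites):
        destSites.add(destSite)

print(destSites)  # set(['LCG.CERN.ch']): only CERN may run a Merge job on CERN data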
Example no. 48
0
  def retrieveBucketedData( self, typeName, startTime, endTime, interval, selectFields, condDict, grouping, metainfo ):
    """
    Get data from the DB

    :param str typeName name of the monitoring type
    :param int startTime  epoch objects.
    :param int endtime epoch objects.
    :param dict condDict -> conditions for the query
                  key -> name of the field
                  value -> list of possible values

    """

    retVal = self.getIndexName( typeName )
    if not retVal['OK']:
      return retVal
    isAvgAgg = False
    # the data is used to fill the pie charts. This aggregation is used to average the buckets.
    if metainfo and metainfo.get( 'metric', 'sum' ) == 'avg':
      isAvgAgg = True

    indexName = "%s*" % ( retVal['Value'] )
    q = [self._Q( 'range',
                  timestamp = {'lte':endTime * 1000,
                               'gte': startTime * 1000} )]
      
    for cond in condDict:
      query = None
      for condValue in condDict[cond]:
        kwargs = {cond: condValue}
        if query:
          query = query | self._Q( 'match', **kwargs )
        else:
          query = self._Q( 'match', **kwargs )
      q += [query]

    a1 = self._A( 'terms', field = grouping, size = 0 )
    a2 = self._A( 'terms', field = 'timestamp' )
    a2.metric( 'total_jobs', 'sum', field = selectFields[0] )
    a1.bucket( 'end_data',
               'date_histogram',
               field = 'timestamp',
               interval = interval ).metric( 'tt', a2 ).pipeline( 'avg_monthly_sales',
                                                                  'avg_bucket',
                                                                  buckets_path = 'tt>total_jobs',
                                                                  gap_policy = 'insert_zeros' )
    if isAvgAgg:
      a1.pipeline( 'avg_total_jobs',
                   'avg_bucket',
                   buckets_path = 'end_data>avg_monthly_sales',
                   gap_policy = 'insert_zeros' )

    s = self._Search( indexName )
    s = s.filter( 'bool', must = q )
    s.aggs.bucket( '2', a1 )
    #s.fields( ['timestamp'] + selectFields )
    gLogger.debug( 'Query:', s.to_dict() )
    retVal = s.execute()

    gLogger.debug( "Query result", len( retVal ) )

    result = {}
    for i in retVal.aggregations['2'].buckets:
      if isAvgAgg:
        result[i.key] = i.avg_total_jobs.value
      else:
        site = i.key
        dp = {}
        for j in i.end_data.buckets:
          dp[j.key / 1000] = j.avg_monthly_sales.value
        result[site] = dp
    # the result format is { grouping: {timestamp: value, ...}, ... }, for example:
    # {u'Bookkeeping_BookkeepingManager': {1474300800: 4.0, 1474344000: 4.0, ...},
    #  u'Framework_SystemAdministrator': {1474300800: 8.0, 1474344000: 8.0, ...}}
    return S_OK( result )
Example no. 49
0
  def __getXRSLExtraString(self):
      # For the XRSL additional string from configuration - only done at initialisation time
      # If this string changes, the corresponding (ARC) site directors have to be restarted
      #
      # Variable = XRSLExtraString
      # Default value = ''
      #   If you give a value, I think it should be of the form
      #          (aaa = "xxx")
      #   Otherwise the ARC job description parser will have a fit
      # Locations searched in order :
      # Top priority    : Resources/Sites/<Grid>/<Site>/CEs/<CE>/XRSLExtraString
      # Second priority : Resources/Sites/<Grid>/<Site>/XRSLExtraString
      # Default         : Resources/Computing/CEDefaults/XRSLExtraString
      #
      self.xrslExtraString = ''  # Start with the default value
      result = getSiteForCE(self.ceHost)
      self.site = ''
      if result['OK']:
          self.site = result['Value']
      else:
          gLogger.error("Unknown Site ...")
          return
      # Now we know the site. Get the grid
      grid = self.site.split(".")[0]
      # The different possibilities that we have agreed upon, in priority order
      xtraVariable = "XRSLExtraString"
      options = [
          "Resources/Sites/%s/%s/CEs/%s/%s" % (grid, self.site, self.ceHost, xtraVariable),
          "Resources/Sites/%s/%s/%s" % (grid, self.site, xtraVariable),
          "Resources/Computing/CEDefaults/%s" % xtraVariable,
      ]
      # Now go about getting the string in the agreed order
      for option in options:
          gLogger.debug("Trying to get xrslExtra string : option %s" % option)
          result = gConfig.getValue(option, defaultValue='')
          if result:
              self.xrslExtraString = result
              gLogger.debug("Found xrslExtra string : %s" % self.xrslExtraString)
              break
      if not self.xrslExtraString:
          gLogger.always("No XRSLExtra string found in configuration for %s" % self.ceHost)
      else:
          gLogger.always("XRSLExtra string : %s" % self.xrslExtraString)
          gLogger.always(" --- to be added to pilots going to CE : %s" % self.ceHost)
Example no. 50
0
  def getSiteSEMapping(self):
    """ Returns a dictionary of all sites and their localSEs as a list, e.g.
        {'LCG.CERN.ch':['CERN-RAW','CERN-RDST',...]}
    """
    if self.siteSEMapping:
      return S_OK(self.siteSEMapping)

    # Get the list of SEs and keep a mapping of those using an Alias or a
    # BaseSE
    storageElements = gConfig.getSections('Resources/StorageElements')
    if not storageElements['OK']:
      gLogger.warn('Problem retrieving storage elements',
                   storageElements['Message'])
      return storageElements
    storageElements = storageElements['Value']
    equivalentSEs = {}
    for se in storageElements:
      for option in ('BaseSE', 'Alias'):
        originalSE = gConfig.getValue(
            'Resources/StorageElements/%s/%s' % (se, option))
        if originalSE:
          equivalentSEs.setdefault(originalSE, []).append(se)
          break

    siteSEMapping = {}
    gridTypes = gConfig.getSections('Resources/Sites/')
    if not gridTypes['OK']:
      gLogger.warn(
          'Problem retrieving sections in /Resources/Sites', gridTypes['Message'])
      return gridTypes

    gridTypes = gridTypes['Value']

    gLogger.debug('Grid Types are: %s' % (', '.join(gridTypes)))
    # Get a list of sites and their local SEs
    siteSet = set()
    storageElementSet = set()
    siteSEMapping[LOCAL] = {}
    for grid in gridTypes:
      result = gConfig.getSections('/Resources/Sites/%s' % grid)
      if not result['OK']:
        gLogger.warn('Problem retrieving /Resources/Sites/%s section' % grid)
        return result
      sites = result['Value']
      siteSet.update(sites)
      for site in sites:
        candidateSEs = gConfig.getValue(
            '/Resources/Sites/%s/%s/SE' % (grid, site), [])
        if candidateSEs:
          candidateSEs += [
              eqSE for se in candidateSEs for eqSE in equivalentSEs.get(se, [])]
          siteSEMapping[LOCAL].setdefault(site, set()).update(candidateSEs)
          storageElementSet.update(candidateSEs)

    # Add Sites from the SiteSEMappingByProtocol in the CS
    siteSEMapping[PROTOCOL] = {}
    cfgLocalSEPath = cfgPath('SiteSEMappingByProtocol')
    result = self.__opsHelper.getOptionsDict(cfgLocalSEPath)
    if result['OK']:
      sites = result['Value']
      for site in sites:
        candidates = set(self.__opsHelper.getValue(
            cfgPath(cfgLocalSEPath, site), []))
        ses = set(resolveSEGroup(candidates - siteSet)
                  ) | (candidates & siteSet)
        # If a candidate is a site, then all local SEs are eligible
        for candidate in ses & siteSet:
          ses.remove(candidate)
          ses.update(siteSEMapping[LOCAL][candidate])
        siteSEMapping[PROTOCOL].setdefault(site, set()).update(ses)

    # Add Sites from the SiteSEMappingByDownload in the CS, else
    # SiteLocalSEMapping (old convention)
    siteSEMapping[DOWNLOAD] = {}
    cfgLocalSEPath = cfgPath('SiteSEMappingByDownload')
    result = self.__opsHelper.getOptionsDict(cfgLocalSEPath)
    if not result['OK']:
      cfgLocalSEPath = cfgPath('SiteLocalSEMapping')
      result = self.__opsHelper.getOptionsDict(cfgLocalSEPath)
    if result['OK']:
      sites = result['Value']
      for site in sites:
        candidates = set(self.__opsHelper.getValue(
            cfgPath(cfgLocalSEPath, site), []))
        ses = set(resolveSEGroup(candidates - siteSet)
                  ) | (candidates & siteSet)
        # If a candidate is a site, then all local SEs are eligible
        for candidate in ses & siteSet:
          ses.remove(candidate)
          ses.update(siteSEMapping[LOCAL][candidate])
        siteSEMapping[DOWNLOAD].setdefault(site, set()).update(ses)

    self.siteSEMapping = siteSEMapping
    # Add storage elements that may not be associated with a site
    result = gConfig.getSections('/Resources/StorageElements')
    if not result['OK']:
      gLogger.warn(
          'Problem retrieving /Resources/StorageElements section', result['Message'])
      return result
    self.storageElementSet = storageElementSet | set(result['Value'])
    self.siteSet = siteSet
    return S_OK(siteSEMapping)
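
A short consumption sketch for the mapping built above; the literal mimics the method's output ({category: {site: set(SEs)}}), with 'Local' standing in for the module-level LOCAL key and the site and SE names purely illustrative:

siteSEMapping = {'Local': {'LCG.CERN.ch': set(['CERN-RAW', 'CERN-RDST'])},
                 'Protocol': {}, 'Download': {}}

localSEs = siteSEMapping['Local'].get('LCG.CERN.ch', set())
print('Local SEs at LCG.CERN.ch: %s' % ', '.join(sorted(localSEs)))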
Example no. 51
0
    def getJobOutput(self, jobID, localDir=None):
        """ Get the specified job standard output and error files. If the localDir is provided,
        the output is returned as file in this directory. Otherwise, the output is returned
        as strings.
    """
        result = self._prepareProxy()
        if not result['OK']:
            gLogger.error('ARCComputingElement: failed to set up proxy',
                          result['Message'])
            return result
        self.usercfg.ProxyPath(os.environ['X509_USER_PROXY'])

        if jobID.find(':::') != -1:
            pilotRef, stamp = jobID.split(':::')
        else:
            pilotRef = jobID
            stamp = ''
        if not stamp:
            return S_ERROR('Pilot stamp not defined for %s' % pilotRef)

        job = self.__getARCJob(pilotRef)

        arcID = os.path.basename(pilotRef)
        gLogger.debug("Retrieving pilot logs for %s" % pilotRef)
        if "WorkingDirectory" in self.ceParameters:
            workingDirectory = os.path.join(
                self.ceParameters['WorkingDirectory'], arcID)
        else:
            workingDirectory = arcID
        outFileName = os.path.join(workingDirectory, '%s.out' % stamp)
        errFileName = os.path.join(workingDirectory, '%s.err' % stamp)
        gLogger.debug("Working directory for pilot output %s" %
                      workingDirectory)

        isItOkay = job.Retrieve(self.usercfg, arc.URL(workingDirectory), False)
        if isItOkay:
            output = None
            error = None
            try:
                with open(outFileName, 'r') as outFile:
                    output = outFile.read()
                os.unlink(outFileName)
                with open(errFileName, 'r') as errFile:
                    error = errFile.read()
                os.unlink(errFileName)
            except IOError as e:
                gLogger.error("Error downloading outputs",
                              repr(e).replace(',)', ')'))
                return S_ERROR("Error downloading outputs")
            gLogger.debug("Pilot output = %s" % output)
            gLogger.debug("Pilot error = %s" % error)
        else:
            job.Update()
            arcState = job.State.GetGeneralState()
            if arcState != "Undefined":
                return S_ERROR(
                    'Failed to retrieve output for %s as job is not finished (maybe not started yet)'
                    % jobID)
            gLogger.debug(
                "Could not retrieve pilot output for %s - either permission / proxy error or could not connect to CE"
                % pilotRef)
            return S_ERROR('Failed to retrieve output for %s' % jobID)

        return S_OK((output, error))
Example no. 52
0
    def __syncComputingElements(self):
        """
        Sync ComputingElements: compares CS with DB and does the necessary modifications.
        """

        res = getCESiteMapping()
        if not res["OK"]:
            return res
        cesCS = list(res["Value"])

        gLogger.verbose("%s Computing elements found in CS" % len(cesCS))

        cesDB = self.rStatus.selectStatusElement(
            "Resource",
            "Status",
            elementType="ComputingElement",
            meta={"columns": ["Name"]})
        if not cesDB["OK"]:
            return cesDB
        cesDB = [ceDB[0] for ceDB in cesDB["Value"]]

        # ComputingElements that are in DB but not in CS
        toBeDeleted = list(set(cesDB).difference(set(cesCS)))
        gLogger.verbose("%s Computing elements to be deleted" %
                        len(toBeDeleted))

        # Delete the computing elements
        for ceName in toBeDeleted:

            deleteQuery = self.rStatus._extermineStatusElement(
                "Resource", ceName)

            gLogger.verbose("... %s" % ceName)
            if not deleteQuery["OK"]:
                return deleteQuery

        # statusTypes = RssConfiguration.getValidStatusTypes()[ 'Resource' ]
        statusTypes = self.rssConfig.getConfigStatusType("ComputingElement")

        result = self.rStatus.selectStatusElement(
            "Resource",
            "Status",
            elementType="ComputingElement",
            meta={"columns": ["Name", "StatusType"]})
        if not result["OK"]:
            return result
        cesTuple = [(x[0], x[1]) for x in result["Value"]]

        # For each ( ce, statusType ) tuple not present in the DB, add it.
        cesStatusTuples = [(ce, statusType) for ce in cesCS
                           for statusType in statusTypes]
        toBeAdded = list(set(cesStatusTuples).difference(set(cesTuple)))

        gLogger.debug("%s Computing elements entries to be added" %
                      len(toBeAdded))

        for ceTuple in toBeAdded:

            _name = ceTuple[0]
            _statusType = ceTuple[1]
            _status = self.defaultStatus
            _reason = "Synchronized"
            _elementType = "ComputingElement"

            query = self.rStatus.addIfNotThereStatusElement(
                "Resource",
                "Status",
                name=_name,
                statusType=_statusType,
                status=_status,
                elementType=_elementType,
                tokenOwner=self.tokenOwner,
                reason=_reason,
            )
            if not query["OK"]:
                return query

        return S_OK()
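
The synchronisation above follows a generic CS-versus-DB diff pattern, reduced here to a standalone sketch with illustrative data:

cesCS = ['ce1.example.org', 'ce2.example.org']      # names found in the CS
cesDB = ['ce2.example.org', 'ce3.example.org']      # names found in the DB
statusTypes = ['all']
tuplesInDB = set([('ce2.example.org', 'all')])

# in the DB but gone from the CS: to be deleted
toBeDeleted = set(cesDB).difference(cesCS)
# every (name, statusType) pair expected from the CS but missing in the DB: to be added
toBeAdded = set((ce, st) for ce in cesCS for st in statusTypes).difference(tuplesInDB)

print(sorted(toBeDeleted))  # ['ce3.example.org']
print(sorted(toBeAdded))    # [('ce1.example.org', 'all')]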
Example no. 53
0
    def web_ComponentLocation(self):

        rpcClient = RPCClient("Framework/Monitoring")

        userData = self.getSessionData()

        setup = userData['setup'].split('-')[-1]

        hosts = []
        result = Registry.getHosts()
        if result['OK']:
            hosts = result['Value']

        componentTypes = ['Services', 'Agents']
        if "ComponentType" in self.request.arguments:
            componentTypes = self.request.arguments['ComponentType']

        componentNames = []
        if "ComponentName" in self.request.arguments:
            componentNames = list(
                json.loads(self.request.arguments['ComponentName'][-1]))

        componentModules = []
        if "ComponentModule" in self.request.arguments:
            componentModules = list(
                json.loads(self.request.arguments['ComponentModule'][-1]))

        showAll = 0
        if "showAll" in self.request.arguments:
            showAll = int(self.request.arguments['showAll'][-1])

        selectedHosts = []
        if "Hosts" in self.request.arguments:  # we only use the selected host(s)
            selectedHosts = list(
                json.loads(self.request.arguments['Hosts'][-1]))
        retVal = gConfig.getSections('/Systems')

        compMatching = {}
        fullNames = []
        if retVal['OK']:
            systems = retVal['Value']
            for i in systems:
                for compType in componentTypes:
                    compPath = '/Systems/%s/%s/%s' % (i, setup, compType)
                    retVal = gConfig.getSections(compPath)
                    if retVal['OK']:
                        components = retVal['Value']
                        for j in components:
                            path = '%s/%s' % (i, j)
                            if j in componentNames:
                                fullNames += [path]
                                compMatching[path] = path
                            modulepath = "%s/%s/Module" % (compPath, j)
                            module = gConfig.getValue(modulepath, '')
                            if module != '' and module in componentModules:
                                fullNames += [path]
                            elif module == '' and j in componentModules:
                                fullNames += [path]

                            compMatching[
                                path] = module if module != '' else path

        records = []
        if fullNames:
            condDict = {'Setup': userData['setup'], 'ComponentName': fullNames}
        else:
            if len(componentTypes) < 2:
                cType = 'agent' if componentTypes[-1] == 'Agents' else 'service'
                condDict = {'Setup': userData['setup'], 'Type': cType}
            else:
                condDict = {'Setup': userData['setup']}

        gLogger.debug("condDict" + str(condDict))
        retVal = rpcClient.getComponentsStatus(condDict)

        today = datetime.datetime.today()
        if retVal['OK']:
            components = retVal['Value'][0]
            for setup in components:
                for cType in components[setup]:
                    for name in components[setup][cType]:
                        for component in components[setup][cType][name]:
                            if selectedHosts and 'Host' in component and component[
                                    'Host'] not in selectedHosts:
                                continue
                            elif 'Host' in component and component[
                                    'Host'] not in hosts:
                                continue
                            if 'LastHeartbeat' in component:
                                dateDiff = today - component['LastHeartbeat']
                            else:
                                dateDiff = today - today

                            if showAll == 0 and dateDiff.days >= 2 and 'Host' in component:
                                continue

                            for conv in component:
                                component[conv] = str(component[conv])
                            component['ComponentModule'] = compMatching[
                                component['ComponentName']] if component[
                                    'ComponentName'] in compMatching else component[
                                        'ComponentName']
                            records += [component]

            result = {"success": "true", "result": records}
        else:
            result = {"success": "false", "error": result['Message']}

        self.finish(result)
Example no. 54
0
    def submitJob(self, executableFile, proxy, numberOfJobs=1):
        """ Method to submit job
    """

        result = self._prepareProxy()
        if not result['OK']:
            gLogger.error('ARCComputingElement: failed to set up proxy',
                          result['Message'])
            return result
        self.usercfg.ProxyPath(os.environ['X509_USER_PROXY'])

        gLogger.verbose("Executable file path: %s" % executableFile)
        if not os.access(executableFile, os.R_OK | os.X_OK):
            os.chmod(
                executableFile, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP
                | stat.S_IROTH | stat.S_IXOTH)

        batchIDList = []
        stampDict = {}

        endpoint = arc.Endpoint(self.ceHost + ":2811/jobs",
                                arc.Endpoint.JOBSUBMIT,
                                "org.nordugrid.gridftpjob")

        # Submit jobs iteratively for now. Tentatively easier than mucking around with the JobSupervisor class
        for __i in range(numberOfJobs):
            # The basic job description
            jobdescs = arc.JobDescriptionList()
            # Get the job into the ARC way
            xrslString, diracStamp = self.__writeXRSL(executableFile)
            if not arc.JobDescription_Parse(xrslString, jobdescs):
                gLogger.error("Invalid job description")
                break
            # Submit the job
            jobs = arc.JobList()  # filled by the submit process
            submitter = arc.Submitter(self.usercfg)
            result = submitter.Submit(endpoint, jobdescs, jobs)
            # Save the info, or else report why the submission failed
            if (result == arc.SubmissionStatus.NONE):
                # Job successfully submitted
                pilotJobReference = jobs[0].JobID
                batchIDList.append(pilotJobReference)
                stampDict[pilotJobReference] = diracStamp
                gLogger.debug("Successfully submitted job %s to CE %s" %
                              (pilotJobReference, self.ceHost))
            else:
                message = "Failed to submit job because "
                if (result.isSet(arc.SubmissionStatus.NOT_IMPLEMENTED)):
                    gLogger.warn(
                        "%s feature not implemented on CE? (weird, I know - complain to site admins)"
                        % message)
                if (result.isSet(arc.SubmissionStatus.NO_SERVICES)):
                    gLogger.warn(
                        "%s no services are running on CE? (open a GGUS ticket to site admins)"
                        % message)
                if (result.isSet(arc.SubmissionStatus.ENDPOINT_NOT_QUERIED)):
                    gLogger.warn(
                        "%s endpoint was not even queried. (network ..?)" %
                        message)
                if (result.isSet(
                        arc.SubmissionStatus.BROKER_PLUGIN_NOT_LOADED)):
                    gLogger.warn(
                        "%s BROKER_PLUGIN_NOT_LOADED : ARC library installation problem?"
                        % message)
                if (result.isSet(
                        arc.SubmissionStatus.DESCRIPTION_NOT_SUBMITTED)):
                    gLogger.warn(
                        "%s no job description was there (Should not happen, but horses can fly (in a plane))"
                        % message)
                if (result.isSet(
                        arc.SubmissionStatus.SUBMITTER_PLUGIN_NOT_LOADED)):
                    gLogger.warn(
                        "%s SUBMITTER_PLUGIN_NOT_LOADED : ARC library installation problem?"
                        % message)
                if (result.isSet(arc.SubmissionStatus.AUTHENTICATION_ERROR)):
                    gLogger.warn(
                        "%s authentication error - screwed up / expired proxy? Renew / upload pilot proxy on machine?"
                        % message)
                if (result.isSet(arc.SubmissionStatus.ERROR_FROM_ENDPOINT)):
                    gLogger.warn(
                        "%s some error from the CE - ask site admins for more information ..."
                        % message)
                gLogger.warn("%s ... maybe above messages will give a hint." %
                             message)
                break  # Boo hoo *sniff*

        if batchIDList:
            result = S_OK(batchIDList)
            result['PilotStampDict'] = stampDict
        else:
            result = S_ERROR(
                'No pilot references obtained from the ARC job submission')
        return result
Example no. 55
0
    def web_componentAction(self):
        """
    Actions which should be done on components. The only parameters is an action
    to perform.
    Returns standard JSON response structure with with service response
    or error messages
    """

        userData = self.getSessionData()

        DN = str(userData["user"]["DN"])
        group = str(userData["user"]["group"])

        if not (("action" in self.request.arguments) and
                (len(self.request.arguments["action"][0]) > 0)):
            self.finish({"success": "false", "error": "No action defined"})
            return

        action = str(self.request.arguments["action"][0])

        if action not in ["restart", "start", "stop"]:
            error = "The request parameters action '%s' is unknown" % action
            gLogger.debug(error)
            self.finish({"success": "false", "error": error})
            return

        result = dict()
        for i in self.request.arguments:
            if i == "action":
                continue

            target = i.split("@")
            if not len(target) == 2:
                continue

            system = self.request.arguments[i][0]
            gLogger.always("System: %s" % system)
            host = target[1]
            gLogger.always("Host: %s" % host)
            component = target[0]
            gLogger.always("Component: %s" % component)
            if host not in result:
                result[host] = list()
            result[host].append([system, component])

        if not result:
            error = "Failed to get component(s) for %s" % action
            gLogger.debug(error)
            self.finish({"success": "false", "error": error})
            return

        gLogger.always(result)
        actionSuccess = list()
        actionFailed = list()

        for hostname in result:

            if not result[hostname]:
                continue

            client = SystemAdministratorClient(hostname,
                                               None,
                                               delegatedDN=DN,
                                               delegatedGroup=group)

            for i in result[hostname]:

                system = i[0]
                component = i[1]

                try:
                    if action == "restart":
                        result = yield self.threadTask(client.restartComponent,
                                                       system, component)
                    elif action == "start":
                        result = yield self.threadTask(client.startComponent,
                                                       system, component)
                    elif action == "stop":
                        result = yield self.threadTask(client.stopComponent,
                                                       system, component)
                    else:
                        # Use a dict, not a list, so the "OK"/"Message" checks below work
                        result = {"OK": False,
                                  "Message": "Action %s is not valid" % action}
                except Exception as x:
                    result = {"OK": False,
                              "Message": "Exception: %s" % str(x)}
                gLogger.debug("Result: %s" % result)

                if not result["OK"]:
                    error = hostname + ": " + result["Message"]
                    actionFailed.append(error)
                    gLogger.error("Failure during component %s: %s" %
                                  (action, error))
                else:
                    gLogger.always("Successfully %s component %s" %
                                   (action, component))
                    actionSuccess.append(component)

        self.finish(
            self.aftermath(actionSuccess, actionFailed, action, "Component"))
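The handler expects every non-action request argument to be keyed as component@host, with the owning system name as its value. A minimal standalone sketch of that contract and of the parsing done above (host, system, and component names are invented):

# Illustrative request arguments for restarting two components;
# keys are "component@host", values hold the owning system.
requestArguments = {
    "action": ["restart"],
    "JobManager@host1.example.org": ["WorkloadManagement"],
    "ReqManager@host2.example.org": ["RequestManagement"],
}
for key, value in requestArguments.items():
    if key == "action":
        continue
    component, host = key.split("@")
    print(host, value[0], component)  # host, system, component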
Esempio n. 56
0
    def getCEStatus(self):
        """ Method to return information on running and pending jobs.
        It aims to satisfy both instances that use robot proxies and those which use proper configurations.
        """

        result = self._prepareProxy()
        if not result['OK']:
            gLogger.error('ARCComputingElement: failed to set up proxy',
                          result['Message'])
            return result
        self.usercfg.ProxyPath(os.environ['X509_USER_PROXY'])

        # Try to find out which VO we are running for.
        vo = ''
        res = getVOfromProxyGroup()
        if res['OK']:
            vo = res['Value']

        result = S_OK()
        result['SubmittedJobs'] = 0
        if not vo:
            # Presumably the really proper way forward once the infosys-discuss WG comes up with a solution
            # and it is implemented. Needed for DIRAC instances which use robot certificates for pilots.
            endpoints = [
                arc.Endpoint(
                    "ldap://" + self.ceHost + "/MDS-Vo-name=local,o=grid",
                    arc.Endpoint.COMPUTINGINFO, 'org.nordugrid.ldapng')
            ]
            retriever = arc.ComputingServiceRetriever(self.usercfg, endpoints)
            # Takes a bit of time to fetch and parse the LDAP information
            retriever.wait()
            targets = retriever.GetExecutionTargets()
            ceStats = targets[0].ComputingShare
            gLogger.debug("Running jobs for CE %s : %s" %
                          (self.ceHost, ceStats.RunningJobs))
            gLogger.debug("Waiting jobs for CE %s : %s" %
                          (self.ceHost, ceStats.WaitingJobs))
            result['RunningJobs'] = ceStats.RunningJobs
            result['WaitingJobs'] = ceStats.WaitingJobs
        else:
            # The system which works properly at present for ARC CEs that are configured correctly.
            # But for this we need the VO to be known - ask me (Raja) for the whole story if interested.
            cmd = 'ldapsearch -x -LLL -H ldap://%s:2135 -b mds-vo-name=resource,o=grid "(GlueVOViewLocalID=%s)"' % (
                self.ceHost, vo.lower())
            res = shellCall(0, cmd)
            if not res['OK']:
                gLogger.debug("Could not query CE %s - is it down?" %
                              self.ceHost)
                return res
            try:
                ldapValues = res['Value'][1].split("\n")
                running = [
                    lValue for lValue in ldapValues
                    if 'GlueCEStateRunningJobs' in lValue
                ]
                waiting = [
                    lValue for lValue in ldapValues
                    if 'GlueCEStateWaitingJobs' in lValue
                ]
                result['RunningJobs'] = int(running[0].split(":")[1])
                result['WaitingJobs'] = int(waiting[0].split(":")[1])
            except IndexError:
                res = S_ERROR('Unknown ldap failure for site %s' % self.ceHost)
                return res

        return result
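For reference, the ldapsearch output parsed above contains GLUE attribute lines of roughly the shape below; a minimal standalone sketch of the same extraction (the sample LDAP output is invented for illustration):

# Standalone sketch of the GLUE attribute extraction used above.
sampleOutput = """dn: GlueVOViewLocalID=myvo,mds-vo-name=resource,o=grid
GlueCEStateRunningJobs: 42
GlueCEStateWaitingJobs: 7"""

ldapValues = sampleOutput.split("\n")
running = [l for l in ldapValues if 'GlueCEStateRunningJobs' in l]
waiting = [l for l in ldapValues if 'GlueCEStateWaitingJobs' in l]
print(int(running[0].split(":")[1]))  # 42
print(int(waiting[0].split(":")[1]))  # 7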
Esempio n. 57
0
def getCAsLocation():
  """ Retrieve the CA's files location
  """
  #Grid-Security
  retVal = gConfig.getOption( '%s/Grid-Security' % g_SecurityConfPath )
  if retVal[ 'OK' ]:
    casPath = "%s/certificates" % retVal[ 'Value' ]
    gLogger.debug( "Trying %s for CAs" % casPath )
    if os.path.isdir( casPath ):
      gLogger.debug( "Using %s/Grid-Security + /certificates as location for CA's" % g_SecurityConfPath )
      return casPath
  #CAPath
  retVal = gConfig.getOption( '%s/CALocation' % g_SecurityConfPath )
  if retVal[ 'OK' ]:
    casPath = retVal[ 'Value' ]
    gLogger.debug( "Trying %s for CAs" % casPath )
    if os.path.isdir( casPath ):
      gLogger.debug( "Using %s/CALocation as location for CA's" % g_SecurityConfPath )
      return casPath
  # Look up the X509_CERT_DIR environment variable
  if 'X509_CERT_DIR' in os.environ:
    gLogger.debug( "Using X509_CERT_DIR env var as location for CAs" )
    casPath = os.environ[ 'X509_CERT_DIR' ]
    return casPath
  # rootPath + /etc/grid-security/certificates
  casPath = "%s/etc/grid-security/certificates" % DIRAC.rootPath
  gLogger.debug( "Trying %s for CAs" % casPath )
  if os.path.isdir( casPath ):
    gLogger.debug( "Using <DIRACRoot>/etc/grid-security/certificates as location for CAs" )
    return casPath
  # /etc/grid-security/certificates
  casPath = "/etc/grid-security/certificates"
  gLogger.debug( "Trying %s for CAs" % casPath )
  if os.path.isdir( casPath ):
    gLogger.debug( "Using autodiscovered %s location for CAs" % casPath )
    return casPath
  # No CAs location found
  return False
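A minimal usage sketch, assuming the DIRAC gLogger import is in scope as in the snippet:

# Hypothetical caller: locate the CA certificates directory before
# configuring an SSL context; getCAsLocation returns False when none is found.
casPath = getCAsLocation()
if not casPath:
  gLogger.error( "No CA certificates directory could be located" )
else:
  gLogger.info( "CA certificates found at %s" % casPath )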
Esempio n. 58
0
    def sendMail(self, sendDict=None, title=None, body=None, fromAddress=None):
        """
    Sending an email using sendDict: { e-mail : name } as addressbook
    title and body is the e-mail's Subject and Body
    fromAddress is an email address in behalf of whom the message is sent
    Return success/failure JSON structure
    """

        if not sendDict:
            result = ""
            gLogger.debug(result)
            return {"success": "false", "error": result}

        if not title:
            result = "title argument is missing"
            gLogger.debug(result)
            return {"success": "false", "error": result}

        if not body:
            result = "body argument is missing"
            gLogger.debug(result)
            return {"success": "false", "error": result}

        if not fromAddress:
            result = "fromAddress argument is missing"
            gLogger.debug(result)
            return {"success": "false", "error": result}

        sentSuccess = list()
        sentFailed = list()
        gLogger.debug("Initializing Notification client")
        ntc = NotificationClient(
            lambda x, timeout: RPCClient(x, timeout=timeout, static=True))

        for email, name in sendDict.items():
            result = ntc.sendMail(email, title, body, fromAddress, False)
            if not result["OK"]:
                error = name + ": " + result["Message"]
                sentFailed.append(error)
                gLogger.error("Sent failure: ", error)
            else:
                gLogger.info("Successfully sent to %s" % name)
                sentSuccess.append(name)

        success = ", ".join(sentSuccess)
        failure = "\n".join(sentFailed)

        if success and failure:
            result = "Successfully sent e-mail to: "
            result = result + success + "\n\nFailed to send e-mail to:\n" + failure
            gLogger.debug(result)
            return {"success": "true", "result": result}
        elif success and len(failure) < 1:
            result = "Successfully sent e-mail to: %s" % success
            gLogger.debug(result)
            return {"success": "true", "result": result}
        elif len(success) < 1 and failure:
            result = "Failed to sent email to:\n%s" % failure
            gLogger.debug(result)
            return {"success": "false", "error": result}

        result = "No messages were sent due technical failure"
        gLogger.debug(result)
        return {"success": "false", "error": result}
Esempio n. 59
0
def createDataTransformation(
    flavour,
    targetSE,
    sourceSE,
    metaKey,
    metaValue,
    extraData=None,
    extraname='',
    groupSize=1,
    plugin='Broadcast',
    tGroup=None,
    tBody=None,
    enable=False,
):
    """Creates the replication transformation based on the given parameters.

  :param str flavour: Flavour of replication to create: Replication or Moving
  :param targetSE: Destination for files
  :type targetSE: python:list or str
  :param sourceSE: Origin of files
  :type sourceSE: python:list or str
  :param int metaKey: Meta key to identify input files
  :param int metaValue: Meta value to identify input files
  :param dict metaData: Additional meta data to use to identify input files
  :param str extraname: addition to the transformation name, only needed if the same transformation was already created
  :param int groupSize: number of files per transformation taks
  :param str plugin: plugin to use
  :param str tGroup: transformation group to set
  :param tBody: transformation body to set
  :param bool enable: if true submit the transformation, otherwise dry run
  :returns: S_OK (with the transformation object, if successfully added), S_ERROR
  """
    metadata = {metaKey: metaValue}
    if isinstance(extraData, dict):
        metadata.update(extraData)

    gLogger.debug("Using %r for metadata search" % metadata)

    if isinstance(targetSE, six.string_types):
        targetSE = [targetSE]

    if isinstance(sourceSE, (list, tuple)):
        sourceSE = ",".join(sourceSE)

    gLogger.debug('Using plugin: %r' % plugin)

    if flavour not in ('Replication', 'Moving'):
        return S_ERROR('Unsupported flavour %s' % flavour)

    transVerb = {'Replication': 'Replicate', 'Moving': 'Move'}[flavour]
    transGroup = {
        'Replication': 'Replication',
        'Moving': 'Moving'
    }[flavour] if not tGroup else tGroup

    trans = Transformation()
    transName = '%s_%s_%s' % (transVerb, str(metaValue), ",".join(targetSE))
    if extraname:
        transName += "_%s" % extraname

    trans.setTransformationName(transName)
    description = '%s files for %s %s to %s' % (
        transVerb, metaKey, str(metaValue), ",".join(targetSE))
    trans.setDescription(description[:255])
    trans.setLongDescription(description)
    trans.setType('Replication')
    trans.setTransformationGroup(transGroup)
    trans.setGroupSize(groupSize)
    trans.setPlugin(plugin)

    transBody = {
        'Moving': [("ReplicateAndRegister", {
            "SourceSE": sourceSE,
            "TargetSE": targetSE
        }), ("RemoveReplica", {
            "TargetSE": sourceSE
        })],
        'Replication':
        '',  # empty body
    }[flavour] if tBody is None else tBody

    trans.setBody(transBody)
    trans.setInputMetaQuery(metadata)

    if sourceSE:
        res = trans.setSourceSE(sourceSE)
        if not res['OK']:
            return S_ERROR("SourceSE not valid: %s" % res['Message'])
    res = trans.setTargetSE(targetSE)
    if not res['OK']:
        return S_ERROR("TargetSE not valid: %s" % res['Message'])

    if not enable:
        gLogger.always("Dry run, not creating transformation")
        return S_OK()

    res = trans.addTransformation()
    if not res['OK']:
        return res
    gLogger.verbose(res)
    trans.setStatus('Active')
    trans.setAgentType('Automatic')

    gLogger.always("Successfully created replication transformation")
    return S_OK(trans)
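A minimal usage sketch; the storage element names and metadata values are illustrative, and gLogger is assumed in scope as in the snippet:

# Hypothetical call: dry-run a Moving transformation for one dataset.
res = createDataTransformation(
    flavour='Moving',
    targetSE='DESY-SRM',        # illustrative storage element names
    sourceSE='CERN-SRM',
    metaKey='Datatype',
    metaValue='REC',
    extraname='run123',
    groupSize=10,
    enable=False)               # dry run; set enable=True to actually submit
if not res['OK']:
    gLogger.error(res['Message'])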
Esempio n. 60
0
  def __removeLocal(self, localPath):
    if os.path.isfile(localPath):
      gLogger.debug('Remove invalid local file:', localPath)
      os.remove(localPath)