Example #1
  def _getMissingReplicas( self ):
    """ This recovers Replicas that were not Staged on a previous attempt (the stage request failed or timed out),
        while other Replicas of the same task are already Staged. If left behind they can produce a deadlock.
        All SEs are considered, even if their Cache is full
    """
    # Get Replicas that are in Staged/StageSubmitted 
    gLogger.info( 'StageRequest._getMissingReplicas: Checking Staged Replicas' )

    res = self.__getStagedReplicas()
    if not res['OK']:
      gLogger.fatal( "StageRequest._getMissingReplicas: Failed to get replicas from StorageManagementDB.", res['Message'] )
      return res
    seReplicas = {}

    allReplicaInfo = res['Value']['AllReplicaInfo']
    replicasToStage = []
    for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
      # Consider all SEs
      replicasToStage.extend( seReplicaIDs )

    # Get Replicas from the same Tasks as those selected
    res = self.__addAssociatedReplicas( replicasToStage, seReplicas, allReplicaInfo )
    if not res['OK']:
      gLogger.fatal( "StageRequest._getMissingReplicas: Failed to get associated Replicas.", res['Message'] )

    return res
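
Every snippet in this collection returns the DIRAC-style result dictionary and branches on res['OK']. Below is a minimal, self-contained sketch of that convention, only an illustration of the shape the callers rely on, not the actual DIRAC.Core.Utilities.ReturnValues module; getStagedReplicas and the SE name are made up for the example.

def S_OK(value=None):
    # success: the payload goes under 'Value'
    return {'OK': True, 'Value': value}

def S_ERROR(message=''):
    # failure: the explanation goes under 'Message'
    return {'OK': False, 'Message': message}

def getStagedReplicas():
    # hypothetical producer shaped like the __getStagedReplicas call above
    return S_OK({'SEReplicas': {'CERN-RAW': [101, 102]}, 'AllReplicaInfo': {}})

res = getStagedReplicas()
if not res['OK']:
    print("Failed: %s" % res['Message'])
else:
    print("Replicas found at %s SEs" % len(res['Value']['SEReplicas']))
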
Example #2
  def setConfig(self,databaseConfig):

    self.directories = {}
    # In memory storage of the various parameters
    self.users = {}
    self.uids = {}
    self.groups = {}
    self.gids = {}
    self.seNames = {}
    self.seids = {}
    self.seDefinitions = {}

    # Obtain some general configuration of the database
    self.uniqueGUID = databaseConfig['UniqueGUID']
    self.globalReadAccess = databaseConfig['GlobalReadAccess']
    self.lfnPfnConvention = databaseConfig['LFNPFNConvention']
    self.resolvePfn = databaseConfig['ResolvePFN']
    self.umask = databaseConfig['DefaultUmask']
    self.visibleStatus = databaseConfig['VisibleStatus']

    try:
      # Obtain the plugins to be used for DB interaction
      self.ugManager = eval("%s(self)" % databaseConfig['UserGroupManager'])
      self.seManager = eval("%s(self)" % databaseConfig['SEManager'])
      self.securityManager = eval("%s(self)" % databaseConfig['SecurityManager'])
      self.dtree = eval("%s(self)" % databaseConfig['DirectoryManager'])
      self.fileManager = eval("%s(self)" % databaseConfig['FileManager'])
      self.dmeta = eval("%s(self)" % databaseConfig['DirectoryMetadata'])
      self.fmeta = eval("%s(self)" % databaseConfig['FileMetadata'])
    except Exception, x:
      gLogger.fatal("Failed to create database objects",x)
      return S_ERROR("Failed to create database objects")
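
The try/except above instantiates each manager by eval()-ing a class name read from the configuration. A hedged alternative is to resolve the configured name against an explicit registry of already-imported classes; the sketch below uses a placeholder class (ExampleUserGroupManager is not a real DIRAC plugin name).

class ExampleUserGroupManager(object):
    # placeholder plugin class, standing in for the configured UserGroupManager
    def __init__(self, database):
        self.database = database

MANAGER_REGISTRY = {
    'ExampleUserGroupManager': ExampleUserGroupManager,
    # one entry per supported plugin class
}

def instantiateManager(className, database):
    # look the configured name up instead of eval()-ing it
    cls = MANAGER_REGISTRY.get(className)
    if cls is None:
        raise ValueError("Unknown manager plugin: %s" % className)
    return cls(database)

manager = instantiateManager('ExampleUserGroupManager', database=None)
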
def run( parameters , delete, nthreads ):
  """
  The main user interface
  """

  source_dir = parameters[0]
  dest_dir = parameters[1]
  upload = False
  storage = None

  if len( parameters ) == 3:
    storage = parameters[2]
    source_dir = os.path.abspath(source_dir)
    dest_dir = dest_dir.rstrip('/')
    upload = True
    if not os.path.isdir(source_dir):
      gLogger.fatal("Source directory does not exist")
      DIRAC.exit( 1 )

  if len( parameters ) == 2:
    dest_dir = os.path.abspath(dest_dir)
    source_dir = source_dir.rstrip('/')
    if not os.path.isdir(dest_dir):
      gLogger.fatal("Destination directory does not exist")
      DIRAC.exit( 1 )

  res = syncDestinations( upload, source_dir, dest_dir, storage, delete, nthreads )
  if not res['OK']:
    return S_ERROR(res['Message'])

  return S_OK("Successfully mirrored " + source_dir + " into " + dest_dir)
def initializePlottingHandler( serviceInfo ):

  #Get data location
  plottingSection = PathFinder.getServiceSection( "Framework/Plotting" )
  dataPath = gConfig.getValue( "%s/DataLocation" % plottingSection, "data/graphs" )
  dataPath = dataPath.strip()
  if "/" != dataPath[0]:
    dataPath = os.path.realpath( "%s/%s" % ( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), dataPath ) )
  gLogger.info( "Data will be written into %s" % dataPath )
  try:
    os.makedirs( dataPath )
  except:
    pass
  try:
    testFile = "%s/plot__.test" % dataPath
    fd = file( testFile, "w" )
    fd.close()
    os.unlink( testFile )
  except IOError:
    gLogger.fatal( "Can't write to %s" % dataPath )
    return S_ERROR( "Data location is not writable" )

  gPlotCache.setPlotsLocation( dataPath )
  gMonitor.registerActivity( "plotsDrawn", "Drawn plot images", "Plotting requests", "plots", gMonitor.OP_SUM )
  return S_OK()
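
Several handlers in this collection repeat the same "create the data directory, then probe it with a throw-away file" check. A small helper along these lines (a sketch, not part of the DIRAC API) keeps that logic in one place.

import os

def ensureWritableDir(dataPath):
    """Create dataPath if needed and verify it is writable."""
    try:
        os.makedirs(dataPath)
    except OSError:
        pass  # already exists; the write probe below is the real test
    testFile = os.path.join(dataPath, ".write.test")
    try:
        with open(testFile, "w"):
            pass
        os.unlink(testFile)
    except (IOError, OSError):
        return False
    return True

if not ensureWritableDir("data/graphs"):
    print("Data location is not writable")
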
def initializeMonitoringHandler( serviceInfo ):
  #Check that the path is writable
  monitoringSection = PathFinder.getServiceSection( "Framework/Monitoring" )
  #Get data location
  dataPath = gConfig.getValue( "%s/DataLocation" % monitoringSection, "data/monitoring" )
  dataPath = dataPath.strip()
  if "/" != dataPath[0]:
    dataPath = os.path.realpath( "%s/%s" % ( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), dataPath ) )
  gLogger.info( "Data will be written into %s" % dataPath )
  try:
    os.makedirs( dataPath )
  except:
    pass
  try:
    testFile = "%s/mon.jarl.test" % dataPath
    fd = file( testFile, "w" )
    fd.close()
    os.unlink( testFile )
  except IOError:
    gLogger.fatal( "Can't write to %s" % dataPath )
    return S_ERROR( "Data location is not writable" )
  #Define globals
  gServiceInterface.initialize( dataPath )
  if not gServiceInterface.initializeDB():
    return S_ERROR( "Can't start db engine" )
  gMonitor.registerActivity( "cachedplots", "Cached plot images", "Monitoring plots", "plots", gMonitor.OP_SUM )
  gMonitor.registerActivity( "drawnplots", "Drawn plot images", "Monitoring plots", "plots", gMonitor.OP_SUM )
  return S_OK()
Example #6
 def setRequest(self,requestType,requestName,requestString,requestStatus='ToDo',url=''):
   """ Set request. A URL can be supplied; if not, all VOBOXes will be tried in random order.
   """
   try:
     urls = []
     if url:
       urls.append(url)
     urls.extend(self.voBoxUrls)
     for url in urls:
       requestRPCClient = RPCClient(url)
       res = requestRPCClient.setRequest(requestType,requestName,requestStatus,requestString)
       if res['OK']:
         gLogger.info("Succeeded setting request for %s at %s" % (requestName,url))
         res["Server"] = url
         return res
       else:
         errKey = "Failed setting request at %s" % url
         errExpl = " : for %s because: %s" % (requestName,res['Message'])
         gLogger.error(errKey,errExpl)
     errKey = "Completely failed setting request"
     errExpl = " : %s\n%s\n%s" % (requestName,requestType,requestString)
     gLogger.fatal(errKey,errExpl)
     return S_ERROR(errKey)
   except Exception,x:
     errKey = "Completely failed setting request"
     errExpl = " : for %s with exception %s" % (requestName,str(x))
     gLogger.exception(errKey,errExpl)
     return S_ERROR(errKey)
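
The loop above is a plain failover: try each VOBOX URL in turn and return on the first success. Below is a standalone sketch of that pattern, with callServer standing in for RPCClient(url).setRequest(...); both the function and the URLs are invented for the illustration.

def callServer(url):
    # hypothetical stand-in for RPCClient(url).setRequest(...)
    return {'OK': url.endswith('good'), 'Message': 'connection refused'}

def setWithFailover(urls):
    for url in urls:
        res = callServer(url)
        if res['OK']:
            res['Server'] = url   # remember which server accepted the request
            return res
    return {'OK': False, 'Message': 'Completely failed setting request'}

print(setWithFailover(['dips://box1:9200', 'dips://box2:9200/good']))
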
def initializeSecurityLoggingHandler( serviceInfo ):
  global gSecurityFileLog

  serviceCS = serviceInfo [ 'serviceSectionPath' ]
  dataPath = gConfig.getValue( "%s/DataLocation" % serviceCS, "data/securityLog" )
  dataPath = dataPath.strip()
  if "/" != dataPath[0]:
    dataPath = os.path.realpath( "%s/%s" % ( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), dataPath ) )
  gLogger.info( "Data will be written into %s" % dataPath )
  try:
    os.makedirs( dataPath )
  except:
    pass
  try:
    testFile = "%s/seclog.jarl.test" % dataPath
    fd = file( testFile, "w" )
    fd.close()
    os.unlink( testFile )
  except IOError:
    gLogger.fatal( "Can't write to %s" % dataPath )
    return S_ERROR( "Data location is not writable" )
  #Define globals
  gSecurityFileLog = SecurityFileLog( dataPath )
  SecurityLogClient().setLogStore( gSecurityFileLog )
  return S_OK()
Example #8
  def __parseCommandLine(self):
    gLogger.debug("Parsing command line")
    shortOption = ""
    longOptionList = []
    for optionTuple in self.commandOptionList:
      if shortOption.find(optionTuple[0]) < 0:
        shortOption += "%s" % optionTuple[0]
      else:
        if optionTuple[0]:
          gLogger.error("Short option -%s has been already defined" % optionTuple[0])
      if not optionTuple[1] in longOptionList:
        longOptionList.append("%s" % optionTuple[1])
      else:
        if optionTuple[1]:
          gLogger.error("Long option --%s has been already defined" % optionTuple[1])

    try:
      opts, args = getopt.gnu_getopt(sys.argv[self.firstOptionIndex:], shortOption, longOptionList)
    except getopt.GetoptError as x:
      # x = option "-k" not recognized
      # print help information and exit
      gLogger.fatal("Error when parsing command line arguments: %s" % str(x))
      self.showHelp(exitCode=2)

    for o, _ in opts:
      if o in ('-h', '--help'):
        self.showHelp()
        sys.exit(2)

    self.cliAdditionalCFGFiles = [arg for arg in args if arg[-4:] == ".cfg"]
    self.commandArgList = [arg for arg in args if not arg[-4:] == ".cfg"]
    self.parsedOptionList = opts
    self.isParsed = True
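
A standalone illustration of the getopt call built above; the option tuples here are invented for the example, while the real ones come from self.commandOptionList.

import getopt
import sys

commandOptionList = [("h", "help", "Show help"), ("o:", "option=", "Set a value")]

shortOption = "".join(opt[0] for opt in commandOptionList)   # "ho:"
longOptionList = [opt[1] for opt in commandOptionList]       # ["help", "option="]

try:
    opts, args = getopt.gnu_getopt(sys.argv[1:], shortOption, longOptionList)
except getopt.GetoptError as err:
    print("Error when parsing command line arguments: %s" % err)
    sys.exit(2)

cfgFiles = [arg for arg in args if arg.endswith(".cfg")]         # extra .cfg files
positional = [arg for arg in args if not arg.endswith(".cfg")]   # remaining arguments
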
Example #9
 def setRequest( self, requestName, requestString, url = '' ):
   """ Set request. A URL can be supplied; if not, all VOBOXes will be tried in random order.
   """
   try:
     if url:
       urls = [url]
     elif self.central:
       urls = [self.central]
       if self.voBoxUrls:
         urls += self.voBoxUrls
     else:
       return S_ERROR( "No urls defined" )
     for url in urls:
       requestRPCClient = RPCClient( url, timeout = 120 )
       res = requestRPCClient.setRequest( requestName, requestString )
       if res['OK']:
         gLogger.info( "Succeeded setting request %s at %s" % ( requestName, url ) )
         res["Server"] = url
         return res
       else:
         errKey = "Failed setting request at %s" % url
         errExpl = " : for %s because: %s" % ( requestName, res['Message'] )
         gLogger.error( errKey, errExpl )
     errKey = "Completely failed setting request"
     errExpl = " : %s\n%s" % ( requestName, requestString )
     gLogger.fatal( errKey, errExpl )
     return S_ERROR( errKey )
   except Exception, x:
     errKey = "Completely failed setting request"
     gLogger.exception( errKey, requestName, x )
     return S_ERROR( errKey )
  def initialize( self ):

    self.RequestDBClient = RequestClient()
    backend = self.am_getOption( 'Backend', '' )
    self.RequestDB = False
    if backend == 'mysql':
      from DIRAC.RequestManagementSystem.DB.RequestDBMySQL import RequestDBMySQL
      requestDB = RequestDBMySQL()
      if requestDB._connected:
        self.RequestDB = requestDB



    gMonitor.registerActivity( "Iteration", "Agent Loops", "DISETForwardingAgent", "Loops/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "Attempted", "Request Processed", "DISETForwardingAgent", "Requests/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "Successful", "Request Forward Successful", "DISETForwardingAgent", "Requests/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "Failed", "Request Forward Failed", "DISETForwardingAgent", "Requests/min", gMonitor.OP_SUM )

    self.local = PathFinder.getServiceURL( "RequestManagement/localURL" )
    if not self.local:
      self.local = AgentModule.am_getOption( self, 'localURL', '' )
    if not self.local:
      errStr = 'The RequestManagement/localURL option must be defined.'
      gLogger.fatal( errStr )
      return S_ERROR( errStr )
    return S_OK()
  def callbackStagedTasks( self ):
    """ This updates the status of the Tasks to Done and then issues the callback message
    """
    res = self.storageDB.getTasksWithStatus( 'Staged' )
    if not res['OK']:
      gLogger.fatal( "RequestFinalization.callbackStagedTasks: Failed to get Staged Tasks from StagerDB.", res['Message'] )
      return res
    stagedTasks = res['Value']
    gLogger.info( "RequestFinalization.callbackStagedTasks: Obtained %s tasks in the 'Staged' status." % len( stagedTasks ) )
    for taskID, ( source, callback, sourceTask ) in stagedTasks.items():
      if ( callback and sourceTask ):
        res = self.__performCallback( 'Done', callback, sourceTask )
        if not res['OK']:
          stagedTasks.pop( taskID )
        else:
          gLogger.info( "RequestFinalization.callbackStagedTasks, Task = %s: %s" % ( sourceTask, res['Value'] ) )

    if not stagedTasks:
      gLogger.info( "RequestFinalization.callbackStagedTasks: No tasks to update to Done." )
      return S_OK()
    # Daniela: Why is the line below commented out?
    #res = self.stagerClient.setTasksDone(stagedTasks.keys())
    res = self.storageDB.removeTasks( stagedTasks.keys() )
    if not res['OK']:
      gLogger.fatal( "RequestFinalization.callbackStagedTasks: Failed to remove staged Tasks.", res['Message'] )
    return res
Example #12
 def cliDisconnect( self, cliTrid ):
   if cliTrid not in self.__byClient:
     gLogger.fatal( "This shouldn't happen!" )
     return
   gLogger.info( "Client to %s disconnected messaging connection" % self.__byClient[ cliTrid ][ 'srvName' ] )
   self.__byClient[ cliTrid ][ 'srvEnd' ].disconnect()
   self.__removeClient( cliTrid )
def initializeDataLoggingHandler( serviceInfo ):

  global dataPath
  global logDB
  logDB = DataLoggingDB()

  monitoringSection = PathFinder.getServiceSection( "DataManagement/DataLogging" )
  #Get data location
  retDict = gConfig.getOption( "%s/DataLocation" % monitoringSection, "dataLoggingPlots" )
  if not retDict[ 'OK' ]:
    return retDict
  dataPath = retDict[ 'Value' ].strip()
  if "/" != dataPath[0]:
    dataPath = os.path.realpath( "%s/%s" % ( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), dataPath ) )
  gLogger.info( "Data will be written into %s" % dataPath )
  try:
    os.makedirs( dataPath )
  except:
    pass
  try:
    testFile = "%s/mon.jarl.test" % dataPath
    fd = file( testFile, "w" )
    fd.close()
    os.unlink( testFile )
  except IOError:
    gLogger.fatal( "Can't write to %s" % dataPath )
    return S_ERROR( "Data location is not writable" )
  return S_OK()
def initializeReportGeneratorHandler( serviceInfo ):
  global gAccountingDB
  gAccountingDB = AccountingDB( readOnly = True )
  #Get data location
  reportSection = PathFinder.getServiceSection( "Accounting/ReportGenerator" )
  dataPath = gConfig.getValue( "%s/DataLocation" % reportSection, "data/accountingGraphs" )
  dataPath = dataPath.strip()
  if "/" != dataPath[0]:
    dataPath = os.path.realpath( "%s/%s" % ( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), dataPath ) )
  gLogger.info( "Data will be written into %s" % dataPath )
  try:
    os.makedirs( dataPath )
  except:
    pass
  try:
    testFile = "%s/acc.jarl.test" % dataPath
    fd = file( testFile, "w" )
    fd.close()
    os.unlink( testFile )
  except IOError:
    gLogger.fatal( "Can't write to %s" % dataPath )
    return S_ERROR( "Data location is not writable" )
  gDataCache.setGraphsLocation( dataPath )
  gMonitor.registerActivity( "plotsDrawn", "Drawn plot images", "Accounting reports", "plots", gMonitor.OP_SUM )
  gMonitor.registerActivity( "reportsRequested", "Generated reports", "Accounting reports", "reports", gMonitor.OP_SUM )
  return S_OK()
Example #15
    def __init__(self, transID=0, transClient=""):
        API.__init__(self)
        self.paramTypes = {
            "TransformationID": [types.IntType, types.LongType],
            "TransformationName": types.StringTypes,
            "Status": types.StringTypes,
            "Description": types.StringTypes,
            "LongDescription": types.StringTypes,
            "Type": types.StringTypes,
            "Plugin": types.StringTypes,
            "AgentType": types.StringTypes,
            "FileMask": types.StringTypes,
            "TransformationGroup": types.StringTypes,
            "GroupSize": [types.IntType, types.LongType, types.FloatType],
            "InheritedFrom": [types.IntType, types.LongType],
            "Body": types.StringTypes,
            "MaxNumberOfTasks": [types.IntType, types.LongType],
            "EventsPerTask": [types.IntType, types.LongType],
        }
        self.paramValues = {
            "TransformationID": 0,
            "TransformationName": "",
            "Status": "New",
            "Description": "",
            "LongDescription": "",
            "Type": "",
            "Plugin": "Standard",
            "AgentType": "Manual",
            "FileMask": "",
            "TransformationGroup": "General",
            "GroupSize": 1,
            "InheritedFrom": 0,
            "Body": "",
            "MaxNumberOfTasks": 0,
            "EventsPerTask": 0,
        }

        self.supportedPlugins = ["Broadcast", "Standard", "BySize", "ByShare"]
        if not transClient:
            self.transClient = TransformationClient()
        else:
            self.transClient = transClient
        self.serverURL = self.transClient.getServer()
        self.exists = False
        if transID:
            self.paramValues["TransformationID"] = transID
            res = self.getTransformation()
            if res["OK"]:
                self.exists = True
            elif res["Message"] == "Transformation does not exist":
                raise AttributeError, "TransformationID %d does not exist" % transID
            else:
                self.paramValues["TransformationID"] = 0
                gLogger.fatal(
                    "Failed to get transformation from database", "%s @ %s" % (transID, self.transClient.serverURL)
                )
Example #16
  def __init__( self, transID = 0, transClient = None ):
    """ c'tor
    """
    super( Transformation, self ).__init__()

    self.paramTypes = { 'TransformationID'      : [types.IntType, types.LongType],
                        'TransformationName'    : types.StringTypes,
                        'Status'                : types.StringTypes,
                        'Description'           : types.StringTypes,
                        'LongDescription'       : types.StringTypes,
                        'Type'                  : types.StringTypes,
                        'Plugin'                : types.StringTypes,
                        'AgentType'             : types.StringTypes,
                        'FileMask'              : types.StringTypes,
                        'TransformationGroup'   : types.StringTypes,
                        'GroupSize'             : [types.IntType, types.LongType, types.FloatType],
                        'InheritedFrom'         : [types.IntType, types.LongType],
                        'Body'                  : types.StringTypes,
                        'MaxNumberOfTasks'      : [types.IntType, types.LongType],
                        'EventsPerTask'         : [types.IntType, types.LongType]}
    self.paramValues = { 'TransformationID'      : 0,
                         'TransformationName'    : '',
                         'Status'                : 'New',
                         'Description'           : '',
                         'LongDescription'       : '',
                         'Type'                  : '',
                         'Plugin'                : 'Standard',
                         'AgentType'             : 'Manual',
                         'FileMask'              : '',
                         'TransformationGroup'   : 'General',
                         'GroupSize'             : 1,
                         'InheritedFrom'         : 0,
                         'Body'                  : '',
                         'MaxNumberOfTasks'       : 0,
                         'EventsPerTask'          : 0}
    self.ops = Operations()
    self.supportedPlugins = self.ops.getValue( 'Transformations/AllowedPlugins',
                                               ['Broadcast', 'Standard', 'BySize', 'ByShare'] )
    if not transClient:
      self.transClient = TransformationClient()
    else:
      self.transClient = transClient
    self.serverURL = self.transClient.getServer()
    self.exists = False
    if transID:
      self.paramValues['TransformationID'] = transID
      res = self.getTransformation()
      if res['OK']:
        self.exists = True
      elif res['Message'] == 'Transformation does not exist':
        raise AttributeError( 'TransformationID %d does not exist' % transID )
      else:
        self.paramValues['TransformationID'] = 0
        gLogger.fatal( "Failed to get transformation from database", "%s @ %s" % ( transID,
                                                                                   self.transClient.serverURL ) )
Example #17
 def __executeOperation( self, operation, *parms, **kwds ):
   transID = self.paramValues['TransformationID']
   if not transID:
     gLogger.fatal( "No TransformationID known" )
     return S_ERROR()
   printOutput = kwds.pop( 'printOutput' )
   execString = "res = self.transClient.%s(transID,*parms,**kwds)" % operation
   exec( execString )
   if printOutput:
     self._prettyPrint( res )
   return res
 def clearReleasedTasks( self ):
   # TODO: issue release of the pins associated to this task
   res = self.storageDB.getTasksWithStatus( 'Released' )
   if not res['OK']:
     gLogger.fatal( "RequestFinalization.clearReleasedTasks: Failed to get Released Tasks from StagerDB.", res['Message'] )
     return res
   stagedTasks = res['Value']
   gLogger.info( "RequestFinalization.clearReleasedTasks: Removing %s tasks..." % len( stagedTasks ) )
   res = self.storageDB.removeTasks( stagedTasks.keys() )
   if not res['OK']:
     gLogger.error( "RequestFinalization.clearReleasedTasks: Failed to remove tasks.", res['Message'] )
     return res
   gLogger.info( "RequestFinalization.clearReleasedTasks: ...removed." )
   return S_OK()
 def __checkMandatoryOptions( self ):
   try:
     isMandatoryMissing = False
     for optionPath in self.mandatoryEntryList:
       optionPath = self.__getAbsolutePath( optionPath )
       if not gConfigurationData.extractOptionFromCFG( optionPath ):
         gLogger.fatal( "Missing mandatory local configuration option", optionPath )
         isMandatoryMissing = True
     if isMandatoryMissing:
       return S_ERROR()
     return S_OK()
   except Exception as e:
     gLogger.exception()
     return S_ERROR( str( e ) )
Example #20
 def getTransformationLogging( self, printOutput = False ):
   transID = self.paramValues['TransformationID']
   if not transID:
     gLogger.fatal( "No TransformationID known" )
     return S_ERROR()
   res = self.transClient.getTransformationLogging( transID )
   if not res['OK']:
     if printOutput:
       self._prettyPrint( res )
     return res
   loggingList = res['Value']
   if printOutput:
     self._printFormattedDictList( loggingList, ['Message', 'MessageDate', 'AuthorDN'], 'MessageDate', 'MessageDate' )
   return S_OK( loggingList )
Example #21
 def __executeOperation( self, operation, *parms, **kwds ):
   transID = self.paramValues['TransformationID']
   if not transID:
     gLogger.fatal( "No TransformationID known" )
     return S_ERROR()
   printOutput = kwds.pop( 'printOutput' )
   fcn = None
   if hasattr( self.transClient, operation ) and callable( getattr( self.transClient, operation ) ):
     fcn = getattr( self.transClient, operation )
   if not fcn:
     return S_ERROR( "Unable to invoke %s, it isn't a member function of TransformationClient" % operation )
   res = fcn( transID, *parms, **kwds )
   if printOutput:
     self._prettyPrint( res )
   return res
Example #22
 def getTransformationLogging(self, printOutput=False):
     transID = self.paramValues["TransformationID"]
     if not transID:
         gLogger.fatal("No TransformationID known")
         return S_ERROR()
     res = self.transClient.getTransformationLogging(transID)
     if not res["OK"]:
         if printOutput:
             self._prettyPrint(res)
         return res
     loggingList = res["Value"]
     if printOutput:
         self._printFormattedDictList(
             loggingList, ["Message", "MessageDate", "AuthorDN"], "MessageDate", "MessageDate"
         )
     return S_OK(loggingList)
Example #23
  def initializeHandler( cls, serviceInfo ):
    cls.__db = MonitoringDB()
    reportSection = serviceInfo[ 'serviceSectionPath' ]
    dataPath = gConfig.getValue( "%s/DataLocation" % reportSection, "data/monitoringPlots" )
    gLogger.info( "Data will be written into %s" % dataPath )
    mkDir( dataPath )
    try:
      testFile = "%s/moni.plot.test" % dataPath
      with open( testFile, "w" ) as _fd:
        os.unlink( testFile )
    except IOError as err:
      gLogger.fatal( "Can't write to %s" % dataPath, err )
      return S_ERROR( "Data location is not writable: %s" % repr( err ) )
    gDataCache.setGraphsLocation( dataPath )

    return S_OK()
 def monitorStageRequests( self ):
   """ This is the third logical task; it manages the StageSubmitted->Staged transition of the Replicas
   """
   res = self.__getStageSubmittedReplicas()
   if not res['OK']:
     gLogger.fatal( "StageMonitor.monitorStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
     return res
   if not res['Value']:
     gLogger.info( "StageMonitor.monitorStageRequests: There were no StageSubmitted replicas found" )
     return res
   seReplicas = res['Value']['SEReplicas']
   replicaIDs = res['Value']['ReplicaIDs']
   gLogger.info( "StageMonitor.monitorStageRequests: Obtained %s StageSubmitted replicas for monitoring." % len( replicaIDs ) )
   for storageElement, seReplicaIDs in seReplicas.items():
     self.__monitorStorageElementStageRequests( storageElement, seReplicaIDs, replicaIDs )
   return S_OK()
Example #25
 def submitPinRequests( self ):
   """ This manages the Staged->Pinned transition of the Replicas
   """
   res = self.__getStagedReplicas()
   if not res['OK']:
     gLogger.fatal( "PinRequest.submitPinRequests: Failed to get replicas from StagerDB.", res['Message'] )
     return res
   if not res['Value']:
     gLogger.info( "PinRequest.submitPinRequests: There were no Staged replicas found" )
     return res
   seReplicas = res['Value']
   for storageElement, requestIDs in seReplicas.items():
     gLogger.info( "PinRequest.submitPinRequests: Obtained Staged replicas for pinning at %s." % storageElement )
     for requestID, replicas in requestIDs.items():
       self.__issuePinRequests( storageElement, requestID, replicas )
   return S_OK()
 def submitStageRequests( self ):
   """ This manages the Waiting->StageSubmitted transition of the Replicas
   """
   res = self.__getWaitingReplicas()
   if not res['OK']:
     gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
     return res
   if not res['Value']:
     gLogger.info( "StageRequest.submitStageRequests: There were no Waiting replicas found" )
     return res
   seReplicas = res['Value']['SEReplicas']
   allReplicaInfo = res['Value']['ReplicaIDs']
   gLogger.info( "StageRequest.submitStageRequests: Obtained %s replicas Waiting for staging." % len( allReplicaInfo ) )
   for storageElement, seReplicaIDs in seReplicas.items():
     self.__issuePrestageRequests( storageElement, seReplicaIDs, allReplicaInfo )
   return S_OK()
Example #27
 def getTransformation( self, printOutput = False ):
   transID = self.paramValues['TransformationID']
   if not transID:
     gLogger.fatal( "No TransformationID known" )
     return S_ERROR()
   res = self.transClient.getTransformation( transID, extraParams = True )
   if not res['OK']:
     if printOutput:
       self._prettyPrint( res )
     return res
   transParams = res['Value']
   for paramName, paramValue in transParams.items():
     execString = "self.set%s(paramValue)" % paramName
     exec( execString )
   if printOutput:
     gLogger.info( "No printing available yet" )
   return S_OK( transParams )
Example #28
 def addClient(self, cliTrid, destination, clientInitParams, connectParams):
     if cliTrid in self.__byClient:
         gLogger.fatal("Trid is duplicated!! this shouldn't happen")
         return
     msgClient = MessageClient(destination, **clientInitParams)
     msgClient.subscribeToDisconnect(self.__srvDisconnect)
     msgClient.subscribeToAllMessages(self.msgFromSrv)
     msgClient.setUniqueName(connectParams[0])
     result = msgClient.connect(**connectParams[1])
     if not result["OK"]:
         return result
     self.__inOutLock.acquire()
     try:
         self.__byClient[cliTrid] = {"srvEnd": msgClient, "srvTrid": msgClient.getTrid(), "srvName": destination}
         self.__srvToCliTrid[msgClient.getTrid()] = cliTrid
     finally:
         self.__inOutLock.release()
     return result
  def execute( self ):

    # Get the current submitted stage space and the amount of pinned space for each storage element
    res = self.storageDB.getSubmittedStagePins()
    if not res['OK']:
      gLogger.fatal( "StageRequest.submitStageRequests: Failed to obtain submitted requests from StorageManagementDB.", res['Message'] )
      return res
    self.storageElementUsage = res['Value']
    if self.storageElementUsage:
      gLogger.info( "StageRequest.execute: Active stage/pin requests found at the following sites:" )
      for storageElement in sortList( self.storageElementUsage.keys() ):
        seDict = self.storageElementUsage[storageElement]
        # Daniela: fishy? Changed it to GB and division by 1024 instead of 1000
        gLogger.info( "StageRequest.execute: %s: %s replicas with a size of %.3f GB." % ( storageElement.ljust( 15 ), str( seDict['Replicas'] ).rjust( 6 ), seDict['TotalSize'] / ( 1024 * 1024 * 1024.0 ) ) )
    if not self.storageElementUsage:
      gLogger.info( "StageRequest.execute: No active stage/pin requests found." )
    res = self.submitStageRequests()
    return res
Example #30
 def __processIncomingRequest(self, trid, msg):
     self.__trInOutLock.acquire()
     try:
         rcvCB = self.__messageTransports[trid]["cbReceiveMessage"]
     except KeyError:
         return S_ERROR("Transport %s unknown" % trid)
     finally:
         self.__trInOutLock.release()
     if not rcvCB:
         gLogger.fatal("Transport %s does not have a callback defined and a message arrived!" % trid)
         return S_ERROR("No message was expected in for this transport")
     # Check message has id and name
     for requiredField in ["name"]:
         if requiredField not in msg:
             gLogger.error("Message does not have %s" % requiredField)
             return S_ERROR("Message does not have %s" % requiredField)
     # Load message
     if "attrs" in msg:
         attrs = msg["attrs"]
          if type(attrs) not in (types.TupleType, types.ListType):
              return S_ERROR("Message attrs has to be a tuple or a list, not %s" % type(attrs))
     else:
         attrs = None
     # Do we "unpack" or do we send the raw data to the callback?
     if self.__useMessageObjects:
         result = self.__msgFactory.createMessage(self.__messageTransports[trid]["svcName"], msg["name"], attrs)
         if not result["OK"]:
             return result
         msgObj = result["Value"]
     else:
         msgObj = DummyMessage(msg)
     # Is msg ok?
      if not msgObj.isOK():
          return S_ERROR("Message is invalid")
     try:
         # Callback it and return response
         result = rcvCB(trid, msgObj)
         if not isReturnStructure(result):
             return S_ERROR("Request function does not return a result structure")
         return result
     except Exception, e:
         # Whoops. Show exception and return
         gLogger.exception("Exception while processing message %s" % msg["name"])
         return S_ERROR("Exception while processing message %s: %s" % (msg["name"], str(e)))
Example #31
 def __processIncomingData( self, trid, receivedResult ):
   #If keep alive, return OK
   if 'keepAlive' in receivedResult and receivedResult[ 'keepAlive' ]:
     return S_OK()
   #If idle read return
   self.__trInOutLock.acquire()
   try:
     idleRead = self.__messageTransports[ trid ][ 'idleRead' ]
   except KeyError:
     return S_ERROR( "Transport %s unknown" % trid )
   finally:
     self.__trInOutLock.release()
   if idleRead:
     if receivedResult[ 'Value' ]:
       gLogger.fatal( "OOOops. Idle read has returned data!" )
     return S_OK()
   if not receivedResult[ 'Value' ]:
     self.__log.debug( "Transport %s closed connection" % trid )
     return self.removeTransport( trid )
   #This is a message req/resp
   msg = receivedResult[ 'Value' ]
   #Valid message?
   if 'request' not in msg:
     gLogger.warn( "Received data does not seem to be a message !!!!" )
     return self.removeTransport( trid )
   #Decide if it's a response or a request
   if msg[ 'request' ]:
     #If message has Id return ACK to received
     if 'id' in msg:
       self.__sendResponse( trid, msg[ 'id' ], S_OK() )
     #Process msg
     result = self.__processIncomingRequest( trid, msg )
   else:
     result = self.__processIncomingResponse( trid, msg )
   #If error close the transport
   if not result[ 'OK' ]:
     gLogger.info( "Closing transport because of error while processing message", result[ 'Message' ] )
     return self.removeTransport( trid )
   return S_OK()
Example #32
 def addClient(self, cliTrid, destination, clientInitParams, connectParams):
     if cliTrid in self.__byClient:
         gLogger.fatal("Trid is duplicated!! this shouldn't happen")
         return
     msgClient = MessageClient(destination, **clientInitParams)
     msgClient.subscribeToDisconnect(self.__srvDisconnect)
     msgClient.subscribeToAllMessages(self.msgFromSrv)
     msgClient.setUniqueName(connectParams[0])
     result = msgClient.connect(**connectParams[1])
     if not result['OK']:
         return result
     self.__inOutLock.acquire()
     try:
         self.__byClient[cliTrid] = {
             'srvEnd': msgClient,
             'srvTrid': msgClient.getTrid(),
             'srvName': destination
         }
         self.__srvToCliTrid[msgClient.getTrid()] = cliTrid
     finally:
         self.__inOutLock.release()
     return result
Example #33
def resolveSEGroup( seGroupList ):
  seList = []
  if isinstance( seGroupList, basestring ):
    seGroupList = [seGroupList]
  for se in seGroupList:
    seConfig = gConfig.getValue( '/Resources/StorageElementGroups/%s' % se, se )
    if seConfig != se:
      seList += [se.strip() for se in seConfig.split( ',' )]
      # print seList
    else:
      seList.append( se )
    res = gConfig.getSections( '/Resources/StorageElements' )
    if not res['OK']:
      gLogger.fatal( 'Error getting list of SEs from CS', res['Message'] )
      return []
    for se in seList:
      if se not in res['Value']:
        gLogger.fatal( '%s is not a valid SE' % se )
        seList = []
        break

  return seList
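
Stripped of gConfig, the expansion done by resolveSEGroup boils down to the mapping below; the group and SE names are made up for the illustration.

# stand-in for the /Resources/StorageElementGroups CS section
seGroups = {"Tier1-RAW": "CERN-RAW, IN2P3-RAW, RAL-RAW"}

def expandSEGroup(name):
    value = seGroups.get(name, name)
    if value != name:
        # the name was a group: split the comma-separated list
        return [se.strip() for se in value.split(",")]
    return [name]

print(expandSEGroup("Tier1-RAW"))   # ['CERN-RAW', 'IN2P3-RAW', 'RAL-RAW']
print(expandSEGroup("CERN-USER"))   # ['CERN-USER']
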
Example #34
  def setConfig(self,databaseConfig):

    self.directories = {}
    # In memory storage of the various parameters
    self.users = {}
    self.uids = {}
    self.groups = {}
    self.gids = {}
    self.seNames = {}
    self.seids = {}
    self.seDefinitions = {}

    # Obtain some general configuration of the database
    self.uniqueGUID = databaseConfig['UniqueGUID']
    self.globalReadAccess = databaseConfig['GlobalReadAccess']
    self.lfnPfnConvention = databaseConfig['LFNPFNConvention']
    if self.lfnPfnConvention == "None":
      self.lfnPfnConvention = False
    self.resolvePfn = databaseConfig['ResolvePFN']
    self.umask = databaseConfig['DefaultUmask']
    self.validFileStatus = databaseConfig['ValidFileStatus']
    self.validReplicaStatus = databaseConfig['ValidReplicaStatus']
    self.visibleFileStatus = databaseConfig['VisibleFileStatus']
    self.visibleReplicaStatus = databaseConfig['VisibleReplicaStatus']

    try:
      # Obtain the plugins to be used for DB interaction
      self.ugManager = eval("%s(self)" % databaseConfig['UserGroupManager'])
      self.seManager = eval("%s(self)" % databaseConfig['SEManager'])
      self.securityManager = eval("%s(self)" % databaseConfig['SecurityManager'])
      self.dtree = eval("%s(self)" % databaseConfig['DirectoryManager'])
      self.fileManager = eval("%s(self)" % databaseConfig['FileManager'])
      self.datasetManager = eval("%s(self)" % databaseConfig['DatasetManager'])
      self.dmeta = eval("%s(self)" % databaseConfig['DirectoryMetadata'])
      self.fmeta = eval("%s(self)" % databaseConfig['FileMetadata'])
    except Exception, x:
      gLogger.fatal("Failed to create database objects",x)
      return S_ERROR("Failed to create database objects")
Example #35
    def submitStageRequests(self):
        """ This manages the following transitions of the Replicas
        * Waiting -> Offline (if the file is not found Cached)
        * Waiting -> StageSubmitted (if the file is found Cached)
        * Offline -> StageSubmitted (if there are no more Waiting replicas)
        """
        # Retry Replicas that have not been Staged in a previous attempt
        res = self._getMissingReplicas()
        if not res['OK']:
            gLogger.fatal(
                "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.",
                res['Message'])
            return res
        seReplicas = res['Value']['SEReplicas']
        allReplicaInfo = res['Value']['AllReplicaInfo']

        if seReplicas:
            gLogger.info(
                "StageRequest.submitStageRequests: Completing partially Staged Tasks"
            )
        for storageElement, seReplicaIDs in seReplicas.iteritems():
            gLogger.debug('Staging at %s:' % storageElement, seReplicaIDs)
            self._issuePrestageRequests(storageElement, seReplicaIDs,
                                        allReplicaInfo)

        # Check Waiting Replicas and select those found Online and all other Replicas from the same Tasks
        res = self._getOnlineReplicas()
        if not res['OK']:
            gLogger.fatal(
                "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.",
                res['Message'])
            return res
        seReplicas = res['Value']['SEReplicas']
        allReplicaInfo = res['Value']['AllReplicaInfo']

        # Check Offline Replicas that fit in the Cache and all other Replicas from the same Tasks
        res = self._getOfflineReplicas()

        if not res['OK']:
            gLogger.fatal(
                "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.",
                res['Message'])
            return res

        # Merge info from both results
        for storageElement, seReplicaIDs in res['Value'][
                'SEReplicas'].iteritems():
            seReplicas.setdefault(storageElement, []).extend(seReplicaIDs)
        allReplicaInfo.update(res['Value']['AllReplicaInfo'])

        gLogger.info(
            "StageRequest.submitStageRequests: Obtained %s replicas for staging."
            % len(allReplicaInfo))
        for storageElement, seReplicaIDs in seReplicas.iteritems():
            gLogger.debug('Staging at %s:' % storageElement, seReplicaIDs)
            self._issuePrestageRequests(storageElement, seReplicaIDs,
                                        allReplicaInfo)
        return S_OK()
Example #36
 def callbackDoneTasks(self):
     """This issues the call back message for the Tasks with a State='Done'"""
     res = self.stagerClient.getTasksWithStatus("Done")
     if not res["OK"]:
         gLogger.fatal(
             "RequestFinalization.callbackDoneTasks: Failed to get Done Tasks from StorageManagementDB.",
             res["Message"],
         )
         return res
     doneTasks = res["Value"]
     gLogger.info("RequestFinalization.callbackDoneTasks: Obtained %s tasks in the 'Done' status." % len(doneTasks))
     for taskID, (_source, callback, sourceTask) in doneTasks.items():
         if callback and sourceTask:
             res = self.__performCallback("Done", callback, sourceTask)
             if not res["OK"]:
                 doneTasks.pop(taskID)
     if not doneTasks:
         gLogger.info("RequestFinalization.callbackDoneTasks: No tasks to update to Done.")
         return S_OK()
     res = self.stagerClient.removeTasks(list(doneTasks))
     if not res["OK"]:
         gLogger.fatal("RequestFinalization.callbackDoneTasks: Failed to remove Done tasks.", res["Message"])
     return res
Example #37
 def getTransformation(self, printOutput=False):
   transID = self.paramValues['TransformationID']
   if not transID:
     gLogger.fatal("No TransformationID known")
     return S_ERROR()
   res = self.transClient.getTransformation(transID, extraParams=True)
   if not res['OK']:
     if printOutput:
       self._prettyPrint(res)
     return res
   transParams = res['Value']
   for paramName, paramValue in transParams.items():
     setter = None
     setterName = "set%s" % paramName
     if hasattr(self, setterName) and callable(getattr(self, setterName)):
       setter = getattr(self, setterName)
      if not setter:
       gLogger.error("Unable to invoke setter %s, it isn't a member function" % setterName)
       continue
     setter(paramValue)
   if printOutput:
     gLogger.info("No printing available yet")
   return S_OK(transParams)
 def initializeHandler(cls, serviceInfo):
   multiPath = PathFinder.getDatabaseSection("Accounting/MultiDB")
   cls.__acDB = MultiAccountingDB(multiPath, readOnly=True)
   # Get data location
   reportSection = serviceInfo['serviceSectionPath']
   dataPath = gConfig.getValue("%s/DataLocation" % reportSection, "data/accountingGraphs")
   dataPath = dataPath.strip()
   if "/" != dataPath[0]:
     dataPath = os.path.realpath("%s/%s" % (gConfig.getValue('/LocalSite/InstancePath', rootPath), dataPath))
   gLogger.info("Data will be written into %s" % dataPath)
   mkDir(dataPath)
   try:
     testFile = "%s/acc.jarl.test" % dataPath
     fd = file(testFile, "w")
     fd.close()
     os.unlink(testFile)
   except IOError:
     gLogger.fatal("Can't write to %s" % dataPath)
     return S_ERROR("Data location is not writable")
   gDataCache.setGraphsLocation(dataPath)
   gMonitor.registerActivity("plotsDrawn", "Drawn plot images", "Accounting reports", "plots", gMonitor.OP_SUM)
   gMonitor.registerActivity("reportsRequested", "Generated reports", "Accounting reports", "reports", gMonitor.OP_SUM)
   return S_OK()
def initializeSecurityLoggingHandler(serviceInfo):
    global gSecurityFileLog

    serviceCS = serviceInfo["serviceSectionPath"]
    dataPath = gConfig.getValue("%s/DataLocation" % serviceCS, "data/securityLog")
    dataPath = dataPath.strip()
    if "/" != dataPath[0]:
        dataPath = os.path.realpath("%s/%s" % (gConfig.getValue("/LocalSite/InstancePath", rootPath), dataPath))
    gLogger.info("Data will be written into %s" % dataPath)
    mkDir(dataPath)

    try:
        testFile = "%s/seclog.jarl.test" % dataPath
        with open(testFile, "w"):
            pass
        os.unlink(testFile)
    except IOError:
        gLogger.fatal("Can't write to %s" % dataPath)
        return S_ERROR("Data location is not writable")
    # Define globals
    gSecurityFileLog = SecurityFileLog(dataPath)
    SecurityLogClient().setLogStore(gSecurityFileLog)
    return S_OK()
Example #40
    def monitorStageRequests(self):
        """This is the third logical task; it manages the StageSubmitted->Staged transition of the Replicas"""
        res = self.__getStageSubmittedReplicas()
        if not res["OK"]:
            gLogger.fatal(
                "StageMonitor.monitorStageRequests: Failed to get replicas from StorageManagementDB.",
                res["Message"])
            return res
        if not res["Value"]:
            gLogger.info(
                "StageMonitor.monitorStageRequests: There were no StageSubmitted replicas found"
            )
            return res
        seReplicas = res["Value"]["SEReplicas"]
        replicaIDs = res["Value"]["ReplicaIDs"]
        gLogger.info(
            "StageMonitor.monitorStageRequests: Obtained %s StageSubmitted replicas for monitoring."
            % len(replicaIDs))
        for storageElement, seReplicaIDs in seReplicas.items():
            self.__monitorStorageElementStageRequests(storageElement,
                                                      seReplicaIDs, replicaIDs)

        return self.dataOpSender.concludeSending()
Example #41
  def __parseCommandLine( self ):
    gLogger.debug( "Parsing command line" )
    shortOption = ""
    longOptionList = []
    for optionTuple in self.commandOptionList:
      if shortOption.find( optionTuple[0] ) < 0:
        shortOption += "%s" % optionTuple[0]
      else:
        if optionTuple[0]:
          gLogger.error( "Short option -%s has been already defined" % optionTuple[0] )
      if not optionTuple[1] in longOptionList:
        longOptionList.append( "%s" % optionTuple[1] )
      else:
        if optionTuple[1]:
          gLogger.error( "Long option --%s has been already defined" % optionTuple[1] )

    try:
      opts, args = getopt.gnu_getopt( sys.argv[1:], shortOption, longOptionList )
    except getopt.GetoptError, x:
      # x = option "-k" not recognized
      # print help information and exit
      gLogger.fatal( "Error when parsing command line arguments: %s" % str( x ) )
      self.showHelp( exitCode = 2 )
Example #42
 def submitStageRequests(self):
      """ This manages the Waiting->StageSubmitted transition of the Replicas
      """
     res = self.__getWaitingReplicas()
     if not res['OK']:
         gLogger.fatal(
             "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.",
             res['Message'])
         return res
     if not res['Value']:
         gLogger.info(
             "StageRequest.submitStageRequests: There were no Waiting replicas found"
         )
         return res
     seReplicas = res['Value']['SEReplicas']
     allReplicaInfo = res['Value']['ReplicaIDs']
     gLogger.info(
         "StageRequest.submitStageRequests: Obtained %s replicas Waiting for staging."
         % len(allReplicaInfo))
     for storageElement, seReplicaIDs in seReplicas.items():
         self.__issuePrestageRequests(storageElement, seReplicaIDs,
                                      allReplicaInfo)
     return S_OK()
Example #43
 def setRequest(self,
                requestType,
                requestName,
                requestString,
                requestStatus='ToDo',
                 url=''):
      """ Set request. A URL can be supplied; if not, all VOBOXes will be tried in random order.
      """
     try:
         urls = []
         if url:
              urls.append(url)
          urls.extend(self.voBoxUrls)
         for url in urls:
             requestRPCClient = RPCClient(url)
             res = requestRPCClient.setRequest(requestType, requestName,
                                               requestStatus, requestString)
              if res['OK']:
                  gLogger.info("Succeeded setting request for %s at %s" %
                              (requestName, url))
                 res["Server"] = url
                 return res
             else:
                 errKey = "Failed setting request at %s" % url
                 errExpl = " : for %s because: %s" % (requestName,
                                                      res['Message'])
                 gLogger.error(errKey, errExpl)
         errKey = "Completely failed setting request"
         errExpl = " : %s\n%s\n%s" % (requestName, requestType,
                                      requestString)
         gLogger.fatal(errKey, errExpl)
         return S_ERROR(errKey)
     except Exception, x:
         errKey = "Completely failed setting request"
         errExpl = " : for %s with exception %s" % (requestName, str(x))
         gLogger.exception(errKey, errExpl)
         return S_ERROR(errKey)
Example #44
def initializeRequestManagerHandler(serviceInfo):
    """ initialise handler """
    global gRequestDB
    csSection = PathFinder.getServiceSection(
        "RequestManagement/RequestManager")
    backend = gConfig.getValue('%s/Backend' % csSection)
    if not backend:
        fatStr = "RequestManager.initializeRequestManagerHandler: Failed to get backend for RequestDB from CS."
        gLogger.fatal(fatStr)
        return S_ERROR(fatStr)
    gLogger.info(
        "RequestManager.initializeRequestManagerHandler: Initialising with backend",
        backend)
    if backend == 'file':
        from DIRAC.RequestManagementSystem.DB.RequestDBFile import RequestDBFile
        gRequestDB = RequestDBFile()
    elif backend == 'mysql':
        from DIRAC.RequestManagementSystem.DB.RequestDBMySQL import RequestDBMySQL
        gRequestDB = RequestDBMySQL()
    else:
        fatStr = "RequestManager.initializeRequestManagerHandler: Supplied backend is not supported."
        gLogger.fatal(fatStr, backend)
        return S_ERROR(fatStr)
    return S_OK()
    def callbackStagedTasks(self):
        """ This updates the status of the Tasks to Done and then issues the callback message
        """
        res = self.stagerClient.getTasksWithStatus('Staged')
        if not res['OK']:
            gLogger.fatal(
                "RequestFinalization.callbackStagedTasks: Failed to get Staged Tasks from StagerDB.",
                res['Message'])
            return res
        stagedTasks = res['Value']
        gLogger.info(
            "RequestFinalization.callbackStagedTasks: Obtained %s tasks in the 'Staged' status."
            % len(stagedTasks))
        for taskID, (source, callback, sourceTask) in stagedTasks.items():
            if (callback and sourceTask):
                res = self.__performCallback('Done', callback, sourceTask)
                if not res['OK']:
                    stagedTasks.pop(taskID)
                else:
                    gLogger.info(
                        "RequestFinalization.callbackStagedTasks, Task = %s: %s"
                        % (sourceTask, res['Value']))

        if not stagedTasks:
            gLogger.info(
                "RequestFinalization.callbackStagedTasks: No tasks to update to Done."
            )
            return S_OK()
        # Daniela: Why is the line below commented out?
        #res = self.stagerClient.setTasksDone(stagedTasks.keys())
        res = self.stagerClient.removeTasks(stagedTasks.keys())
        if not res['OK']:
            gLogger.fatal(
                "RequestFinalization.callbackStagedTasks: Failed to remove staged Tasks.",
                res['Message'])
        return res
 def clearFailedTasks( self ):
   """ This obtains the tasks which are marked as Failed and remove all the associated records
   """
   res = self.storageDB.getTasksWithStatus( 'Failed' )
   if not res['OK']:
     gLogger.fatal( "RequestFinalization.clearFailedTasks: Failed to get Failed Tasks from StagerDB.", res['Message'] )
     return res
   failedTasks = res['Value']
   gLogger.info( "RequestFinalization.clearFailedTasks: Obtained %s tasks in the 'Failed' status." % len( failedTasks ) )
   for taskID, ( source, callback, sourceTask ) in failedTasks.items():
     if ( callback and sourceTask ):
       res = self.__performCallback( 'Failed', callback, sourceTask )
       if not res['OK']:
         failedTasks.pop( taskID )
   if not failedTasks:
     gLogger.info( "RequestFinalization.clearFailedTasks: No tasks to remove." )
     return S_OK()
   gLogger.info( "RequestFinalization.clearFailedTasks: Removing %s tasks..." % len( failedTasks ) )
   res = self.storageDB.removeTasks( failedTasks.keys() )
   if not res['OK']:
     gLogger.error( "RequestFinalization.clearFailedTasks: Failed to remove tasks.", res['Message'] )
     return res
   gLogger.info( "RequestFinalization.clearFailedTasks: ...removed." )
   return S_OK()
Example #47
    def doUpload(fc, dm, result, source_dir, dest_dir, storage, delete,
                 nthreads):
        """
    Wrapper for uploading files
    """
        if delete:
            lfns = [
                dest_dir + "/" + filename
                for filename in result['Value']['Delete']['Files']
            ]
            if len(lfns) > 0:
                res = removeRemoteFiles(dm, lfns)
                if not res['OK']:
                    gLogger.fatal('Deleting of files: ' + ', '.join(lfns) +
                                  " -X- [FAILED]" + res['Message'])
                    DIRAC.exit(1)
                else:
                    gLogger.notice("Deleting " + ', '.join(lfns) +
                                   " -> [DONE]")

            for directoryname in result['Value']['Delete']['Directories']:
                res = removeRemoteDirectory(fc, dest_dir + "/" + directoryname)
                if not res['OK']:
                    gLogger.fatal('Deleting of directory: ' + directoryname +
                                  " -X- [FAILED] " + res['Message'])
                    DIRAC.exit(1)
                else:
                    gLogger.notice("Deleting " + directoryname + " -> [DONE]")

        for directoryname in result['Value']['Create']['Directories']:
            res = createRemoteDirectory(fc, dest_dir + "/" + directoryname)
            if not res['OK']:
                gLogger.fatal('Creation of directory: ' + directoryname +
                              " -X- [FAILED] " + res['Message'])
                DIRAC.exit(1)
            else:
                gLogger.notice("Creating " + directoryname + " -> [DONE]")

        listOfFiles = result['Value']['Create']['Files']
        # Check that we do not have too many threads
        if nthreads > len(listOfFiles):
            nthreads = len(listOfFiles)

        if nthreads == 0:
            return S_OK('Upload finished successfully')

        listOfListOfFiles = chunkList(listOfFiles, nthreads)
        res = runInParallel(arguments=[dm, source_dir, dest_dir, storage],
                            listOfLists=listOfListOfFiles,
                            function=uploadListOfFiles)
        if not res['OK']:
            return S_ERROR("Upload of files failed")

        return S_OK('Upload finished successfully')
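
chunkList is called above but not shown in these examples; a plausible implementation (an assumption, not the actual helper) splits the work into nChunks lists of near-equal size so that each thread gets one.

def chunkList(items, nChunks):
    """Split items into nChunks lists whose lengths differ by at most one."""
    base, extra = divmod(len(items), nChunks)
    chunks, start = [], 0
    for i in range(nChunks):
        size = base + (1 if i < extra else 0)
        chunks.append(items[start:start + size])
        start += size
    return chunks

print(chunkList(list(range(7)), 3))   # [[0, 1, 2], [3, 4], [5, 6]]
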
Example #48
    def doDownload(dm, result, source_dir, dest_dir, delete, nthreads):
        """
    Wrapper for downloading files
    """
        if delete:
            for filename in result['Value']['Delete']['Files']:
                res = removeLocalFile(dest_dir + "/" + filename)
                if not res['OK']:
                    gLogger.fatal('Deleting of file: ' + filename +
                                  ' -X- [FAILED] ' + res['Message'])
                    DIRAC.exit(1)
                else:
                    gLogger.notice("Deleting " + filename + " -> [DONE]")

            for directoryname in result['Value']['Delete']['Directories']:
                res = removeLocaDirectory(dest_dir + "/" + directoryname)
                if not res['OK']:
                    gLogger.fatal('Deleting of directory: ' + directoryname +
                                  ' -X- [FAILED] ' + res['Message'])
                    DIRAC.exit(1)
                else:
                    gLogger.notice("Deleting " + directoryname + " -> [DONE]")

        for directoryname in result['Value']['Create']['Directories']:
            res = createLocalDirectory(dest_dir + "/" + directoryname)
            if not res['OK']:
                gLogger.fatal('Creation of directory: ' + directoryname +
                              ' -X- [FAILED] ' + res['Message'])
                DIRAC.exit(1)
            else:
                gLogger.notice("Creating " + directoryname + " -> [DONE]")

        listOfFiles = result['Value']['Create']['Files']
        # Check that we do not have too many threads
        if nthreads > len(listOfFiles):
            nthreads = len(listOfFiles)

        if nthreads == 0:
            return S_OK('Download finished successfully')

        listOfListOfFiles = chunkList(listOfFiles, nthreads)
        res = runInParallel(
            arguments=[dm, source_dir, dest_dir],
            listOfLists=listOfListOfFiles,
            function=downloadListOfFiles,
        )

        if not res['OK']:
            return S_ERROR("Download of files failed")

        return S_OK('Download finished successfully')
Example #49
def main():
    localCfg = LocalConfiguration()
    localCfg.setUsageMessage(__doc__)

    positionalArgs = localCfg.getPositionalArguments()
    if len(positionalArgs) == 0:
        gLogger.fatal("You must specify which executor to run!")
        sys.exit(1)

    if len(positionalArgs) == 1 and positionalArgs[0].find("/") > -1:
        mainName = positionalArgs[0]
    else:
        mainName = "Framework/MultiExecutor"

    localCfg.setConfigurationForExecutor(mainName)
    localCfg.addMandatoryEntry("/DIRAC/Setup")
    localCfg.addDefaultEntry("/DIRAC/Security/UseServerCertificate", "yes")
    localCfg.addDefaultEntry("LogLevel", "INFO")
    localCfg.addDefaultEntry("LogColor", True)
    resultDict = localCfg.loadUserData()
    if not resultDict['OK']:
        gLogger.fatal("There were errors when loading configuration",
                      resultDict['Message'])
        sys.exit(1)

    includeExtensionErrors()
    executorReactor = ExecutorReactor()

    result = executorReactor.loadModules(positionalArgs)
    if not result['OK']:
        gLogger.fatal("Error while loading executor", result['Message'])
        sys.exit(1)

    result = executorReactor.go()
    if not result['OK']:
        gLogger.fatal(result['Message'])
        sys.exit(1)

    gLogger.notice("Graceful exit. Bye!")
    sys.exit(0)
Example #50
0
def doUpload(fc, dm, result, source_dir, dest_dir, storage, delete, nthreads):
    """
    Wrapper for uploading files
    """
    if delete:
        lfns = [dest_dir + "/" + filename for filename in result["Value"]["Delete"]["Files"]]
        if len(lfns) > 0:
            res = removeRemoteFiles(dm, lfns)
            if not res["OK"]:
                gLogger.fatal("Deleting of files: " + lfns + " -X- [FAILED]" + res["Message"])
                DIRAC.exit(1)
            else:
                gLogger.notice("Deleting " + ", ".join(lfns) + " -> [DONE]")

        for directoryname in result["Value"]["Delete"]["Directories"]:
            res = removeRemoteDirectory(fc, dest_dir + "/" + directoryname)
            if not res["OK"]:
                gLogger.fatal("Deleting of directory: " + directoryname + " -X- [FAILED] " + res["Message"])
                DIRAC.exit(1)
            else:
                gLogger.notice("Deleting " + directoryname + " -> [DONE]")

    for directoryname in result["Value"]["Create"]["Directories"]:
        res = returnSingleResult(fc.createDirectory(dest_dir + "/" + directoryname))
        if not res["OK"]:
            gLogger.fatal("Creation of directory: " + directoryname + " -X- [FAILED] " + res["Message"])
            DIRAC.exit(1)
        else:
            gLogger.notice("Creating " + directoryname + " -> [DONE]")

    listOfFiles = result["Value"]["Create"]["Files"]
    # Check that we do not have too many threads
    if nthreads > len(listOfFiles):
        nthreads = len(listOfFiles)

    if nthreads == 0:
        return S_OK("Upload finished successfully")

    listOfListOfFiles = chunkList(listOfFiles, nthreads)
    res = runInParallel(
        arguments=[dm, source_dir, dest_dir, storage], listOfLists=listOfListOfFiles, function=uploadListOfFiles
    )
    if not res["OK"]:
        return S_ERROR("Upload of files failed")

    return S_OK("Upload finished successfully")
Example #51
0
def main():
    # Registering arguments will automatically add their description to the help menu
    Script.registerArgument(["executor: specify which executor to run"])
    positionalArgs = Script.getPositionalArgs()
    localCfg = Script.localCfg

    if len(positionalArgs) == 1 and positionalArgs[0].find("/") > -1:
        mainName = positionalArgs[0]
    else:
        mainName = "Framework/MultiExecutor"

    localCfg.setConfigurationForExecutor(mainName)
    localCfg.addMandatoryEntry("/DIRAC/Setup")
    localCfg.addDefaultEntry("/DIRAC/Security/UseServerCertificate", "yes")
    localCfg.addDefaultEntry("LogLevel", "INFO")
    localCfg.addDefaultEntry("LogColor", True)
    resultDict = localCfg.loadUserData()
    if not resultDict["OK"]:
        gLogger.fatal("There were errors when loading configuration",
                      resultDict["Message"])
        sys.exit(1)

    includeExtensionErrors()
    executorReactor = ExecutorReactor()

    result = executorReactor.loadModules(positionalArgs)
    if not result["OK"]:
        gLogger.fatal("Error while loading executor", result["Message"])
        sys.exit(1)

    result = executorReactor.go()
    if not result["OK"]:
        gLogger.fatal(result["Message"])
        sys.exit(1)

    gLogger.notice("Graceful exit. Bye!")
    sys.exit(0)
Example #52
0
 def doTheMagic(self):
     if not distMaker.isOK():
         gLogger.fatal("There was an error with the release description")
         return False
     result = distMaker.loadReleases()
     if not result['OK']:
         gLogger.fatal(
             "There was an error when loading the release.cfg file: %s" %
             result['Message'])
         return False
     #Module tars
     if self.cliParams.ignorePackages:
         gLogger.notice("Skipping creating module tarballs")
     else:
         result = self.createModuleTarballs()
         if not result['OK']:
             gLogger.fatal(
                 "There was a problem when creating the module tarballs: %s"
                 % result['Message'])
             return False
     #Externals
     if self.cliParams.ignoreExternals or cliParams.projectName != "DIRAC":
         gLogger.notice("Skipping creating externals tarball")
     else:
         if not self.createExternalsTarballs():
             gLogger.fatal(
                 "There was a problem when creating the Externals tarballs")
             return False
     #Write the releases files
     for relVersion in self.cliParams.releasesToBuild:
         projectCFG = self.relConf.getReleaseCFG(self.cliParams.projectName,
                                                 relVersion)
         projectCFGData = projectCFG.toString() + "\n"
         try:
             relFile = file(
                 os.path.join(
                     self.cliParams.destination, "release-%s-%s.cfg" %
                     (self.cliParams.projectName, relVersion)), "w")
             relFile.write(projectCFGData)
             relFile.close()
         except Exception, exc:
             gLogger.fatal("Could not write the release info: %s" %
                           str(exc))
             return False
         try:
             relFile = file(
                 os.path.join(
                     self.cliParams.destination, "release-%s-%s.md5" %
                     (self.cliParams.projectName, relVersion)), "w")
             relFile.write(md5.md5(projectCFGData).hexdigest())
             relFile.close()
         except Exception, exc:
             gLogger.fatal("Could not write the release info: %s" %
                           str(exc))
             return False
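
The md5 module used for the checksum above was deprecated in Python 2.5 and removed in Python 3. On a modern interpreter the same .md5 file could be produced with a small hashlib-based helper like the sketch below (writeMd5File is an illustrative name, not part of the original script); the second try block would then call writeMd5File(os.path.join(self.cliParams.destination, "release-%s-%s.md5" % (self.cliParams.projectName, relVersion)), projectCFGData).

import hashlib


def writeMd5File(path, data):
    # Write the hex MD5 digest of a text string to the given path
    with open(path, "w") as md5File:
        md5File.write(hashlib.md5(data.encode("utf-8")).hexdigest())
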
Example #53
0
###
# Load release manager from dirac-install
##
diracInstallLocation = os.path.join(os.path.dirname(__file__), "dirac-install")
if not os.path.isfile(diracInstallLocation):
    diracInstallLocation = os.path.join(os.path.dirname(__file__),
                                        "dirac-install.py")
try:
    diFile = open(diracInstallLocation, "r")
    DiracInstall = imp.load_module("DiracInstall", diFile,
                                   diracInstallLocation,
                                   ("", "r", imp.PY_SOURCE))
    diFile.close()
except Exception, excp:
    gLogger.fatal("Cannot find dirac-install! Aborting (%s)" % str(excp))
    sys.exit(1)

##END OF LOAD


class Params:
    def __init__(self):
        self.releasesToBuild = []
        self.projectName = 'DIRAC'
        self.debug = False
        self.externalsBuildType = ['client']
        self.ignoreExternals = False
        self.forceExternals = False
        self.ignorePackages = False
        self.relcfg = False
Example #54
0
    def __addAssociatedReplicas(self, replicasToStage, seReplicas,
                                allReplicaInfo):
        """ Retrieve the list of Replicas that belong to the same Tasks as the provided list
    """
        res = self.storageDB.getAssociatedReplicas(replicasToStage)
        if not res['OK']:
            gLogger.fatal(
                "StageRequest.__addAssociatedReplicas: Failed to get associated Replicas.",
                res['Message'])
            return res
        addReplicas = {'Offline': {}, 'Waiting': {}}
        replicaIDs = {}
        for replicaID, info in res['Value'].items():
            lfn = info['LFN']
            storageElement = info['SE']
            size = info['Size']
            pfn = info['PFN']
            status = info['Status']
            if status not in ['Waiting', 'Offline']:
                continue
            if storageElement not in addReplicas[status]:
                addReplicas[status][storageElement] = []
            replicaIDs[replicaID] = {
                'LFN': lfn,
                'PFN': pfn,
                'Size': size,
                'StorageElement': storageElement
            }
            addReplicas[status][storageElement].append(replicaID)

        waitingReplicas = addReplicas['Waiting']
        offlineReplicas = addReplicas['Offline']
        newReplicaInfo = replicaIDs
        allReplicaInfo.update(newReplicaInfo)

        # First handle Waiting Replicas for which metadata is to be checked
        for storageElement, seReplicaIDs in waitingReplicas.items():
            for replicaID in list(seReplicaIDs):
                if replicaID in replicasToStage:
                    seReplicaIDs.remove(replicaID)
            res = self.__checkIntegrity(storageElement, seReplicaIDs,
                                        allReplicaInfo)
            if not res['OK']:
                gLogger.error(
                    'StageRequest.__addAssociatedReplicas: Failed to check Replica Metadata',
                    '(%s): %s' % (storageElement, res['Message']))
            else:
                # keep all Replicas (Online and Offline)
                if storageElement not in seReplicas:
                    seReplicas[storageElement] = []
                seReplicas[storageElement].extend(res['Value']['Online'])
                replicasToStage.extend(res['Value']['Online'])
                seReplicas[storageElement].extend(res['Value']['Offline'])
                replicasToStage.extend(res['Value']['Offline'])

        # Then handle Offline Replicas for which metadata is already checked
        for storageElement, seReplicaIDs in offlineReplicas.items():
            if storageElement not in seReplicas:
                seReplicas[storageElement] = []
            for replicaID in sorted(seReplicaIDs):
                if replicaID in replicasToStage:
                    seReplicaIDs.remove(replicaID)
            seReplicas[storageElement].extend(seReplicaIDs)
            replicasToStage.extend(seReplicaIDs)

        for replicaID in list(allReplicaInfo):
            if replicaID not in replicasToStage:
                del allReplicaInfo[replicaID]

        totalSize = 0
        for storageElement in sorted(seReplicas.keys()):
            replicaIDs = seReplicas[storageElement]
            size = 0
            for replicaID in replicaIDs:
                size += self.__add(storageElement,
                                   allReplicaInfo[replicaID]['Size'])

            gLogger.info(
                'StageRequest.__addAssociatedReplicas:  Considering %s GB to be staged at %s'
                % (size, storageElement))
            totalSize += size

        gLogger.info(
            "StageRequest.__addAssociatedReplicas: Obtained %s GB for staging."
            % totalSize)

        return S_OK({
            'SEReplicas': seReplicas,
            'AllReplicaInfo': allReplicaInfo
        })
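
The self.__add helper used in the size accounting above is not included in this example. Judging from the log messages it converts the replica size to GB and keeps a running per-SE total; the sketch below is hypothetical, and the storageElementUsage attribute name is an assumption.

    def __add(self, storageElement, size):
        # Hypothetical sketch: convert bytes to GB, keep a running per-SE total
        # (self.storageElementUsage is an assumed attribute), and return the GB value
        sizeGB = size / (1000.0 * 1000.0 * 1000.0)
        if not hasattr(self, 'storageElementUsage'):
            self.storageElementUsage = {}
        self.storageElementUsage.setdefault(storageElement, {'TotalSize': 0.0})
        self.storageElementUsage[storageElement]['TotalSize'] += sizeGB
        return sizeGB
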
Example #55
0
        source_dir = os.path.abspath(source_dir)
        dest_dir = dest_dir.rstrip('/')
        upload = True
        if not os.path.isdir(source_dir):
            gLogger.fatal("Source directory does not exist")
            DIRAC.exit(1)

    if len(parameters) == 2:
        dest_dir = os.path.abspath(dest_dir)
        source_dir = source_dir.rstrip('/')
        if not os.path.isdir(dest_dir):
            gLogger.fatal("Destination directory does not exist")
            DIRAC.exit(1)

    res = syncDestinations(upload, source_dir, dest_dir, storage, delete,
                           nthreads)
    if not res['OK']:
        return S_ERROR(res['Message'])

    return S_OK("Successfully mirrored " + source_dir + " into " + dest_dir)


if __name__ == "__main__":
    returnValue = run(args, sync, parallel)
    if not returnValue['OK']:
        gLogger.fatal(returnValue['Message'])
        DIRAC.exit(1)
    else:
        gLogger.notice(returnValue['Value'])
        DIRAC.exit(0)
Example #56
0
 tasks = None
 requests = []
 full = False
 verbose = False
 status = None
 until = None
 since = None
 terse = False
 all = False
 reset = False
 for switch in Script.getUnprocessedSwitches():
     if switch[0] == 'Job':
         try:
             jobs = [int(job) for job in switch[1].split(',')]
         except ValueError:
             gLogger.fatal("Invalid jobID", switch[1])
     elif switch[0] == 'Transformation':
         try:
             transID = int(switch[1])
         except ValueError:
             gLogger.fatal('Invalid transID', switch[1])
     elif switch[0] == 'Tasks':
         try:
             taskIDs = [int(task) for task in switch[1].split(',')]
         except ValueError:
             gLogger.fatal('Invalid tasks', switch[1])
     elif switch[0] == 'Full':
         full = True
     elif switch[0] == 'Verbose':
         verbose = True
     elif switch[0] == 'Terse':
Example #57
0
    def __init__(self, transID=0, transClient=None):
        """ c'tor
    """
        super(Transformation, self).__init__()

        self.paramTypes = {
            'TransformationID': [types.IntType, types.LongType],
            'TransformationName': types.StringTypes,
            'Status': types.StringTypes,
            'Description': types.StringTypes,
            'LongDescription': types.StringTypes,
            'Type': types.StringTypes,
            'Plugin': types.StringTypes,
            'AgentType': types.StringTypes,
            'FileMask': types.StringTypes,
            'TransformationGroup': types.StringTypes,
            'GroupSize': [types.IntType, types.LongType, types.FloatType],
            'InheritedFrom': [types.IntType, types.LongType],
            'Body': types.StringTypes,
            'MaxNumberOfTasks': [types.IntType, types.LongType],
            'EventsPerTask': [types.IntType, types.LongType]
        }
        self.paramValues = {
            'TransformationID': 0,
            'TransformationName': '',
            'Status': 'New',
            'Description': '',
            'LongDescription': '',
            'Type': '',
            'Plugin': 'Standard',
            'AgentType': 'Manual',
            'FileMask': '',
            'TransformationGroup': 'General',
            'GroupSize': 1,
            'InheritedFrom': 0,
            'Body': '',
            'MaxNumberOfTasks': 0,
            'EventsPerTask': 0
        }
        self.ops = Operations()
        self.supportedPlugins = self.ops.getValue(
            'Transformations/AllowedPlugins',
            ['Broadcast', 'Standard', 'BySize', 'ByShare'])
        if not transClient:
            self.transClient = TransformationClient()
        else:
            self.transClient = transClient
        self.serverURL = self.transClient.getServer()
        self.exists = False
        if transID:
            self.paramValues['TransformationID'] = transID
            res = self.getTransformation()
            if res['OK']:
                self.exists = True
            elif res['Message'] == 'Transformation does not exist':
                raise AttributeError('TransformationID %d does not exist' %
                                     transID)
            else:
                self.paramValues['TransformationID'] = 0
                gLogger.fatal(
                    "Failed to get transformation from database",
                    "%s @ %s" % (transID, self.transClient.serverURL))
Example #58
0
            if self.__piParams.strict:
                return result

        for pilotGroup in pI.getGroupsToUpload():
            result = pI.uploadProxy(userGroup=pilotGroup)
            if not result['OK']:
                if self.__piParams.strict:
                    return result

        return S_OK()


if __name__ == "__main__":
    piParams = Params()
    piParams.registerCLISwitches()

    Script.disableCS()
    Script.parseCommandLine(ignoreErrors=True)
    DIRAC.gConfig.setOptionValue("/DIRAC/Security/UseServerCertificate",
                                 "False")

    pI = ProxyInit(piParams)
    result = pI.doTheMagic()
    if not result['OK']:
        gLogger.fatal(result['Message'])
        sys.exit(1)

    pI.printInfo()

    sys.exit(0)
Example #59
0
Remove the given file or a list of files from the File Catalog

Usage:
   %s <LFN | fileContainingLFNs>
""" % Script.scriptName)

Script.parseCommandLine()

from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
allowUsers = Operations().getValue("DataManagement/AllowUserReplicaManagement",
                                   False)

from DIRAC.Core.Security.ProxyInfo import getProxyInfo
res = getProxyInfo()
if not res['OK']:
    gLogger.fatal("Can't get proxy info", res['Message'])
    dexit(1)
properties = res['Value'].get('groupProperties', [])

if not allowUsers:
    if 'FileCatalogManagement' not in properties:
        gLogger.error(
            "You need to use a proxy from a group with FileCatalogManagement")
        dexit(5)

from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
fc = FileCatalog()
import os

args = Script.getPositionalArgs()
Example #60
0
                gLogger.notice('%s status of %s is already Active' %
                               (statusType, se))
                continue
            if statusType in seOptions:
                if not seOptions[statusType] in ALLOWED_STATUSES:
                    gLogger.notice('%s option for %s is %s, instead of %s' %
                                   (statusType, se, seOptions[statusType],
                                    ALLOWED_STATUSES))
                    gLogger.notice('Try specifying the command switches')
                else:
                    resR = resourceStatus.setElementStatus(
                        se, "StorageElement", statusType, 'Active', reason,
                        userName)
                    if not resR['OK']:
                        gLogger.fatal(
                            "Failed to update %s %s to Active, exit -" %
                            (se, statusType), resR['Message'])
                        DIRAC.exit(-1)
                    else:
                        gLogger.notice("Successfully updated %s %s to Active" %
                                       (se, statusType))
                        statusAllowedDict[statusType].append(se)

totalAllowed = 0
totalAllowedSEs = []
for statusType in STATUS_TYPES:
    totalAllowed += len(statusAllowedDict[statusType])
    totalAllowedSEs += statusAllowedDict[statusType]
totalAllowedSEs = list(set(totalAllowedSEs))

if not totalAllowed: