Example #1
 def exec_taskError( cls, jid, cachedJobState, errorMsg, eType ):
   result = cachedJobState.commitChanges()
   if not result[ 'OK' ]:
     cls.log.error( "Cannot write changes to job %s: %s" % ( jid, result[ 'Message' ] ) )
   jobState = JobState( jid )
   result = jobState.getStatus()
   if result[ 'OK' ]:
     if result[ 'Value' ][0].lower() == "failed":
       return S_OK()
   else:
     cls.log.error( "Could not get status of job %s: %s" % ( jid, result[ 'Message' ] ) )
   cls.log.notice( "Job %s: Setting to Failed|%s" % ( jid, errorMsg ) )
   return jobState.setStatus( "Failed", errorMsg, source = eType )
Example #2
 def exec_taskError(cls, jid, cachedJobState, errorMsg):
     result = cachedJobState.commitChanges()
     if not result["OK"]:
         cls.log.error("Cannot write changes to job %s: %s" %
                       (jid, result["Message"]))
     jobState = JobState(jid)
     result = jobState.getStatus()
     if result["OK"]:
         if result["Value"][0].lower() == "failed":
             return S_OK()
     else:
         cls.log.error("Could not get status of job %s: %s" %
                       (jid, result["Message"]))
     cls.log.notice("Job %s: Setting to Failed|%s" % (jid, errorMsg))
     return jobState.setStatus("Failed",
                               errorMsg,
                               source="OptimizationMindHandler")
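Both versions of exec_taskError above rely on DIRAC's return-value convention: every call returns a dict with an "OK" flag and either a "Value" (success) or a "Message" (failure). A minimal sketch of that convention, illustrative only and not DIRAC's actual implementation:

def S_OK(value=None):
    # Successful result: the payload travels in "Value"
    return {"OK": True, "Value": value}

def S_ERROR(message=""):
    # Failed result: the explanation travels in "Message"
    return {"OK": False, "Message": message}

result = S_ERROR("database unreachable")
if not result["OK"]:
    print("Cannot write changes to job %s: %s" % (123, result["Message"]))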
Example #3
 def initializeHandler( cls, serviceInfoDict ):
   try:
     from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
     cls.__jobDB = JobDB()
   except Exception as excp:
     return S_ERROR( "Could not connect to JobDB: %s" % str( excp ) )
   cls.setFailedOnTooFrozen( False )
   cls.setFreezeOnFailedDispatch( False )
   cls.setFreezeOnUnknownExecutor( False )
   cls.setAllowedClients( "JobManager" )
   JobState.checkDBAccess()
   JobState.cleanTaskQueues()
   period = cls.srv_getCSOption( "LoadJobPeriod", 60 )
   result = ThreadScheduler.gThreadScheduler.addPeriodicTask( period, cls.__loadJobs )
   if not result[ 'OK' ]:
     return result
   cls.__loadTaskId = result[ 'Value' ]
   return cls.__loadJobs()
Example #4
 def initializeHandler(cls, serviceInfoDict):
     try:
         from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
         cls.__jobDB = JobDB()
     except Exception as excp:
         return S_ERROR("Could not connect to JobDB: %s" % str(excp))
     cls.setFailedOnTooFrozen(False)
     cls.setFreezeOnFailedDispatch(False)
     cls.setFreezeOnUnknownExecutor(False)
     cls.setAllowedClients("JobManager")
     JobState.checkDBAccess()
     JobState.cleanTaskQueues()
     period = cls.srv_getCSOption("LoadJobPeriod", 60)
     result = ThreadScheduler.gThreadScheduler.addPeriodicTask(
         period, cls.__loadJobs)
     if not result['OK']:
         return result
     cls.__loadTaskId = result['Value']
     return cls.__loadJobs()
Example #5
class CachedJobState(object):

    log = gLogger.getSubLogger("CachedJobState")

    def __init__(self, jid, skipInitState=False):
        self.dOnlyCache = False
        self.__jid = jid
        self.__jobState = JobState(jid)
        self.cleanState(skipInitState=skipInitState)

    def cleanState(self, skipInitState=False):
        self.__cache = {}
        self.__jobLog = []
        self.__insertIntoTQ = False
        self.__dirtyKeys = set()
        self.__manifest = False
        self.__initState = None
        self.__lastValidState = time.time()
        if not skipInitState:
            result = self.getAttributes(["Status", "MinorStatus", "LastUpdateTime"])
            if result["OK"]:
                self.__initState = result["Value"]
            else:
                self.__initState = None

    def recheckValidity(self, graceTime=600):
        now = time.time()
        if graceTime <= 0 or now - self.__lastValidState > graceTime:
            self.__lastValidState = now
            result = self.__jobState.getAttributes(["Status", "MinorStatus", "LastUpdateTime"])
            if not result["OK"]:
                return result
            currentState = result["Value"]
            if not currentState == self.__initState:
                return S_OK(False)
            return S_OK(True)
        return S_OK(self.valid)

    @property
    def valid(self):
        return self.__initState is not None

    @property
    def jid(self):
        return self.__jid

    def getDirtyKeys(self):
        return set(self.__dirtyKeys)

    def commitChanges(self):
        if self.__initState is None:
            return S_ERROR("CachedJobState( %d ) is not valid" % self.__jid)
        changes = {}
        for k in self.__dirtyKeys:
            changes[k] = self.__cache[k]
        result = self.__jobState.commitCache(self.__initState, changes, self.__jobLog)
        try:
            result.pop("rpcStub")
        except KeyError:
            pass
        if not result["OK"]:
            self.cleanState()
            return result
        if not result["Value"]:
            self.cleanState()
            return S_ERROR("Initial state was different")
        newState = result["Value"]
        self.__jobLog = []
        self.__dirtyKeys.clear()
        # Save manifest
        if self.__manifest and self.__manifest.isDirty():
            result = self.__jobState.setManifest(self.__manifest)
            if not result["OK"]:
                self.cleanState()
                for i in range(5):
                    if self.__jobState.rescheduleJob()["OK"]:
                        break
                return result
            self.__manifest.clearDirty()
        # Insert into TQ
        if self.__insertIntoTQ:
            result = self.__jobState.insertIntoTQ()
            if not result["OK"]:
                self.cleanState()
                for i in range(5):
                    if self.__jobState.rescheduleJob()["OK"]:
                        break
                return result
            self.__insertIntoTQ = False

        self.__initState = newState
        self.__lastValidState = time.time()
        return S_OK()

    def serialize(self):
        if self.__manifest:
            manifest = (self.__manifest.dumpAsCFG(), self.__manifest.isDirty())
        else:
            manifest = None
        return DEncode.encode(
            (
                self.__jid,
                self.__cache,
                self.__jobLog,
                manifest,
                self.__initState,
                self.__insertIntoTQ,
                tuple(self.__dirtyKeys),
            )
        )

    @staticmethod
    def deserialize(stub):
        dataTuple, slen = DEncode.decode(stub)
        if len(dataTuple) != 7:
            return S_ERROR("Invalid stub")
        # jid
        if type(dataTuple[0]) not in (types.IntType, types.LongType):
            return S_ERROR("Invalid stub 0")
        # cache
        if type(dataTuple[1]) != types.DictType:
            return S_ERROR("Invalid stub 1")
        # trace
        if type(dataTuple[2]) != types.ListType:
            return S_ERROR("Invalid stub 2")
        # manifest
        tdt3 = type(dataTuple[3])
        if tdt3 != types.NoneType and (tdt3 != types.TupleType or len(dataTuple[3]) != 2):
            return S_ERROR("Invalid stub 3")
        # initstate
        if type(dataTuple[4]) != types.DictType:
            return S_ERROR("Invalid stub 4")
        # Insert into TQ
        if type(dataTuple[5]) != types.BooleanType:
            return S_ERROR("Invalid stub 5")
        # Dirty Keys
        if type(dataTuple[6]) != types.TupleType:
            return S_ERROR("Invalid stub 6")
        cjs = CachedJobState(dataTuple[0], skipInitState=True)
        cjs.__cache = dataTuple[1]
        cjs.__jobLog = dataTuple[2]
        dt3 = dataTuple[3]
        if dataTuple[3]:
            manifest = JobManifest()
            result = manifest.loadCFG(dt3[0])
            if not result["OK"]:
                return result
            if dt3[1]:
                manifest.setDirty()
            else:
                manifest.clearDirty()
            cjs.__manifest = manifest
        cjs.__initState = dataTuple[4]
        cjs.__insertIntoTQ = dataTuple[5]
        cjs.__dirtyKeys = set(dataTuple[6])
        return S_OK(cjs)

    def __cacheAdd(self, key, value):
        self.__cache[key] = value
        self.__dirtyKeys.add(key)

    def __cacheExists(self, keyList):
        if type(keyList) in types.StringTypes:
            keyList = [keyList]
        for key in keyList:
            if key not in self.__cache:
                return False
        return True

    def __cacheResult(self, cKey, functor, fArgs=None):
        keyType = type(cKey)
        # If it's a string
        if keyType in types.StringTypes:
            if cKey not in self.__cache:
                if self.dOnlyCache:
                    return S_ERROR("%s is not cached" % cKey)
                if not fArgs:
                    fArgs = tuple()
                result = functor(*fArgs)
                if not result["OK"]:
                    return result
                data = result["Value"]
                self.__cache[cKey] = data
            return S_OK(self.__cache[cKey])
        # Tuple/List
        elif keyType in (types.ListType, types.TupleType):
            if not self.__cacheExists(cKey):
                if self.dOnlyCache:
                    return S_ERROR("%s is not cached" % cKey)
                if not fArgs:
                    fArgs = tuple()
                result = functor(*fArgs)
                if not result["OK"]:
                    return result
                data = result["Value"]
                if len(cKey) != len(data):
                    gLogger.warn(
                        "CachedJobState.__memorize( %s, %s = %s ) doesn't receive the same amount of values as keys"
                        % (cKey, functor, data)
                    )
                    return data
                for i in range(len(cKey)):
                    self.__cache[cKey[i]] = data[i]
            # Prepare result
            return S_OK(tuple([self.__cache[cK] for cK in cKey]))
        else:
            raise RuntimeError("Cache key %s does not have a valid type" % cKey)

    def __cacheDict(self, prefix, functor, keyList=None):
        if not keyList or not self.__cacheExists(["%s.%s" % (prefix, key) for key in keyList]):
            result = functor(keyList)
            if not result["OK"]:
                return result
            data = result["Value"]
            for key in data:
                cKey = "%s.%s" % (prefix, key)
                # If the key is already in the cache. DO NOT TOUCH. User may have already modified it.
                # We update the coming data with the cached data
                if cKey in self.__cache:
                    data[key] = self.__cache[cKey]
                else:
                    self.__cache[cKey] = data[key]
            return S_OK(data)
        return S_OK(dict([(key, self.__cache["%s.%s" % (prefix, key)]) for key in keyList]))

    def _inspectCache(self):
        return copy.deepcopy(self.__cache)

    def _clearCache(self):
        self.__cache = {}

    @property
    def _internals(self):
        if self.__manifest:
            manifest = (self.__manifest.dumpAsCFG(), self.__manifest.isDirty())
        else:
            manifest = None
        return (
            self.__jid,
            self.dOnlyCache,
            dict(self.__cache),
            list(self.__jobLog),
            manifest,
            dict(self.__initState),
            list(self.__dirtyKeys),
        )

    #
    # Manifest
    #

    def getManifest(self):
        if not self.__manifest:
            result = self.__jobState.getManifest()
            if not result["OK"]:
                return result
            self.__manifest = result["Value"]
        return S_OK(self.__manifest)

    def setManifest(self, manifest):
        if not isinstance(manifest, JobManifest):
            jobManifest = JobManifest()
            result = jobManifest.load(str(manifest))
            if not result["OK"]:
                return result
            manifest = jobManifest
        manifest.setDirty()
        self.__manifest = manifest
        self.__manifest.clearDirty()
        return S_OK()

    # Attributes
    #

    def __addLogRecord(self, majorStatus=None, minorStatus=None, appStatus=None, source=None):
        record = {}
        if majorStatus:
            record["status"] = majorStatus
        if minorStatus:
            record["minor"] = minorStatus
        if appStatus:
            record["application"] = appStatus
        if not record:
            return
        if not source:
            source = "Unknown"
        self.__jobLog.append((record, Time.dateTime(), source))

    def setStatus(self, majorStatus, minorStatus=None, appStatus=None, source=None):
        self.__cacheAdd("att.Status", majorStatus)
        if minorStatus:
            self.__cacheAdd("att.MinorStatus", minorStatus)
        if appStatus:
            self.__cacheAdd("att.ApplicationStatus", appStatus)
        self.__addLogRecord(majorStatus, minorStatus, appStatus, source)
        return S_OK()

    def setMinorStatus(self, minorStatus, source=None):
        self.__cacheAdd("att.MinorStatus", minorStatus)
        self.__addLogRecord(minorStatus=minorStatus, source=source)
        return S_OK()

    def getStatus(self):
        return self.__cacheResult(("att.Status", "att.MinorStatus"), self.__jobState.getStatus)

    def setAppStatus(self, appStatus, source=None):
        self.__cacheAdd("att.ApplicationStatus", appStatus)
        self.__addLogRecord(appStatus=appStatus, source=source)
        return S_OK()

    def getAppStatus(self):
        return self.__cacheResult("att.ApplicationStatus", self.__jobState.getAppStatus)

    #
    # Attribs
    #

    def setAttribute(self, name, value):
        if type(name) not in types.StringTypes:
            return S_ERROR("Attribute name has to be a string")
        self.__cacheAdd("att.%s" % name, value)
        return S_OK()

    def setAttributes(self, attDict):
        if type(attDict) != types.DictType:
            return S_ERROR("Attributes has to be a dictionary and it's %s" % str(type(attDict)))
        for key in attDict:
            self.__cacheAdd("att.%s" % key, attDict[key])
        return S_OK()

    def getAttribute(self, name):
        return self.__cacheResult("att.%s" % name, self.__jobState.getAttribute, (name,))

    def getAttributes(self, nameList=None):
        return self.__cacheDict("att", self.__jobState.getAttributes, nameList)

    # Job params

    def setParameter(self, name, value):
        if type(name) not in types.StringTypes:
            return S_ERROR("Job parameter name has to be a string")
        self.__cacheAdd("jobp.%s" % name, value)
        return S_OK()

    def setParameters(self, pDict):
        if type(pDict) != types.DictType:
            return S_ERROR("Job parameters has to be a dictionary")
        for key in pDict:
            self.__cacheAdd("jobp.%s" % key, pDict[key])
        return S_OK()

    def getParameter(self, name):
        return self.__cacheResult("jobp.%s" % name, self.__jobState.getParameter, (name,))

    def getParameters(self, nameList=None):
        return self.__cacheDict("jobp", self.__jobState.getParameters, nameList)

    # Optimizer params

    def setOptParameter(self, name, value):
        if type(name) not in types.StringTypes:
            return S_ERROR("Optimizer parameter name has to be a string")
        self.__cacheAdd("optp.%s" % name, value)
        return S_OK()

    def setOptParameters(self, pDict):
        if type(pDict) != types.DictType:
            return S_ERROR("Optimizer parameters has to be a dictionary")
        for key in pDict:
            self.__cacheAdd("optp.%s" % key, pDict[key])
        return S_OK()

    def getOptParameter(self, name):
        return self.__cacheResult("optp.%s" % name, self.__jobState.getOptParameter, (name,))

    def getOptParameters(self, nameList=None):
        return self.__cacheDict("optp", self.__jobState.getOptParameters, nameList)

    # Other

    def resetJob(self, source=""):
        """Reset the job!"""
        result = self.__jobState.resetJob(source=source)
        if result["OK"]:
            self.cleanState()
        return result

    def getInputData(self):
        return self.__cacheResult("inputData", self.__jobState.getInputData)

    def insertIntoTQ(self):
        if self.valid:
            self.__insertIntoTQ = True
            return S_OK()
        return S_ERROR("Cached state is invalid")
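A hypothetical usage sketch of the write-behind cache defined above: the setters only mark keys dirty in memory, and a single commitChanges() pushes everything to the database. The job ID and the import path are assumptions and may differ between DIRAC versions.

from DIRAC.WorkloadManagementSystem.Client.JobState.CachedJobState import CachedJobState

cjs = CachedJobState(12345)  # caches Status, MinorStatus and LastUpdateTime on creation
if cjs.valid:
    # These calls only touch the in-memory cache and the pending job log
    cjs.setStatus("Checking", minorStatus="JobPath", source="JobPath")
    cjs.setOptParameter("OptimizerChain", "JobPath,JobSanity")
    # One round trip writes all dirty keys, after verifying the initial state
    result = cjs.commitChanges()
    if not result["OK"]:
        print("Commit failed for job %s: %s" % (cjs.jid, result["Message"]))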
Example #6
 def __init__(self, jid, skipInitState=False):
     self.dOnlyCache = False
     self.__jid = jid
     self.__jobState = JobState(jid)
     self.cleanState(skipInitState=skipInitState)
Example #7
class OptimizationMindHandler( ExecutorMindHandler ):

  __jobDB = False
  __optimizationStates = [ 'Received', 'Checking' ]
  __loadTaskId = False

  MSG_DEFINITIONS = { 'OptimizeJobs' : { 'jids' : ( types.ListType, types.TupleType ) } }

  auth_msg_OptimizeJobs = [ 'all' ]
  def msg_OptimizeJobs( self, msgObj ):
    jids = msgObj.jids
    for jid in jids:
      try:
        jid = int( jid )
      except ValueError:
        self.log.error( "Job ID %s has to be an integer" % jid )
        continue
      #Forget and add task to ensure state is reset
      self.forgetTask( jid )
      result = self.executeTask( jid, CachedJobState( jid ) )
      if not result[ 'OK' ]:
        self.log.error( "Could not add job %s to optimization: %s" % ( jid, result[ 'Message' ] ) )
      else:
        self.log.info( "Received new job %s" % jid )
    return S_OK()

  @classmethod
  def __loadJobs( cls, eTypes = None ):
    log = cls.log
    if cls.__loadTaskId:
      period = cls.srv_getCSOption( "LoadJobPeriod", 300 )
      ThreadScheduler.gThreadScheduler.setTaskPeriod( cls.__loadTaskId, period )
    if not eTypes:
      eConn = cls.getExecutorsConnected()
      eTypes = [ eType for eType in eConn if eConn[ eType ] > 0 ]
    if not eTypes:
      log.info( "No optimizer connected. Skipping load" )
      return S_OK()
    log.info( "Getting jobs for %s" % ",".join( eTypes ) )
    checkingMinors = [ eType.split("/")[1] for eType in eTypes if eType != "WorkloadManagement/JobPath" ]
    for opState in cls.__optimizationStates:
      #For Received states
      if opState == "Received":
        if 'WorkloadManagement/JobPath' not in eTypes:
          continue
        jobCond = { 'Status' : opState }
      #For checking states
      if opState == "Checking":
        if not checkingMinors:
          continue
        jobCond = { 'Status': opState, 'MinorStatus' : checkingMinors }
      #Do the magic
      jobTypeCondition = cls.srv_getCSOption( "JobTypeRestriction", [] )
      if jobTypeCondition:
        jobCond[ 'JobType' ] = jobTypeCondition
      result = cls.__jobDB.selectJobs( jobCond, limit = cls.srv_getCSOption( "JobQueryLimit", 10000 ) )
      if not result[ 'OK' ]:
        return result
      jidList = result[ 'Value' ]
      knownJids = cls.getTaskIds()
      added = 0
      for jid in jidList:
        jid = long( jid )
        if jid not in knownJids:
          #Same as before. Check that the state is ok.
          cls.executeTask( jid, CachedJobState( jid ) )
          added += 1
      log.info( "Added %s/%s jobs for %s state" % ( added, len( jidList ), opState ) )
    return S_OK()

  @classmethod
  def initializeHandler( cls, serviceInfoDict ):
    try:
      from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
      cls.__jobDB = JobDB()
    except Exception as excp:
      return S_ERROR( "Could not connect to JobDB: %s" % str( excp ) )
    cls.setFailedOnTooFrozen( False )
    cls.setFreezeOnFailedDispatch( False )
    cls.setFreezeOnUnknownExecutor( False )
    cls.setAllowedClients( "JobManager" )
    JobState.checkDBAccess()
    JobState.cleanTaskQueues()
    period = cls.srv_getCSOption( "LoadJobPeriod", 60 )
    result = ThreadScheduler.gThreadScheduler.addPeriodicTask( period, cls.__loadJobs )
    if not result[ 'OK' ]:
      return result
    cls.__loadTaskId = result[ 'Value' ]
    return cls.__loadJobs()
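The two selectJobs queries that __loadJobs builds can be illustrated with a short sketch; the executor names and the resulting conditions are assumptions derived from the code above.

# Executors reported as connected (illustrative values)
connected = ["WorkloadManagement/JobPath", "WorkloadManagement/JobSanity"]

# Every checking optimizer except JobPath contributes a MinorStatus value
checkingMinors = [eType.split("/")[1] for eType in connected
                  if eType != "WorkloadManagement/JobPath"]

# Conditions passed to JobDB.selectJobs for each optimization state
receivedCond = {"Status": "Received"}                        # handled by JobPath
checkingCond = {"Status": "Checking", "MinorStatus": checkingMinors}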
Example #8
 def __init__(self, jid, skipInitState=False):
     self.log = gLogger.getSubLogger(self.__class__.__name__)
     self.dOnlyCache = False
     self.__jid = jid
     self.__jobState = JobState(jid)
     self.cleanState(skipInitState=skipInitState)
Example #9
class CachedJobState:
    def __init__(self, jid, skipInitState=False):
        self.log = gLogger.getSubLogger(self.__class__.__name__)
        self.dOnlyCache = False
        self.__jid = jid
        self.__jobState = JobState(jid)
        self.cleanState(skipInitState=skipInitState)

    def cleanState(self, skipInitState=False):
        self.__cache = {}
        self.__jobLog = []
        self.__insertIntoTQ = False
        self.__dirtyKeys = set()
        self.__manifest = False
        self.__initState = None
        self.__lastValidState = time.time()
        if not skipInitState:
            result = self.getAttributes(
                ["Status", "MinorStatus", "LastUpdateTime"])
            if result["OK"]:
                self.__initState = result["Value"]
            else:
                self.__initState = None

    def recheckValidity(self, graceTime=600):
        now = time.time()
        if graceTime <= 0 or now - self.__lastValidState > graceTime:
            self.__lastValidState = now
            result = self.__jobState.getAttributes(
                ["Status", "MinorStatus", "LastUpdateTime"])
            if not result["OK"]:
                return result
            currentState = result["Value"]
            if not currentState == self.__initState:
                return S_OK(False)
            return S_OK(True)
        return S_OK(self.valid)

    @property
    def valid(self):
        return self.__initState is not None

    @property
    def jid(self):
        return self.__jid

    def getDirtyKeys(self):
        return set(self.__dirtyKeys)

    def commitChanges(self):
        if self.__initState is None:
            return S_ERROR("CachedJobState( %d ) is not valid" % self.__jid)
        changes = {}
        for k in self.__dirtyKeys:
            changes[k] = self.__cache[k]
        result = self.__jobState.commitCache(self.__initState, changes,
                                             self.__jobLog)
        try:
            result.pop("rpcStub")
        except KeyError:
            pass
        if not result["OK"]:
            self.cleanState()
            return result
        if not result["Value"]:
            self.cleanState()
            return S_ERROR("Initial state was different")
        newState = result["Value"]
        self.__jobLog = []
        self.__dirtyKeys.clear()
        # Save manifest
        if self.__manifest and self.__manifest.isDirty():
            result = self.__jobState.setManifest(self.__manifest)
            if not result["OK"]:
                self.cleanState()
                for _ in range(5):
                    if self.__jobState.rescheduleJob()["OK"]:
                        break
                return result
            self.__manifest.clearDirty()
        # Insert into TQ
        if self.__insertIntoTQ:
            result = self.__jobState.insertIntoTQ()
            if not result["OK"]:
                self.cleanState()
                for _ in range(5):
                    if self.__jobState.rescheduleJob()["OK"]:
                        break
                return result
            self.__insertIntoTQ = False

        self.__initState = newState
        self.__lastValidState = time.time()
        return S_OK()

    def serialize(self):
        if self.__manifest:
            manifest = [self.__manifest.dumpAsCFG(), self.__manifest.isDirty()]
        else:
            manifest = None
        data = DEncode.encode([
            self.__jid,
            self.__cache,
            self.__jobLog,
            manifest,
            self.__initState,
            self.__insertIntoTQ,
            list(self.__dirtyKeys),
        ])
        return data.decode()

    @staticmethod
    def deserialize(stub):
        dataTuple, _slen = DEncode.decode(stub.encode())
        if len(dataTuple) != 7:
            return S_ERROR("Invalid stub")
        # jid
        if not isinstance(dataTuple[0], int):
            return S_ERROR("Invalid stub 0")
        # cache
        if not isinstance(dataTuple[1], dict):
            return S_ERROR("Invalid stub 1")
        # trace
        if not isinstance(dataTuple[2], list):
            return S_ERROR("Invalid stub 2")
        # manifest
        if dataTuple[3] is not None and (
                not isinstance(dataTuple[3],
                               (tuple, list)) or len(dataTuple[3]) != 2):
            return S_ERROR("Invalid stub 3")
        # initstate
        if not isinstance(dataTuple[4], dict):
            return S_ERROR("Invalid stub 4")
        # Insert into TQ
        if not isinstance(dataTuple[5], bool):
            return S_ERROR("Invalid stub 5")
        # Dirty Keys
        if not isinstance(dataTuple[6], (tuple, list)):
            return S_ERROR("Invalid stub 6")
        cjs = CachedJobState(dataTuple[0], skipInitState=True)
        cjs.__cache = dataTuple[1]
        cjs.__jobLog = dataTuple[2]
        dt3 = dataTuple[3]
        if dataTuple[3]:
            manifest = JobManifest()
            result = manifest.loadCFG(dt3[0])
            if not result["OK"]:
                return result
            if dt3[1]:
                manifest.setDirty()
            else:
                manifest.clearDirty()
            cjs.__manifest = manifest
        cjs.__initState = dataTuple[4]
        cjs.__insertIntoTQ = dataTuple[5]
        cjs.__dirtyKeys = set(dataTuple[6])
        return S_OK(cjs)

    def __cacheAdd(self, key, value):
        self.__cache[key] = value
        self.__dirtyKeys.add(key)

    def __cacheExists(self, keyList):
        if isinstance(keyList, str):
            keyList = [keyList]
        for key in keyList:
            if key not in self.__cache:
                return False
        return True

    def __cacheResult(self, cKey, functor, fArgs=None):
        # If it's a string
        if isinstance(cKey, str):
            if cKey not in self.__cache:
                if self.dOnlyCache:
                    return S_ERROR("%s is not cached" % cKey)
                if not fArgs:
                    fArgs = tuple()
                result = functor(*fArgs)
                if not result["OK"]:
                    return result
                data = result["Value"]
                self.__cache[cKey] = data
            return S_OK(self.__cache[cKey])
        # Tuple/List
        elif isinstance(cKey, (list, tuple)):
            if not self.__cacheExists(cKey):
                if self.dOnlyCache:
                    return S_ERROR("%s is not cached" % cKey)
                if not fArgs:
                    fArgs = tuple()
                result = functor(*fArgs)
                if not result["OK"]:
                    return result
                data = result["Value"]
                if len(cKey) != len(data):
                    gLogger.warn(
                        "CachedJobState.__memorize( %s, %s = %s ) doesn't receive the same amount of values as keys"
                        % (cKey, functor, data))
                    return data
                for i, val in enumerate(cKey):
                    self.__cache[val] = data[i]
            # Prepare result
            return S_OK(tuple([self.__cache[cK] for cK in cKey]))
        else:
            raise RuntimeError("Cache key %s does not have a valid type" %
                               cKey)

    def __cacheDict(self, prefix, functor, keyList=None):
        if not keyList or not self.__cacheExists(
            ["%s.%s" % (prefix, key) for key in keyList]):
            result = functor(keyList)
            if not result["OK"]:
                return result
            data = result["Value"]
            for key in data:
                cKey = "%s.%s" % (prefix, key)
                # If the key is already in the cache. DO NOT TOUCH. User may have already modified it.
                # We update the coming data with the cached data
                if cKey in self.__cache:
                    data[key] = self.__cache[cKey]
                else:
                    self.__cache[cKey] = data[key]
            return S_OK(data)
        return S_OK(
            dict([(key, self.__cache["%s.%s" % (prefix, key)])
                  for key in keyList]))

    def _inspectCache(self):
        return copy.deepcopy(self.__cache)

    def _clearCache(self):
        self.__cache = {}

    @property
    def _internals(self):
        if self.__manifest:
            manifest = (self.__manifest.dumpAsCFG(), self.__manifest.isDirty())
        else:
            manifest = None
        return (
            self.__jid,
            self.dOnlyCache,
            dict(self.__cache),
            list(self.__jobLog),
            manifest,
            dict(self.__initState),
            list(self.__dirtyKeys),
        )

    #
    # Manifest
    #

    def getManifest(self):
        if not self.__manifest:
            result = self.__jobState.getManifest()
            if not result["OK"]:
                return result
            self.__manifest = result["Value"]
        return S_OK(self.__manifest)

    def setManifest(self, manifest):
        if not isinstance(manifest, JobManifest):
            jobManifest = JobManifest()
            result = jobManifest.load(str(manifest))
            if not result["OK"]:
                return result
            manifest = jobManifest
        manifest.setDirty()
        self.__manifest = manifest
        # self.__manifest.clearDirty()
        return S_OK()

    # Attributes
    #

    def __addLogRecord(self,
                       majorStatus=None,
                       minorStatus=None,
                       appStatus=None,
                       source=None):
        record = {}
        if majorStatus:
            record["status"] = majorStatus
        if minorStatus:
            record["minor"] = minorStatus
        if appStatus:
            record["application"] = appStatus
        if not record:
            return
        if not source:
            source = "Unknown"
        self.__jobLog.append([record, datetime.datetime.utcnow(), source])

    def setStatus(self,
                  majorStatus,
                  minorStatus=None,
                  appStatus=None,
                  source=None):
        self.__cacheAdd("att.Status", majorStatus)
        if minorStatus:
            self.__cacheAdd("att.MinorStatus", minorStatus)
        if appStatus:
            self.__cacheAdd("att.ApplicationStatus", appStatus)
        self.__addLogRecord(majorStatus, minorStatus, appStatus, source)
        return S_OK()

    def setMinorStatus(self, minorStatus, source=None):
        self.__cacheAdd("att.MinorStatus", minorStatus)
        self.__addLogRecord(minorStatus=minorStatus, source=source)
        return S_OK()

    def getStatus(self):
        return self.__cacheResult(("att.Status", "att.MinorStatus"),
                                  self.__jobState.getStatus)

    def setAppStatus(self, appStatus, source=None):
        self.__cacheAdd("att.ApplicationStatus", appStatus)
        self.__addLogRecord(appStatus=appStatus, source=source)
        return S_OK()

    def getAppStatus(self):
        return self.__cacheResult("att.ApplicationStatus",
                                  self.__jobState.getAppStatus)

    #
    # Attribs
    #

    def setAttribute(self, name, value):
        if not isinstance(name, str):
            return S_ERROR("Attribute name has to be a string")
        self.__cacheAdd("att.%s" % name, value)
        return S_OK()

    def setAttributes(self, attDict):
        if not isinstance(attDict, dict):
            return S_ERROR("Attributes has to be a dictionary and it's %s" %
                           str(type(attDict)))
        for key in attDict:
            self.__cacheAdd("att.%s" % key, attDict[key])
        return S_OK()

    def getAttribute(self, name):
        return self.__cacheResult("att.%s" % name,
                                  self.__jobState.getAttribute, (name, ))

    def getAttributes(self, nameList=None):
        return self.__cacheDict("att", self.__jobState.getAttributes, nameList)

    # JobParameters --- REMOVED

    # Optimizer params

    def setOptParameter(self, name, value):
        if not isinstance(name, str):
            return S_ERROR("Optimizer parameter name has to be a string")
        self.__cacheAdd("optp.%s" % name, value)
        return S_OK()

    def setOptParameters(self, pDict):
        if not isinstance(pDict, dict):
            return S_ERROR("Optimizer parameters has to be a dictionary")
        for key in pDict:
            self.__cacheAdd("optp.%s" % key, pDict[key])
        return S_OK()

    def getOptParameter(self, name):
        return self.__cacheResult("optp.%s" % name,
                                  self.__jobState.getOptParameter, (name, ))

    def getOptParameters(self, nameList=None):
        return self.__cacheDict("optp", self.__jobState.getOptParameters,
                                nameList)

    # Other

    def resetJob(self, source=""):
        """Reset the job!"""
        return self.__jobState.resetJob(source=source)

    def getInputData(self):
        return self.__cacheResult("inputData", self.__jobState.getInputData)

    def insertIntoTQ(self):
        if self.valid:
            self.__insertIntoTQ = True
            return S_OK()
        return S_ERROR("Cached state is invalid")
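A sketch of the serialize/deserialize round trip supported by the class above, e.g. to ship a task between the mind and an executor. The job ID is an illustrative assumption, and the constructor still needs a reachable JobState backend so that the initial state can be cached.

cjs = CachedJobState(12345)                    # assumes job 12345 exists and the DB is reachable
cjs.setOptParameter("JobPath", "JobSanity")    # marks "optp.JobPath" as dirty
stub = cjs.serialize()                         # DEncode string carrying jid, cache, log, manifest, ...

result = CachedJobState.deserialize(stub)
if result["OK"]:
    restored = result["Value"]
    assert restored.jid == 12345
    assert "optp.JobPath" in restored.getDirtyKeys()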
Example #10
 def __init__(self, jid, skipInitState=False):
     self.dOnlyCache = False
     self.__jid = jid
     self.__jobState = JobState(jid)
     self.cleanState(skipInitState=skipInitState)
Example #11
 def __getJobState(self, jid):
     return JobState(jid, forceLocal=True)