def initialize(self): """ This replaces the standard initialize from Service """ # Build the URLs self._url = self._cfg.getURL() if not self._url: return S_ERROR("Could not build service URL for %s" % GatewayService.GATEWAY_NAME) gLogger.verbose("Service URL is %s" % self._url) # Load handler result = self._loadHandlerInit() if not result['OK']: return result self._handler = result['Value'] # Discover Handler # TODO: remove later if useThreadPoolExecutor: self._threadPool = ThreadPoolExecutor(max(0, self._cfg.getMaxThreads())) else: self._threadPool = ThreadPool(1, max(0, self._cfg.getMaxThreads()), self._cfg.getMaxWaitingPetitions()) self._threadPool.daemonize() self._msgBroker = MessageBroker("%sMSB" % GatewayService.GATEWAY_NAME, threadPool=self._threadPool) self._msgBroker.useMessageObjects(False) getGlobalMessageBroker().useMessageObjects(False) self._msgForwarder = MessageForwarder(self._msgBroker) return S_OK()
def initialize(self):

    # Attribute defined outside __init__
    # pylint: disable-msg=W0201

    try:
        self.rsClient = ResourceStatusClient()
        self.sitesFreqs = CS.getTypedDictRootedAtOperations('CheckingFreqs/SitesFreqs')
        self.sitesToBeChecked = Queue.Queue()
        self.siteNamesInCheck = []

        self.maxNumberOfThreads = self.am_getOption('maxThreadsInPool', 1)
        self.threadPool = ThreadPool(self.maxNumberOfThreads, self.maxNumberOfThreads)
        if not self.threadPool:
            self.log.error('Can not create Thread Pool')
            return S_ERROR('Can not create Thread Pool')

        for _i in xrange(self.maxNumberOfThreads):
            self.threadPool.generateJobAndQueueIt(self._executeCheck, args=(None, ))

        return S_OK()

    except Exception:
        errorStr = "SSInspectorAgent initialization"
        self.log.exception(errorStr)
        return S_ERROR(errorStr)
def initialize(self): """ Standard initialize. """ maxNumberOfThreads = self.am_getOption('maxNumberOfThreads', self.__maxNumberOfThreads) self.threadPool = ThreadPool(maxNumberOfThreads, maxNumberOfThreads) res = ObjectLoader().loadObject( 'DIRAC.ResourceStatusSystem.Client.SiteStatus', 'SiteStatus') if not res['OK']: self.log.error('Failed to load SiteStatus class: %s' % res['Message']) return res siteStatusClass = res['Value'] res = ObjectLoader().loadObject( 'DIRAC.ResourceStatusSystem.Client.ResourceManagementClient', 'ResourceManagementClient') if not res['OK']: self.log.error( 'Failed to load ResourceManagementClient class: %s' % res['Message']) return res rmClass = res['Value'] self.siteClient = siteStatusClass() self.clients['SiteStatus'] = siteStatusClass() self.clients['ResourceManagementClient'] = rmClass() return S_OK()
def initialize(self):
    # Build the URLs
    self._url = self._cfg.getURL()
    if not self._url:
        return S_ERROR("Could not build service URL for %s" % self._name)
    gLogger.verbose("Service URL is %s" % self._url)
    # Load handler
    result = self._loadHandlerInit()
    if not result['OK']:
        return result
    self._handler = result['Value']
    # Initialize lock manager
    self._lockManager = LockManager(self._cfg.getMaxWaitingPetitions())
    self._initMonitoring()
    self._threadPool = ThreadPool(max(1, self._cfg.getMinThreads()),
                                  max(0, self._cfg.getMaxThreads()),
                                  self._cfg.getMaxWaitingPetitions())
    self._threadPool.daemonize()
    self._msgBroker = MessageBroker("%sMSB" % self._name, threadPool=self._threadPool)
    # Create static dict
    self._serviceInfoDict = {'serviceName': self._name,
                             'serviceSectionPath': PathFinder.getServiceSection(self._name),
                             'URL': self._cfg.getURL(),
                             'messageSender': MessageSender(self._name, self._msgBroker),
                             'validNames': self._validNames,
                             'csPaths': [PathFinder.getServiceSection(svcName) for svcName in self._validNames]}
    # Call static initialization function
    try:
        self._handler['class']._rh__initializeClass(dict(self._serviceInfoDict),
                                                    self._lockManager,
                                                    self._msgBroker,
                                                    self._monitor)
        if self._handler['init']:
            for initFunc in self._handler['init']:
                gLogger.verbose("Executing initialization function")
                try:
                    result = initFunc(dict(self._serviceInfoDict))
                except Exception as excp:
                    gLogger.exception("Exception while calling initialization function", lException=excp)
                    return S_ERROR("Exception while calling initialization function: %s" % str(excp))
                if not isReturnStructure(result):
                    return S_ERROR("Service initialization function %s must return S_OK/S_ERROR" % initFunc)
                if not result['OK']:
                    return S_ERROR("Error while initializing %s: %s" % (self._name, result['Message']))
    except Exception as e:
        errMsg = "Exception while initializing %s" % self._name
        gLogger.exception(e)
        gLogger.exception(errMsg)
        return S_ERROR(errMsg)
    # Load actions after the handler has initialized itself
    result = self._loadActions()
    if not result['OK']:
        return result
    self._actions = result['Value']
    gThreadScheduler.addPeriodicTask(30, self.__reportThreadPoolContents)
    return S_OK()
def __init__(self, **kwargs): """ Constructor """ if 'hosts' in kwargs: self.__hosts = kwargs['hosts'] del kwargs['hosts'] else: result = Registry.getHosts() if result['OK']: self.__hosts = result['Value'] else: self.__hosts = [] # Excluded hosts if 'exclude' in kwargs: self.__hosts = list(set(self.__hosts) - set(kwargs['exclude'])) # Ping the hosts to remove those that don't have a SystemAdministrator service sysAdminHosts = [] for host in self.__hosts: client = SystemAdministratorClient(host) result = client.ping() if result['OK']: sysAdminHosts.append(host) self.__hosts = sysAdminHosts self.__kwargs = dict(kwargs) self.__pool = ThreadPool(len(self.__hosts)) self.__resultDict = {}
def initialize( self ): """ Standard constructor """ try: self.rsDB = ResourceStatusDB() self.rmDB = ResourceManagementDB() self.StorageElementToBeChecked = Queue.Queue() self.StorageElementInCheck = [] self.maxNumberOfThreads = self.am_getOption( 'maxThreadsInPool', 1 ) self.threadPool = ThreadPool( self.maxNumberOfThreads, self.maxNumberOfThreads ) if not self.threadPool: self.log.error( 'Can not create Thread Pool' ) return S_ERROR( 'Can not create Thread Pool' ) self.setup = getSetup()[ 'Value' ] self.VOExtension = getExt() self.StorageElsWriteFreqs = CheckingFreqs[ 'StorageElsWriteFreqs' ] self.nc = NotificationClient() self.diracAdmin = DiracAdmin() self.csAPI = CSAPI() for _i in xrange( self.maxNumberOfThreads ): self.threadPool.generateJobAndQueueIt( self._executeCheck, args = ( None, ) ) return S_OK() except Exception: errorStr = "StElWriteInspectorAgent initialization" gLogger.exception( errorStr ) return S_ERROR( errorStr )
def __init__(self, VOExtension, rsDBIn=None, commandCallerIn=None, infoGetterIn=None, WMSAdminIn=None):
    """ Standard constructor

    :params:
      :attr:`VOExtension`: string, VO Extension (e.g. 'LHCb')

      :attr:`rsDBIn`: optional ResourceStatusDB object
      (see :class: `DIRAC.ResourceStatusSystem.DB.ResourceStatusDB.ResourceStatusDB`)

      :attr:`commandCallerIn`: optional CommandCaller object
      (see :class: `DIRAC.ResourceStatusSystem.Command.CommandCaller.CommandCaller`)

      :attr:`infoGetterIn`: optional InfoGetter object
      (see :class: `DIRAC.ResourceStatusSystem.Utilities.InfoGetter.InfoGetter`)

      :attr:`WMSAdminIn`: optional RPCClient object for WMSAdmin
      (see :class: `DIRAC.Core.DISET.RPCClient.RPCClient`)
    """
    self.configModule = Utils.voimport("DIRAC.ResourceStatusSystem.Policy.Configurations", VOExtension)

    if rsDBIn is not None:
        self.rsDB = rsDBIn
    else:
        from DIRAC.ResourceStatusSystem.DB.ResourceStatusDB import ResourceStatusDB
        self.rsDB = ResourceStatusDB()

    from DIRAC.ResourceStatusSystem.DB.ResourceManagementDB import ResourceManagementDB
    self.rmDB = ResourceManagementDB()

    if commandCallerIn is not None:
        self.cc = commandCallerIn
    else:
        from DIRAC.ResourceStatusSystem.Command.CommandCaller import CommandCaller
        self.cc = CommandCaller()

    if infoGetterIn is not None:
        self.ig = infoGetterIn
    else:
        from DIRAC.ResourceStatusSystem.Utilities.InfoGetter import InfoGetter
        self.ig = InfoGetter(VOExtension)

    if WMSAdminIn is not None:
        self.WMSAdmin = WMSAdminIn
    else:
        from DIRAC.Core.DISET.RPCClient import RPCClient
        self.WMSAdmin = RPCClient("WorkloadManagement/WMSAdministrator")

    self.threadPool = ThreadPool(2, 5)
    self.lockObj = threading.RLock()
    self.infoForPanel_res = {}
def initialize(self): """ Agent initialization. The extensions MUST provide in the initialize method the following data members: - TransformationClient objects (self.transClient), - set the shifterProxy if different from the default one set here ('ProductionManager') - list of transformation types to be looked (self.transType) """ gMonitor.registerActivity("SubmittedTasks", "Automatically submitted tasks", "Transformation Monitoring", "Tasks", gMonitor.OP_ACUM) self.pluginLocation = self.am_getOption( 'PluginLocation', 'DIRAC.TransformationSystem.Client.TaskManagerPlugin') # Default clients self.transClient = TransformationClient() # Bulk submission flag self.bulkSubmissionFlag = self.am_getOption('BulkSubmission', False) # setting up the threading maxNumberOfThreads = self.am_getOption('maxNumberOfThreads', 15) threadPool = ThreadPool(maxNumberOfThreads, maxNumberOfThreads) self.log.verbose("Multithreaded with %d threads" % maxNumberOfThreads) for i in xrange(maxNumberOfThreads): threadPool.generateJobAndQueueIt(self._execute, [i]) return S_OK()
def initialize( self ): """ Agent initialization. The extensions MUST provide in the initialize method the following data members: - TransformationClient objects (self.transClient), - set the shifterProxy if different from the default one set here ('ProductionManager') - list of transformation types to be looked (self.transType) """ gMonitor.registerActivity( "SubmittedTasks", "Automatically submitted tasks", "Transformation Monitoring", "Tasks", gMonitor.OP_ACUM ) self.pluginLocation = self.am_getOption( 'PluginLocation', 'DIRAC.TransformationSystem.Client.TaskManagerPlugin' ) # Default clients self.transClient = TransformationClient() # Bulk submission flag self.bulkSubmissionFlag = self.am_getOption( 'BulkSubmission', False ) # setting up the threading maxNumberOfThreads = self.am_getOption( 'maxNumberOfThreads', 15 ) threadPool = ThreadPool( maxNumberOfThreads, maxNumberOfThreads ) self.log.verbose( "Multithreaded with %d threads" % maxNumberOfThreads ) for i in xrange( maxNumberOfThreads ): threadPool.generateJobAndQueueIt( self._execute, [i] ) return S_OK()
class MyProxyRenewalAgent(AgentModule):

  def initialize(self):
    requiredLifeTime = self.am_getOption( "MinimumLifeTime", 3600 )
    renewedLifeTime = self.am_getOption( "RenewedLifeTime", 54000 )
    myProxyServer = gConfig.getValue( "/DIRAC/VOPolicy/MyProxyServer", "myproxy.cern.ch" )
    self.proxyDB = ProxyDB( requireVoms = True,
                            useMyProxy = True )
    gLogger.info( "Minimum Life time : %s" % requiredLifeTime )
    gLogger.info( "Life time on renew : %s" % renewedLifeTime )
    gLogger.info( "MyProxy server : %s" % self.proxyDB.getMyProxyServer() )
    gLogger.info( "MyProxy max proxy time : %s" % self.proxyDB.getMyProxyMaxLifeTime() )
    self.__threadPool = ThreadPool( 1, 10 )
    return S_OK()

  def __renewProxyForCredentials( self, userDN, userGroup ):
    lifeTime = self.am_getOption( "RenewedLifeTime", 54000 )
    gLogger.info( "Renewing for %s@%s %s secs" % ( userDN, userGroup, lifeTime ) )
    retVal = self.proxyDB.renewFromMyProxy( userDN,
                                            userGroup,
                                            lifeTime = lifeTime )
    if not retVal[ 'OK' ]:
      gLogger.error( "Failed to renew for %s@%s : %s" % ( userDN, userGroup, retVal[ 'Message' ] ) )
    else:
      gLogger.info( "Renewed proxy for %s@%s" % ( userDN, userGroup ) )

  def __treatRenewalCallback( self, oTJ, exceptionList ):
    gLogger.exception( lException = exceptionList )

  def execute(self):
    """ The main agent execution method
    """
    self.proxyDB.purgeLogs()
    gLogger.info( "Purging expired requests" )
    retVal = self.proxyDB.purgeExpiredRequests()
    if retVal[ 'OK' ]:
      gLogger.info( " purged %s requests" % retVal[ 'Value' ] )
    gLogger.info( "Purging expired proxies" )
    retVal = self.proxyDB.purgeExpiredProxies()
    if retVal[ 'OK' ]:
      gLogger.info( " purged %s proxies" % retVal[ 'Value' ] )
    retVal = self.proxyDB.getCredentialsAboutToExpire( self.am_getOption( "MinimumLifeTime", 3600 ) )
    if not retVal[ 'OK' ]:
      return retVal
    data = retVal[ 'Value' ]
    gLogger.info( "Renewing %s proxies..." % len( data ) )
    for record in data:
      userDN = record[0]
      userGroup = record[1]
      self.__threadPool.generateJobAndQueueIt( self.__renewProxyForCredentials,
                                               args = ( userDN, userGroup ),
                                               oExceptionCallback = self.__treatRenewalCallback )
    self.__threadPool.processAllResults()
    return S_OK()
class MyProxyRenewalAgent(AgentModule):

  def initialize(self):
    requiredLifeTime = self.am_getOption( "MinimumLifeTime", 3600 )
    renewedLifeTime = self.am_getOption( "RenewedLifeTime", 54000 )
    self.proxyDB = ProxyDB( useMyProxy = True )
    gLogger.info( "Minimum Life time : %s" % requiredLifeTime )
    gLogger.info( "Life time on renew : %s" % renewedLifeTime )
    gLogger.info( "MyProxy server : %s" % self.proxyDB.getMyProxyServer() )
    gLogger.info( "MyProxy max proxy time : %s" % self.proxyDB.getMyProxyMaxLifeTime() )
    self.__threadPool = ThreadPool( 1, 10 )
    return S_OK()

  def __renewProxyForCredentials( self, userDN, userGroup ):
    lifeTime = self.am_getOption( "RenewedLifeTime", 54000 )
    gLogger.info( "Renewing for %s@%s %s secs" % ( userDN, userGroup, lifeTime ) )
    retVal = self.proxyDB.renewFromMyProxy( userDN,
                                            userGroup,
                                            lifeTime = lifeTime )
    if not retVal[ 'OK' ]:
      gLogger.error( "Failed to renew proxy", "for %s@%s : %s" % ( userDN, userGroup, retVal[ 'Message' ] ) )
    else:
      gLogger.info( "Renewed proxy for %s@%s" % ( userDN, userGroup ) )

  def __treatRenewalCallback( self, oTJ, exceptionList ):
    gLogger.exception( lException = exceptionList )

  def execute(self):
    """ The main agent execution method
    """
    self.proxyDB.purgeLogs()
    gLogger.info( "Purging expired requests" )
    retVal = self.proxyDB.purgeExpiredRequests()
    if retVal[ 'OK' ]:
      gLogger.info( " purged %s requests" % retVal[ 'Value' ] )
    gLogger.info( "Purging expired proxies" )
    retVal = self.proxyDB.purgeExpiredProxies()
    if retVal[ 'OK' ]:
      gLogger.info( " purged %s proxies" % retVal[ 'Value' ] )
    retVal = self.proxyDB.getCredentialsAboutToExpire( self.am_getOption( "MinimumLifeTime", 3600 ) )
    if not retVal[ 'OK' ]:
      return retVal
    data = retVal[ 'Value' ]
    gLogger.info( "Renewing %s proxies..." % len( data ) )
    for record in data:
      userDN = record[0]
      userGroup = record[1]
      self.__threadPool.generateJobAndQueueIt( self.__renewProxyForCredentials,
                                               args = ( userDN, userGroup ),
                                               oExceptionCallback = self.__treatRenewalCallback )
    self.__threadPool.processAllResults()
    return S_OK()
def initialize(self): """ standard initialize """ # few parameters self.pluginLocation = self.am_getOption( 'PluginLocation', 'DIRAC.TransformationSystem.Agent.TransformationPlugin') self.transformationStatus = self.am_getOption( 'transformationStatus', ['Active', 'Completing', 'Flush']) # Prepare to change the name of the CS option as MaxFiles is ambiguous self.maxFiles = self.am_getOption('MaxFilesToProcess', self.am_getOption('MaxFiles', 5000)) agentTSTypes = self.am_getOption('TransformationTypes', []) if agentTSTypes: self.transformationTypes = sorted(agentTSTypes) else: dataProc = Operations().getValue('Transformations/DataProcessing', ['MCSimulation', 'Merge']) dataManip = Operations().getValue( 'Transformations/DataManipulation', ['Replication', 'Removal']) self.transformationTypes = sorted(dataProc + dataManip) # clients self.transfClient = TransformationClient() # for caching using a pickle file self.workDirectory = self.am_getWorkDirectory() self.cacheFile = os.path.join(self.workDirectory, 'ReplicaCache.pkl') self.controlDirectory = self.am_getControlDirectory() # remember the offset if any in TS self.lastFileOffset = {} # Validity of the cache self.replicaCache = {} self.replicaCacheValidity = self.am_getOption('ReplicaCacheValidity', 2) self.noUnusedDelay = self.am_getOption('NoUnusedDelay', 6) # Get it threaded maxNumberOfThreads = self.am_getOption('maxThreadsInPool', 1) threadPool = ThreadPool(maxNumberOfThreads, maxNumberOfThreads) self.log.info("Multithreaded with %d threads" % maxNumberOfThreads) for i in xrange(maxNumberOfThreads): threadPool.generateJobAndQueueIt(self._execute, [i]) self.log.info("Will treat the following transformation types: %s" % str(self.transformationTypes)) return S_OK()
def initialize(self):
  requiredLifeTime = self.am_getOption( "MinimumLifeTime", 3600 )
  renewedLifeTime = self.am_getOption( "RenewedLifeTime", 54000 )
  self.proxyDB = ProxyDB( useMyProxy = True )
  gLogger.info( "Minimum Life time : %s" % requiredLifeTime )
  gLogger.info( "Life time on renew : %s" % renewedLifeTime )
  gLogger.info( "MyProxy server : %s" % self.proxyDB.getMyProxyServer() )
  gLogger.info( "MyProxy max proxy time : %s" % self.proxyDB.getMyProxyMaxLifeTime() )
  self.__threadPool = ThreadPool( 1, 10 )
  return S_OK()
def initialize(self): """ agent's initialisation """ self.transferDB = TransferDB() self.am_setOption("shifterProxy", "DataManager") self.minThreads = self.am_getOption("MinThreads", self.minThreads) self.maxThreads = self.am_getOption("MaxThreads", self.maxThreads) minmax = (abs(self.minThreads), abs(self.maxThreads)) self.minThreads, self.maxThreads = min(minmax), max(minmax) self.log.info("ThreadPool min threads = %s" % self.minThreads) self.log.info("ThreadPool max threads = %s" % self.maxThreads) self.threadPool = ThreadPool(self.minThreads, self.maxThreads) self.threadPool.daemonize() return S_OK()
class SystemAdministratorIntegrator:

  def __init__( self, **kwargs ):
    """ Constructor
    """
    if 'hosts' in kwargs:
      self.__hosts = kwargs['hosts']
      del kwargs['hosts']
    else:
      result = Registry.getHosts()
      if result['OK']:
        self.__hosts = result['Value']
      else:
        self.__hosts = []
    self.__kwargs = dict( kwargs )
    self.__pool = ThreadPool( len( self.__hosts ) )
    self.__resultDict = {}

  def __getattr__( self, name ):
    self.call = name
    return self.execute

  def __executeClient( self, host, method, *parms, **kwargs ):
    """ Execute RPC method on a given host
    """
    hostName = Registry.getHostOption( host, 'Host', host )
    client = SystemAdministratorClient( hostName, **self.__kwargs )
    result = getattr( client, method )( *parms, **kwargs )
    result['Host'] = host
    return result

  def __processResult( self, id_, result ):
    """ Collect results in the final structure
    """
    host = result['Host']
    del result['Host']
    self.__resultDict[host] = result

  def execute( self, *args, **kwargs ):
    """ Main execution method
    """
    self.__resultDict = {}
    for host in self.__hosts:
      self.__pool.generateJobAndQueueIt( self.__executeClient,
                                         args = [ host, self.call ] + list( args ),
                                         kwargs = kwargs,
                                         oCallback = self.__processResult )
    self.__pool.processAllResults()
    return S_OK( self.__resultDict )
class SystemAdministratorIntegrator:

    def __init__(self, **kwargs):
        """ Constructor
        """
        if 'hosts' in kwargs:
            self.__hosts = kwargs['hosts']
            del kwargs['hosts']
        else:
            result = Registry.getHosts()
            if result['OK']:
                self.__hosts = result['Value']
            else:
                self.__hosts = []
        self.__kwargs = dict(kwargs)
        self.__pool = ThreadPool(len(self.__hosts))
        self.__resultDict = {}

    def __getattr__(self, name):
        self.call = name
        return self.execute

    def __executeClient(self, host, method, *parms, **kwargs):
        """ Execute RPC method on a given host
        """
        hostName = Registry.getHostOption(host, 'Host', host)
        client = SystemAdministratorClient(hostName, **self.__kwargs)
        result = getattr(client, method)(*parms, **kwargs)
        result['Host'] = host
        return result

    def __processResult(self, id_, result):
        """ Collect results in the final structure
        """
        host = result['Host']
        del result['Host']
        self.__resultDict[host] = result

    def execute(self, *args, **kwargs):
        """ Main execution method
        """
        self.__resultDict = {}
        for host in self.__hosts:
            self.__pool.generateJobAndQueueIt(self.__executeClient,
                                              args=[host, self.call] + list(args),
                                              kwargs=kwargs,
                                              oCallback=self.__processResult)
        self.__pool.processAllResults()
        return S_OK(self.__resultDict)
def initialize(self): """ Standard initialize. """ maxNumberOfThreads = self.am_getOption('maxNumberOfThreads', self.__maxNumberOfThreads) self.threadPool = ThreadPool(maxNumberOfThreads, maxNumberOfThreads) self.siteClient = SiteStatus() self.clients['SiteStatus'] = self.siteClient self.clients['ResourceManagementClient'] = ResourceManagementClient() return S_OK()
def initialize( self ):
  #Build the URLs
  self._url = self._cfg.getURL()
  if not self._url:
    return S_ERROR( "Could not build service URL for %s" % self._name )
  gLogger.verbose( "Service URL is %s" % self._url )
  #Discover Handler
  self._handlerLocation = self._discoverHandlerLocation()
  if not self._handlerLocation:
    return S_ERROR( "Could not find handler location for %s" % self._name )
  gLogger.verbose( "Handler found at %s" % self._handlerLocation )
  #Load handler
  result = self._loadHandler()
  if not result[ 'OK' ]:
    return result
  self._handler = result[ 'Value' ]
  #Initialize lock manager
  self._lockManager = LockManager( self._cfg.getMaxWaitingPetitions() )
  #Load actions
  result = self._loadActions()
  if not result[ 'OK' ]:
    return result
  self._actions = result[ 'Value' ]
  self._initMonitoring()
  self._threadPool = ThreadPool( 1,
                                 max( 0, self._cfg.getMaxThreads() ),
                                 self._cfg.getMaxWaitingPetitions() )
  self._threadPool.daemonize()
  self._msgBroker = MessageBroker( "%sMSB" % self._name, threadPool = self._threadPool )
  #Create static dict
  self._serviceInfoDict = { 'serviceName' : self._name,
                            'URL' : self._cfg.getURL(),
                            'systemSectionPath' : self._cfg.getSystemPath(),
                            'serviceSectionPath' : self._cfg.getServicePath(),
                            'messageSender' : MessageSender( self._msgBroker ) }
  #Call static initialization function
  try:
    if self._handler[ 'init' ]:
      result = self._handler[ 'init' ]( dict( self._serviceInfoDict ) )
      if not isReturnStructure( result ):
        return S_ERROR( "Service initialization function must return S_OK/S_ERROR" )
      if not result[ 'OK' ]:
        return S_ERROR( "Error while initializing %s: %s" % ( self._name, result[ 'Message' ] ) )
  except Exception, e:
    errMsg = "Exception while initializing %s" % self._name
    gLogger.exception( errMsg )
    return S_ERROR( errMsg )
def __addPool(self, poolName):
    # create a new thread Pool, by default it has 2 executing threads and 40 requests
    # in the Queue
    if not poolName:
        return None
    if poolName in self.pools:
        return None
    pool = ThreadPool(self.am_getOption('minThreadsInPool'),
                      self.am_getOption('maxThreadsInPool'),
                      self.am_getOption('totalThreadsInPool'))
    # Daemonize except "Default" pool
    if poolName != 'Default':
        pool.daemonize()
    self.pools[poolName] = pool
    return poolName
def initialize(self):
    self.RequestDBClient = RequestClient()
    self.ReplicaManager = ReplicaManager()
    self.DataLog = DataLoggingClient()

    self.maxNumberOfThreads = self.am_getOption('NumberOfThreads', 1)
    self.threadPoolDepth = self.am_getOption('ThreadPoolDepth', 1)
    self.threadPool = ThreadPool(1, self.maxNumberOfThreads)

    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption('shifterProxy', 'DataManager')

    return S_OK()
def initialize( self ):

  # Attribute defined outside __init__
  # pylint: disable-msg=W0201

  try:
    self.rsClient = ResourceStatusClient()
    self.resourcesFreqs = CS.getTypedDictRootedAtOperations( 'CheckingFreqs/ResourcesFreqs' )
    self.resourcesToBeChecked = Queue.Queue()
    self.resourceNamesInCheck = []

    self.maxNumberOfThreads = self.am_getOption( 'maxThreadsInPool', 1 )
    self.threadPool = ThreadPool( self.maxNumberOfThreads, self.maxNumberOfThreads )
    if not self.threadPool:
      self.log.error( 'Can not create Thread Pool' )
      return S_ERROR( 'Can not create Thread Pool' )

    for _i in xrange( self.maxNumberOfThreads ):
      self.threadPool.generateJobAndQueueIt( self._executeCheck, args = ( None, ) )

    return S_OK()

  except Exception:
    errorStr = "RSInspectorAgent initialization"
    self.log.exception( errorStr )
    return S_ERROR( errorStr )
def initialize( self ):
  #Build the URLs
  self._url = self._cfg.getURL()
  if not self._url:
    return S_ERROR( "Could not build service URL for %s" % self._name )
  gLogger.verbose( "Service URL is %s" % self._url )
  #Load handler
  result = self._loadHandlerInit()
  if not result[ 'OK' ]:
    return result
  self._handler = result[ 'Value' ]
  #Initialize lock manager
  self._lockManager = LockManager( self._cfg.getMaxWaitingPetitions() )
  self._initMonitoring()
  self._threadPool = ThreadPool( max( 1, self._cfg.getMinThreads() ),
                                 max( 0, self._cfg.getMaxThreads() ),
                                 self._cfg.getMaxWaitingPetitions() )
  self._threadPool.daemonize()
  self._msgBroker = MessageBroker( "%sMSB" % self._name, threadPool = self._threadPool )
  #Create static dict
  self._serviceInfoDict = { 'serviceName' : self._name,
                            'serviceSectionPath' : PathFinder.getServiceSection( self._name ),
                            'URL' : self._cfg.getURL(),
                            'messageSender' : MessageSender( self._name, self._msgBroker ),
                            'validNames' : self._validNames,
                            'csPaths' : [ PathFinder.getServiceSection( svcName ) for svcName in self._validNames ] }
  #Call static initialization function
  try:
    self._handler[ 'class' ]._rh__initializeClass( dict( self._serviceInfoDict ),
                                                   self._lockManager,
                                                   self._msgBroker,
                                                   self._monitor )
    if self._handler[ 'init' ]:
      for initFunc in self._handler[ 'init' ]:
        gLogger.verbose( "Executing initialization function" )
        try:
          result = initFunc( dict( self._serviceInfoDict ) )
        except Exception as excp:
          gLogger.exception( "Exception while calling initialization function", lException = excp )
          return S_ERROR( "Exception while calling initialization function: %s" % str( excp ) )
        if not isReturnStructure( result ):
          return S_ERROR( "Service initialization function %s must return S_OK/S_ERROR" % initFunc )
        if not result[ 'OK' ]:
          return S_ERROR( "Error while initializing %s: %s" % ( self._name, result[ 'Message' ] ) )
  except Exception as e:
    errMsg = "Exception while initializing %s" % self._name
    gLogger.exception( e )
    gLogger.exception( errMsg )
    return S_ERROR( errMsg )
  #Load actions after the handler has initialized itself
  result = self._loadActions()
  if not result[ 'OK' ]:
    return result
  self._actions = result[ 'Value' ]
  gThreadScheduler.addPeriodicTask( 30, self.__reportThreadPoolContents )
  return S_OK()
def __addPool( self, poolName ):
  # create a new thread Pool, by default it has 2 executing threads and 40 requests
  # in the Queue
  if not poolName:
    return None
  if poolName in self.pools:
    return None
  pool = ThreadPool( self.am_getOption( 'minThreadsInPool' ),
                     self.am_getOption( 'maxThreadsInPool' ),
                     self.am_getOption( 'totalThreadsInPool' ) )
  # Daemonize except "Default" pool
  if poolName != 'Default':
    pool.daemonize()
  self.pools[poolName] = pool
  return poolName
def __init__(self, **kwargs): """ Constructor """ if 'hosts' in kwargs: self.__hosts = kwargs['hosts'] del kwargs['hosts'] else: result = Registry.getHosts() if result['OK']: self.__hosts = result['Value'] else: self.__hosts = [] self.__kwargs = dict(kwargs) self.__pool = ThreadPool(len(self.__hosts)) self.__resultDict = {}
def __init__( self, **kwargs ): """ Constructor """ if 'hosts' in kwargs: self.__hosts = kwargs['hosts'] del kwargs['hosts'] else: result = Registry.getHosts() if result['OK']: self.__hosts = result['Value'] else: self.__hosts = [] # Excluded hosts if 'exclude' in kwargs: self.__hosts = list ( set( self.__hosts ) - set( kwargs[ 'exclude' ] ) ) # Ping the hosts to remove those that don't have a SystemAdministrator service sysAdminHosts = [] for host in self.__hosts: client = SystemAdministratorClient( host ) result = client.ping() if result[ 'OK' ]: sysAdminHosts.append( host ) self.__hosts = sysAdminHosts self.__kwargs = dict( kwargs ) self.__pool = ThreadPool( len( self.__hosts ) ) self.__resultDict = {}
def initialize(self): """ Standard constructor """ try: self.rsDB = ResourceStatusDB() self.rmDB = ResourceManagementDB() self.ResourcesToBeChecked = Queue.Queue() self.ResourceNamesInCheck = [] self.maxNumberOfThreads = self.am_getOption('maxThreadsInPool', 1) self.threadPool = ThreadPool(self.maxNumberOfThreads, self.maxNumberOfThreads) if not self.threadPool: self.log.error('Can not create Thread Pool') return S_ERROR('Can not create Thread Pool') self.setup = getSetup()['Value'] self.VOExtension = getExt() configModule = __import__( self.VOExtension + "DIRAC.ResourceStatusSystem.Policy.Configurations", globals(), locals(), ['*']) self.Resources_check_freq = copy.deepcopy( configModule.Resources_check_freq) self.nc = NotificationClient() self.diracAdmin = DiracAdmin() self.csAPI = CSAPI() for i in xrange(self.maxNumberOfThreads): self.threadPool.generateJobAndQueueIt(self._executeCheck, args=(None, )) return S_OK() except Exception: errorStr = "RSInspectorAgent initialization" gLogger.exception(errorStr) return S_ERROR(errorStr)
def initialize(self): """ standard initialize """ # few parameters self.pluginLocation = self.am_getOption('PluginLocation', 'DIRAC.TransformationSystem.Agent.TransformationPlugin') self.transformationStatus = self.am_getOption('transformationStatus', ['Active', 'Completing', 'Flush']) # Prepare to change the name of the CS option as MaxFiles is ambiguous self.maxFiles = self.am_getOption('MaxFilesToProcess', self.am_getOption('MaxFiles', 5000)) agentTSTypes = self.am_getOption('TransformationTypes', []) if agentTSTypes: self.transformationTypes = sorted(agentTSTypes) else: dataProc = Operations().getValue('Transformations/DataProcessing', ['MCSimulation', 'Merge']) dataManip = Operations().getValue('Transformations/DataManipulation', ['Replication', 'Removal']) self.transformationTypes = sorted(dataProc + dataManip) # clients self.transfClient = TransformationClient() # for caching using a pickle file self.workDirectory = self.am_getWorkDirectory() self.cacheFile = os.path.join(self.workDirectory, 'ReplicaCache.pkl') self.controlDirectory = self.am_getControlDirectory() # remember the offset if any in TS self.lastFileOffset = {} # Validity of the cache self.replicaCache = {} self.replicaCacheValidity = self.am_getOption('ReplicaCacheValidity', 2) self.noUnusedDelay = self.am_getOption('NoUnusedDelay', 6) # Get it threaded maxNumberOfThreads = self.am_getOption('maxThreadsInPool', 1) threadPool = ThreadPool(maxNumberOfThreads, maxNumberOfThreads) self.log.info("Multithreaded with %d threads" % maxNumberOfThreads) for i in xrange(maxNumberOfThreads): threadPool.generateJobAndQueueIt(self._execute, [i]) self.log.info("Will treat the following transformation types: %s" % str(self.transformationTypes)) return S_OK()
def initialize(self):
    requiredLifeTime = self.am_getOption("MinimumLifeTime", 3600)
    renewedLifeTime = self.am_getOption("RenewedLifeTime", 54000)
    myProxyServer = gConfig.getValue("/DIRAC/VOPolicy/MyProxyServer", "myproxy.cern.ch")
    self.proxyDB = ProxyDB(requireVoms=True,
                           useMyProxy=True)
    gLogger.info("Minimum Life time : %s" % requiredLifeTime)
    gLogger.info("Life time on renew : %s" % renewedLifeTime)
    gLogger.info("MyProxy server : %s" % self.proxyDB.getMyProxyServer())
    gLogger.info("MyProxy max proxy time : %s" % self.proxyDB.getMyProxyMaxLifeTime())
    self.__threadPool = ThreadPool(1, 10)
    return S_OK()
def initialize( self ): """ Standard initialize. """ maxNumberOfThreads = self.am_getOption( 'maxNumberOfThreads', self.__maxNumberOfThreads ) self.threadPool = ThreadPool( maxNumberOfThreads, maxNumberOfThreads ) self.elementType = self.am_getOption( 'elementType', self.elementType ) self.rsClient = ResourceStatusClient() self.clients[ 'ResourceStatusClient' ] = self.rsClient self.clients[ 'ResourceManagementClient' ] = ResourceManagementClient() if not self.elementType: return S_ERROR( 'Missing elementType' ) return S_OK()
def initialize( self ):
  #Build the URLs
  self._url = self._cfg.getURL()
  if not self._url:
    return S_ERROR( "Could not build service URL for %s" % GatewayService.GATEWAY_NAME )
  gLogger.verbose( "Service URL is %s" % self._url )
  #Discover Handler
  self._initMonitoring()
  self._threadPool = ThreadPool( 1,
                                 max( 0, self._cfg.getMaxThreads() ),
                                 self._cfg.getMaxWaitingPetitions() )
  self._threadPool.daemonize()
  self._msgBroker = MessageBroker( "%sMSB" % GatewayService.GATEWAY_NAME, threadPool = self._threadPool )
  self._msgBroker.useMessageObjects( False )
  getGlobalMessageBroker().useMessageObjects( False )
  self._msgForwarder = MessageForwarder( self._msgBroker )
  return S_OK()
def initialize( self ): """ standard initialize """ # few parameters self.pluginLocation = self.am_getOption( 'PluginLocation', 'DIRAC.TransformationSystem.Agent.TransformationPlugin' ) self.transformationStatus = self.am_getOption( 'transformationStatus', ['Active', 'Completing', 'Flush'] ) self.maxFiles = self.am_getOption( 'MaxFiles', 5000 ) agentTSTypes = self.am_getOption( 'TransformationTypes', [] ) if agentTSTypes: self.transformationTypes = sorted( agentTSTypes ) else: dataProc = Operations().getValue( 'Transformations/DataProcessing', ['MCSimulation', 'Merge'] ) dataManip = Operations().getValue( 'Transformations/DataManipulation', ['Replication', 'Removal'] ) self.transformationTypes = sorted( dataProc + dataManip ) # clients self.transfClient = TransformationClient() # shifter self.am_setOption( 'shifterProxy', 'ProductionManager' ) # for caching using a pickle file self.__readCache() self.workDirectory = self.am_getWorkDirectory() self.cacheFile = os.path.join( self.workDirectory, 'ReplicaCache.pkl' ) self.controlDirectory = self.am_getControlDirectory() self.replicaCacheValidity = self.am_getOption( 'ReplicaCacheValidity', 2 ) self.noUnusedDelay = self.am_getOption( 'NoUnusedDelay', 6 ) self.dateWriteCache = datetime.datetime.utcnow() # Get it threaded maxNumberOfThreads = self.am_getOption( 'maxThreadsInPool', 1 ) threadPool = ThreadPool( maxNumberOfThreads, maxNumberOfThreads ) self.log.info( "Multithreaded with %d threads" % maxNumberOfThreads ) for i in xrange( maxNumberOfThreads ): threadPool.generateJobAndQueueIt( self._execute, [i] ) self.log.info( "Will treat the following transformation types: %s" % str( self.transformationTypes ) ) return S_OK()
def initialize( self ): """ standard initialize """ self.__readCache() self.dateWriteCache = datetime.datetime.utcnow() self.am_setOption( 'shifterProxy', 'ProductionManager' ) # Get it threaded maxNumberOfThreads = self.am_getOption( 'maxThreadsInPool', 1 ) threadPool = ThreadPool( maxNumberOfThreads, maxNumberOfThreads ) self.log.info( "Multithreaded with %d threads" % maxNumberOfThreads ) for i in xrange( maxNumberOfThreads ): threadPool.generateJobAndQueueIt( self._execute, [i] ) return S_OK()
def initialize(self): """Sets default parameters """ self.jobDB = JobDB() self.logDB = JobLoggingDB() self.am_setOption('PollingTime', 60 * 60) if not self.am_getOption('Enable', True): self.log.info('Stalled Job Agent running in disabled mode') # setting up the threading maxNumberOfThreads = self.am_getOption('MaxNumberOfThreads', 15) threadPool = ThreadPool(maxNumberOfThreads, maxNumberOfThreads) self.log.verbose("Multithreaded with %d threads" % maxNumberOfThreads) for _ in range(maxNumberOfThreads): threadPool.generateJobAndQueueIt(self._execute) return S_OK()
def __init__(self, VOExtension, rsDBIn = None, commandCallerIn = None, infoGetterIn = None,
             WMSAdminIn = None):
  """ Standard constructor

  :params:
    :attr:`VOExtension`: string, VO Extension (e.g. 'LHCb')

    :attr:`rsDBIn`: optional ResourceStatusDB object
    (see :class: `DIRAC.ResourceStatusSystem.DB.ResourceStatusDB.ResourceStatusDB`)

    :attr:`commandCallerIn`: optional CommandCaller object
    (see :class: `DIRAC.ResourceStatusSystem.Command.CommandCaller.CommandCaller`)

    :attr:`infoGetterIn`: optional InfoGetter object
    (see :class: `DIRAC.ResourceStatusSystem.Utilities.InfoGetter.InfoGetter`)

    :attr:`WMSAdminIn`: optional RPCClient object for WMSAdmin
    (see :class: `DIRAC.Core.DISET.RPCClient.RPCClient`)
  """
  self.configModule = Utils.voimport("DIRAC.ResourceStatusSystem.Policy.Configurations", VOExtension)

  if rsDBIn is not None:
    self.rsDB = rsDBIn
  else:
    from DIRAC.ResourceStatusSystem.DB.ResourceStatusDB import ResourceStatusDB
    self.rsDB = ResourceStatusDB()

  from DIRAC.ResourceStatusSystem.DB.ResourceManagementDB import ResourceManagementDB
  self.rmDB = ResourceManagementDB()

  if commandCallerIn is not None:
    self.cc = commandCallerIn
  else:
    from DIRAC.ResourceStatusSystem.Command.CommandCaller import CommandCaller
    self.cc = CommandCaller()

  if infoGetterIn is not None:
    self.ig = infoGetterIn
  else:
    from DIRAC.ResourceStatusSystem.Utilities.InfoGetter import InfoGetter
    self.ig = InfoGetter(VOExtension)

  if WMSAdminIn is not None:
    self.WMSAdmin = WMSAdminIn
  else:
    from DIRAC.Core.DISET.RPCClient import RPCClient
    self.WMSAdmin = RPCClient("WorkloadManagement/WMSAdministrator")

  self.threadPool = ThreadPool( 2, 5 )
  self.lockObj = threading.RLock()
  self.infoForPanel_res = {}
def initialize(self): """ Make the necessary initializations. The ThreadPool is created here, the _execute() method is what each thread will execute. """ self.fullUpdatePeriod = self.am_getOption('FullUpdatePeriod', self.fullUpdatePeriod) self.bkUpdateLatency = self.am_getOption('BKUpdateLatency', self.bkUpdateLatency) self.debug = self.am_getOption('verbose', self.debug) self.pickleFile = os.path.join(self.am_getWorkDirectory(), self.pickleFile) self.chunkSize = self.am_getOption('maxFilesPerChunk', self.chunkSize) self.pluginsWithNoRunInfo = Operations().getValue('TransformationPlugins/PluginsWithNoRunInfo', self.pluginsWithNoRunInfo) self._logInfo('Full Update Period: %d seconds' % self.fullUpdatePeriod) self._logInfo('BK update latency : %d seconds' % self.bkUpdateLatency) self._logInfo('Plugins with no run info: %s' % ', '.join(self.pluginsWithNoRunInfo)) self.transClient = TransformationClient() self.bkClient = BookkeepingClient() try: with open(self.pickleFile, 'r') as pf: self.timeLog = pickle.load(pf) self.fullTimeLog = pickle.load(pf) self.bkQueries = pickle.load(pf) self._logInfo("successfully loaded Log from", self.pickleFile, "initialize") except (EOFError, IOError): self._logInfo("failed loading Log from", self.pickleFile, "initialize") self.timeLog = {} self.fullTimeLog = {} self.bkQueries = {} maxNumberOfThreads = self.am_getOption('maxThreadsInPool', 1) threadPool = ThreadPool(maxNumberOfThreads, maxNumberOfThreads) for i in xrange(maxNumberOfThreads): threadPool.generateJobAndQueueIt(self._execute, [i]) gMonitor.registerActivity("Iteration", "Agent Loops", AGENT_NAME, "Loops/min", gMonitor.OP_SUM) return S_OK()
def __addPool(self, poolName): """ create a new thread Pool, by default it has 2 executing threads and 40 requests in the Queue """ if not poolName: return None if poolName in self.pools: return None pool = ThreadPool( self.am_getOption("minThreadsInPool"), self.am_getOption("maxThreadsInPool"), self.am_getOption("totalThreadsInPool"), ) # Daemonize except "Default" pool if poolName != "Default": pool.daemonize() self.pools[poolName] = pool return poolName
def initialize( self ): """ standard initialize """ self.__readCache() self.dateWriteCache = datetime.datetime.utcnow() self.am_setOption( 'shifterProxy', 'ProductionManager' ) # Get it threaded maxNumberOfThreads = self.am_getOption( 'maxThreadsInPool', 1 ) threadPool = ThreadPool( maxNumberOfThreads, maxNumberOfThreads ) self.log.info( "Multithreaded with %d threads" % maxNumberOfThreads ) for i in xrange( maxNumberOfThreads ): threadPool.generateJobAndQueueIt( self._execute, [i] ) self.log.info( "Will treat the following transformation types: %s" % str( self.transformationTypes ) ) return S_OK()
def initialize( self ): """ agent's initialisation """ self.transferDB = TransferDB() self.am_setOption( "shifterProxy", "DataManager" ) self.minThreads = self.am_getOption( "MinThreads", self.minThreads ) self.maxThreads = self.am_getOption( "MaxThreads", self.maxThreads ) minmax = ( abs( self.minThreads ), abs( self.maxThreads ) ) self.minThreads, self.maxThreads = min( minmax ), max( minmax ) self.log.info( "ThreadPool min threads = %s" % self.minThreads ) self.log.info( "ThreadPool max threads = %s" % self.maxThreads ) self.threadPool = ThreadPool( self.minThreads, self.maxThreads ) self.threadPool.daemonize() return S_OK()
def initialize( self ): """ Standard initialize. """ maxNumberOfThreads = self.am_getOption( 'maxNumberOfThreads', self.__maxNumberOfThreads ) self.threadPool = ThreadPool( maxNumberOfThreads, maxNumberOfThreads ) self.siteClient = SiteStatus() self.clients['SiteStatus'] = self.siteClient self.clients['ResourceManagementClient'] = ResourceManagementClient() return S_OK()
def initialize( self ): """ Called by the framework upon startup, before any cycle (execute method bellow) """ self.requestDBClient = RequestClient() # the RequestAgentMixIn needs the capitalized version, until is is fixed keep this. self.RequestDBClient = self.requestDBClient self.replicaManager = ReplicaManager() gMonitor.registerActivity( "Iteration", "Agent Loops", "RemovalAgent", "Loops/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "Execute", "Request Processed", "RemovalAgent", "Requests/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "Done", "Request Completed", "RemovalAgent", "Requests/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "PhysicalRemovalAtt", "Physical removals attempted", "RemovalAgent", "Removal/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "PhysicalRemovalDone", "Successful physical removals", "RemovalAgent", "Removal/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "PhysicalRemovalFail", "Failed physical removals", "RemovalAgent", "Removal/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "PhysicalRemovalSize", "Physically removed size", "RemovalAgent", "Bytes", gMonitor.OP_ACUM ) gMonitor.registerActivity( "ReplicaRemovalAtt", "Replica removal attempted", "RemovalAgent", "Removal/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "ReplicaRemovalDone", "Successful replica removals", "RemovalAgent", "Removal/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "ReplicaRemovalFail", "Failed replica removals", "RemovalAgent", "Removal/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "RemoveFileAtt", "File removal attempted", "RemovalAgent", "Removal/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "RemoveFileDone", "File removal done", "RemovalAgent", "Removal/min", gMonitor.OP_SUM ) gMonitor.registerActivity( "RemoveFileFail", "File removal failed", "RemovalAgent", "Removal/min", gMonitor.OP_SUM ) self.maxNumberOfThreads = self.am_getOption( 'NumberOfThreads', self.maxNumberOfThreads ) self.maxRequestsInQueue = self.am_getOption( 'RequestsInQueue', self.maxRequestsInQueue ) self.threadPool = ThreadPool( 1, self.maxNumberOfThreads, self.maxRequestsInQueue ) # Set the ThreadPool in daemon mode to process new ThreadedJobs as they are inserted self.threadPool.daemonize() self.maxRequests = self.am_getOption( 'MaxRequestsPerCycle', 1200. ) # This sets the Default Proxy to used as that defined under # /Operations/Shifter/DataManager # the shifterProxy option in the Configuration can be used to change this default. self.am_setOption( 'shifterProxy', 'DataManager' ) return S_OK()
def __init__(self, csPath=""): self.log = gLogger.getSubLogger("OutputDataExecutor") if not csPath: vo = gConfig.getValue("/DIRAC/VirtualOrganization", "") self.__transfersCSPath = '/Operations/%s/OutputData' % vo else: self.__transfersCSPath = csPath self.log.verbose("Reading transfer paths from %s" % self.__transfersCSPath) self.__requiredCSOptions = [ 'InputPath', 'InputFC', 'OutputPath', 'OutputFC', 'OutputSE' ] self.__threadPool = ThreadPool( gConfig.getValue("%s/MinTransfers" % self.__transfersCSPath, 1), gConfig.getValue("%s/MaxTransfers" % self.__transfersCSPath, 4), gConfig.getValue("%s/MaxQueuedTransfers" % self.__transfersCSPath, 100)) self.__threadPool.daemonize() self.__processingFiles = set() self.__okTransferredFiles = 0 self.__okTransferredBytes = 0 self.__failedFiles = {}
def initialize(self):
  requiredLifeTime = self.am_getOption( "MinimumLifeTime", 3600 )
  renewedLifeTime = self.am_getOption( "RenewedLifeTime", 54000 )
  myProxyServer = gConfig.getValue( "/DIRAC/VOPolicy/MyProxyServer", "myproxy.cern.ch" )
  self.proxyDB = ProxyDB( useMyProxy = True )
  gLogger.info( "Minimum Life time : %s" % requiredLifeTime )
  gLogger.info( "Life time on renew : %s" % renewedLifeTime )
  gLogger.info( "MyProxy server : %s" % self.proxyDB.getMyProxyServer() )
  gLogger.info( "MyProxy max proxy time : %s" % self.proxyDB.getMyProxyMaxLifeTime() )
  self.__threadPool = ThreadPool( 1, 10 )
  return S_OK()
def initialize(self):
    '''
      Standard initialize.
      Uses the ProductionManager shifterProxy to modify the ResourceStatus DB
    '''
    self.maxNumberOfThreads = self.am_getOption('maxNumberOfThreads', self.maxNumberOfThreads)
    self.elementType = self.am_getOption('elementType', self.elementType)
    self.checkingFreqs = self.am_getOption('checkingFreqs', self.checkingFreqs)
    self.limitQueueFeeder = self.am_getOption('limitQueueFeeder', self.limitQueueFeeder)

    self.elementsToBeChecked = Queue.Queue()
    self.threadPool = ThreadPool(self.maxNumberOfThreads, self.maxNumberOfThreads)

    self.rsClient = ResourceStatusClient()
    self.clients['ResourceStatusClient'] = self.rsClient
    self.clients['ResourceManagementClient'] = ResourceManagementClient()

    return S_OK()
def initialize(self):
    # Build the URLs
    self._url = self._cfg.getURL()
    if not self._url:
        return S_ERROR("Could not build service URL for %s" % GatewayService.GATEWAY_NAME)
    gLogger.verbose("Service URL is %s" % self._url)
    # Discover Handler
    self._initMonitoring()
    self._threadPool = ThreadPool(1, max(0, self._cfg.getMaxThreads()), self._cfg.getMaxWaitingPetitions())
    self._threadPool.daemonize()
    self._msgBroker = MessageBroker("%sMSB" % GatewayService.GATEWAY_NAME, threadPool=self._threadPool)
    self._msgBroker.useMessageObjects(False)
    getGlobalMessageBroker().useMessageObjects(False)
    self._msgForwarder = MessageForwarder(self._msgBroker)
    return S_OK()
def __init__( self, **kwargs ): """ Constructor """ if 'hosts' in kwargs: self.__hosts = kwargs['hosts'] del kwargs['hosts'] else: result = Registry.getHosts() if result['OK']: self.__hosts = result['Value'] else: self.__hosts = [] self.__kwargs = dict( kwargs ) self.__pool = ThreadPool( len( self.__hosts ) ) self.__resultDict = {}
def initialize( self ):
  self.RequestDBClient = RequestClient()
  self.ReplicaManager = ReplicaManager()
  self.DataLog = DataLoggingClient()

  self.maxNumberOfThreads = self.am_getOption( 'NumberOfThreads', 1 )
  self.threadPoolDepth = self.am_getOption( 'ThreadPoolDepth', 1 )
  self.threadPool = ThreadPool( 1, self.maxNumberOfThreads )

  # This sets the Default Proxy to used as that defined under
  # /Operations/Shifter/DataManager
  # the shifterProxy option in the Configuration can be used to change this default.
  self.am_setOption( 'shifterProxy', 'DataManager' )

  return S_OK()
def initialize(self):
    # Build the URLs
    self._url = self._cfg.getURL()
    if not self._url:
        return S_ERROR("Could not build service URL for %s" % self._name)
    gLogger.verbose("Service URL is %s" % self._url)
    # Load handler
    result = self._loadHandlerInit()
    if not result["OK"]:
        return result
    self._handler = result["Value"]
    # Initialize lock manager
    self._lockManager = LockManager(self._cfg.getMaxWaitingPetitions())
    self._initMonitoring()

    self._threadPool = ThreadPool(1, max(0, self._cfg.getMaxThreads()), self._cfg.getMaxWaitingPetitions())
    self._threadPool.daemonize()
    self._msgBroker = MessageBroker("%sMSB" % self._name, threadPool=self._threadPool)
    # Create static dict
    self._serviceInfoDict = {
        "serviceName": self._name,
        "serviceSectionPath": PathFinder.getServiceSection(self._name),
        "URL": self._cfg.getURL(),
        "messageSender": MessageSender(self._name, self._msgBroker),
        "validNames": self._validNames,
        "csPaths": [PathFinder.getServiceSection(svcName) for svcName in self._validNames],
    }
    # Call static initialization function
    try:
        self._handler["class"]._rh__initializeClass(
            dict(self._serviceInfoDict), self._lockManager, self._msgBroker, self._monitor
        )
        if self._handler["init"]:
            for initFunc in self._handler["init"]:
                gLogger.verbose("Executing initialization function")
                try:
                    result = initFunc(dict(self._serviceInfoDict))
                except Exception, excp:
                    gLogger.exception("Exception while calling initialization function")
                    return S_ERROR("Exception while calling initialization function: %s" % str(excp))
                if not isReturnStructure(result):
                    return S_ERROR("Service initialization function %s must return S_OK/S_ERROR" % initFunc)
                if not result["OK"]:
                    return S_ERROR("Error while initializing %s: %s" % (self._name, result["Message"]))
    except Exception, e:
        errMsg = "Exception while initializing %s" % self._name
        gLogger.exception(errMsg)
        return S_ERROR(errMsg)
def __init__( self, csPath = "" ): self.log = gLogger.getSubLogger( "OutputDataExecutor" ) if not csPath: vo = gConfig.getValue( "/DIRAC/VirtualOrganization", "" ) self.__transfersCSPath = '/Operations/%s/OutputData' % vo else: self.__transfersCSPath = csPath self.log.verbose( "Reading transfer paths from %s" % self.__transfersCSPath ) self.__requiredCSOptions = ['InputPath', 'InputFC', 'OutputPath', 'OutputFC', 'OutputSE'] self.__threadPool = ThreadPool( gConfig.getValue( "%s/MinTransfers" % self.__transfersCSPath, 1 ), gConfig.getValue( "%s/MaxTransfers" % self.__transfersCSPath, 4 ), gConfig.getValue( "%s/MaxQueuedTransfers" % self.__transfersCSPath, 100 ) ) self.__threadPool.daemonize() self.__processingFiles = set() self.__okTransferredFiles = 0 self.__okTransferredBytes = 0 self.__failedFiles = {}
def __init__(self, **kwargs): """ Constructor """ if 'hosts' in kwargs: self.__hosts = kwargs['hosts'] del kwargs['hosts'] else: result = Registry.getHosts() if result['OK']: self.__hosts = result['Value'] else: self.__hosts = [] # Excluded hosts if 'exclude' in kwargs: self.__hosts = list(set(self.__hosts) - set(kwargs['exclude'])) # Ping the hosts to remove those that don't have a SystemAdministrator service sysAdminHosts = [] self.silentHosts = [] self.__resultDict = {} self.__kwargs = {} pool = ThreadPool(len(self.__hosts)) for host in self.__hosts: pool.generateJobAndQueueIt(self.__executeClient, args=[host, "ping"], kwargs={}, oCallback=self.__processResult) pool.processAllResults() for host, result in self.__resultDict.items(): if result['OK']: sysAdminHosts.append(host) else: self.silentHosts.append(host) del pool self.__hosts = sysAdminHosts self.__kwargs = dict(kwargs) self.__pool = ThreadPool(len(self.__hosts)) self.__resultDict = {}
def initialize(self): """ Standard constructor """ try: self.rsDB = ResourceStatusDB() self.rmDB = ResourceManagementDB() self.ServicesToBeChecked = Queue.Queue() self.ServiceNamesInCheck = [] self.maxNumberOfThreads = self.am_getOption( 'maxThreadsInPool', 1 ) self.threadPool = ThreadPool( self.maxNumberOfThreads, self.maxNumberOfThreads ) if not self.threadPool: self.log.error('Can not create Thread Pool') return S_ERROR('Can not create Thread Pool') self.setup = getSetup()['Value'] self.VOExtension = getExt() configModule = __import__(self.VOExtension+"DIRAC.ResourceStatusSystem.Policy.Configurations", globals(), locals(), ['*']) self.Services_check_freq = copy.deepcopy(configModule.Services_check_freq) self.nc = NotificationClient() self.diracAdmin = DiracAdmin() self.csAPI = CSAPI() for i in xrange(self.maxNumberOfThreads): self.threadPool.generateJobAndQueueIt(self._executeCheck, args = (None, ) ) return S_OK() except Exception: errorStr = "SeSInspectorAgent initialization" gLogger.exception(errorStr) return S_ERROR(errorStr)
def initialize( self ):
  '''
    Standard initialize.
    Uses the ProductionManager shifterProxy to modify the ResourceStatus DB
  '''
  self.maxNumberOfThreads = self.am_getOption( 'maxNumberOfThreads', self.maxNumberOfThreads )
  self.elementType = self.am_getOption( 'elementType', self.elementType )
  self.checkingFreqs = self.am_getOption( 'checkingFreqs', self.checkingFreqs )
  self.limitQueueFeeder = self.am_getOption( 'limitQueueFeeder', self.limitQueueFeeder )

  self.elementsToBeChecked = Queue.Queue()
  self.threadPool = ThreadPool( self.maxNumberOfThreads, self.maxNumberOfThreads )

  self.rsClient = ResourceStatusClient()
  self.clients[ 'ResourceStatusClient' ] = self.rsClient
  self.clients[ 'ResourceManagementClient' ] = ResourceManagementClient()

  return S_OK()
def _updateServiceConfiguration(self, urlSet, fromMaster=False):
    """ Update configuration in a set of service in parallel

    :param set urlSet: a set of service URLs
    :param fromMaster: flag to force updating from the master CS
    :return: Nothing
    """
    pool = ThreadPool(len(urlSet))
    for url in urlSet:
        pool.generateJobAndQueueIt(self._forceServiceUpdate,
                                   args=[url, fromMaster],
                                   kwargs={},
                                   oCallback=self.__processResults)
    pool.processAllResults()
def __updateServiceConfiguration(self, urlSet, fromMaster=False):
    """ Update configuration in a set of service in parallel

    :param set urlSet: a set of service URLs
    :param fromMaster: flag to force updating from the master CS
    :return: S_OK/S_ERROR, Value Successful/Failed dict with service URLs
    """
    pool = ThreadPool(len(urlSet))
    for url in urlSet:
        pool.generateJobAndQueueIt(self.__forceServiceUpdate,
                                   args=[url, fromMaster],
                                   kwargs={},
                                   oCallback=self.__processResults)
    pool.processAllResults()
    return S_OK(self.__updateResultDict)
def initialize( self ): """ This replaces the standard initialize from Service """ #Build the URLs self._url = self._cfg.getURL() if not self._url: return S_ERROR( "Could not build service URL for %s" % GatewayService.GATEWAY_NAME ) gLogger.verbose( "Service URL is %s" % self._url ) #Load handler result = self._loadHandlerInit() if not result[ 'OK' ]: return result self._handler = result[ 'Value' ] #Discover Handler self._threadPool = ThreadPool( 1, max( 0, self._cfg.getMaxThreads() ), self._cfg.getMaxWaitingPetitions() ) self._threadPool.daemonize() self._msgBroker = MessageBroker( "%sMSB" % GatewayService.GATEWAY_NAME, threadPool = self._threadPool ) self._msgBroker.useMessageObjects( False ) getGlobalMessageBroker().useMessageObjects( False ) self._msgForwarder = MessageForwarder( self._msgBroker ) return S_OK()
def initialize( self ):
  self.RequestDBClient = RequestClient()
  self.ReplicaManager = ReplicaManager()
  self.DataLog = DataLoggingClient()

  gMonitor.registerActivity( "Iteration", "Agent Loops", "TransferAgent", "Loops/min", gMonitor.OP_SUM )
  gMonitor.registerActivity( "Execute", "Request Processed", "TransferAgent", "Requests/min", gMonitor.OP_SUM )
  gMonitor.registerActivity( "Done", "Request Completed", "TransferAgent", "Requests/min", gMonitor.OP_SUM )

  gMonitor.registerActivity( "Replicate and register", "Replicate and register operations",
                             "TransferAgent", "Attempts/min", gMonitor.OP_SUM )
  gMonitor.registerActivity( "Replicate", "Replicate operations",
                             "TransferAgent", "Attempts/min", gMonitor.OP_SUM )
  gMonitor.registerActivity( "Put and register", "Put and register operations",
                             "TransferAgent", "Attempts/min", gMonitor.OP_SUM )
  gMonitor.registerActivity( "Put", "Put operations",
                             "TransferAgent", "Attempts/min", gMonitor.OP_SUM )

  gMonitor.registerActivity( "Replication successful", "Successful replications",
                             "TransferAgent", "Successful/min", gMonitor.OP_SUM )
  gMonitor.registerActivity( "Put successful", "Successful puts",
                             "TransferAgent", "Successful/min", gMonitor.OP_SUM )

  gMonitor.registerActivity( "Replication failed", "Failed replications",
                             "TransferAgent", "Failed/min", gMonitor.OP_SUM )
  gMonitor.registerActivity( "Put failed", "Failed puts",
                             "TransferAgent", "Failed/min", gMonitor.OP_SUM )

  gMonitor.registerActivity( "Replica registration successful", "Successful replica registrations",
                             "TransferAgent", "Successful/min", gMonitor.OP_SUM )
  gMonitor.registerActivity( "File registration successful", "Successful file registrations",
                             "TransferAgent", "Successful/min", gMonitor.OP_SUM )

  gMonitor.registerActivity( "Replica registration failed", "Failed replica registrations",
                             "TransferAgent", "Failed/min", gMonitor.OP_SUM )
  gMonitor.registerActivity( "File registration failed", "Failed file registrations",
                             "TransferAgent", "Failed/min", gMonitor.OP_SUM )

  self.maxNumberOfThreads = self.am_getOption( 'NumberOfThreads', 1 )
  self.threadPoolDepth = self.am_getOption( 'ThreadPoolDepth', 1 )
  self.threadPool = ThreadPool( 1, self.maxNumberOfThreads )

  # This sets the Default Proxy to used as that defined under
  # /Operations/Shifter/DataManager
  # the shifterProxy option in the Configuration can be used to change this default.
  self.am_setOption( 'shifterProxy', 'DataManager' )

  return S_OK()
def __init__( self, **kwargs ): """ Constructor """ if 'hosts' in kwargs: self.__hosts = kwargs['hosts'] del kwargs['hosts'] else: result = Registry.getHosts() if result['OK']: self.__hosts = result['Value'] else: self.__hosts = [] # Excluded hosts if 'exclude' in kwargs: self.__hosts = list ( set( self.__hosts ) - set( kwargs[ 'exclude' ] ) ) # Ping the hosts to remove those that don't have a SystemAdministrator service sysAdminHosts = [] self.silentHosts = [] self.__resultDict = {} self.__kwargs = {} pool = ThreadPool( len( self.__hosts ) ) for host in self.__hosts: pool.generateJobAndQueueIt( self.__executeClient, args = [ host, "ping" ], kwargs = {}, oCallback = self.__processResult ) pool.processAllResults() for host, result in self.__resultDict.items(): if result['OK']: sysAdminHosts.append( host ) else: self.silentHosts.append( host ) del pool self.__hosts = sysAdminHosts self.__kwargs = dict( kwargs ) self.__pool = ThreadPool( len( self.__hosts ) ) self.__resultDict = {}