def initComponents(self, configPath):
    """
    _initComponents_

    Start up the various WMAgent components (JobCreator, JobSubmitter,
    JobTracker, etc.) using the configuration file at ``configPath``.

    Before instantiating anything, the work directories hard-coded into the
    config are rewritten to point at ``self.testDir``, and the couch
    URLs/database names are redirected to the test couch instance.

    :param configPath: filesystem path to a WMAgent configuration file.
    :raises Exception: if no file exists at ``configPath``.
    """
    if os.path.isfile(configPath):
        # Read the config
        config = loadConfigurationFile(configPath)
    else:
        msg = "No config file at desired location"
        logging.error(msg)
        raise Exception(msg)

    masterConfig = self.testInit.getConfiguration()

    config.Agent.useHeartbeat = False
    config.CoreDatabase.socket = masterConfig.CoreDatabase.socket
    config.CoreDatabase.connectUrl = masterConfig.CoreDatabase.connectUrl

    # Have to do this because the agent hard codes its dirs
    oldWorkDir = config.General.workDir

    # NOTE(review): oldWorkDir is used as a regex pattern below; if the
    # path ever contains regex metacharacters this should use re.escape().
    for compName in (config.listComponents_() + config.listWebapps_()):
        component = getattr(config, compName)
        for var in component.listSections_():
            value = getattr(component, var)
            if isinstance(value, str):
                if re.search(oldWorkDir, value):
                    # Redirect the hard-coded work dir to the test dir
                    setattr(component, var,
                            value.replace(oldWorkDir, self.testDir))
            elif isinstance(value, list):
                # BUGFIX: the original called .replace() on the list object
                # itself (an AttributeError at runtime) and also mutated the
                # list while iterating over it.  Rebuild the list instead.
                newValue = [element.replace(oldWorkDir, self.testDir)
                            if isinstance(element, str) and re.search(oldWorkDir, element)
                            else element
                            for element in value]
                setattr(component, var, newValue)
            elif isinstance(value, dict):
                for key in value:
                    if isinstance(value[key], str) and re.search(oldWorkDir, value[key]):
                        value[key] = value[key].replace(oldWorkDir, self.testDir)
                setattr(component, var, value)

    compList = (config.listComponents_() + config.listWebapps_())
    components = []

    # Point every couch-backed service at the test couch instance
    config.JobStateMachine.couchurl = os.environ['COUCHURL']
    config.JobStateMachine.couchDBName = self.dbName
    config.ACDC.couchurl = os.environ['COUCHURL']
    config.ACDC.database = '%s/acdc' % self.dbName
    config.TaskArchiver.workloadSummaryCouchDBName = '%s/workloadsummary' % self.dbName
    config.TaskArchiver.workloadSummaryCouchURL = os.environ['COUCHURL']
    if hasattr(config, 'WorkQueueManager'):
        config.WorkQueueManager.couchurl = os.environ['COUCHURL']
        config.WorkQueueManager.dbname = '%s/workqueue' % self.dbName
    if hasattr(config, 'WorkloadSummary'):
        config.WorkloadSummary.couchurl = os.environ['COUCHURL']
        config.WorkloadSummary.database = '%s/workloadsummary' % self.dbName

    # Get all components
    components.append(JobCreator(config=config))
    components.append(JobSubmitter(config=config))
    components.append(JobTracker(config=config))
    components.append(JobAccountant(config=config))
    components.append(JobArchiver(config=config))
    components.append(TaskArchiver(config=config))
    components.append(ErrorHandler(config=config))
    components.append(RetryManager(config=config))
    components.append(DBSUpload(config=config))

    # Now the optional ones
    if 'PhEDExInjector' in compList:
        components.append(PhEDExInjector(config=config))

    # Init threads:
    for component in components:
        component.initInThread()

    # preInitialize
    for component in components:
        component.preInitialization()

    # Tell each component to shut down again once it has been exercised
    for component in components:
        component.prepareToStop()

    return
def testE_FullChain(self):
    """
    _FullChain_

    Full test going through the chain; using polling cycles and everything
    """
    # NOTE(review): this bare ``return`` disables the whole test -- every
    # statement below it is dead code.  This definition is also shadowed by
    # a later method of the same name in this file (which uses
    # SimpleCondorPlugin), so this version never runs at all.  Consider
    # deleting it or marking it with unittest.skip instead.
    return
    from WMComponent.JobSubmitter.JobSubmitter import JobSubmitter
    from WMComponent.JobStatusLite.JobStatusLite import JobStatusLite
    from WMComponent.JobTracker.JobTracker import JobTracker

    myThread = threading.currentThread()

    # Refuse to run if the user already has condor jobs in flight
    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, 0, "User currently has %i running jobs. Test will not continue" % (nRunning))

    config = self.getConfig()
    config.BossAir.pluginName = 'CondorPlugin'
    baAPI = BossAirAPI(config = config)

    workload = self.createTestWorkload()
    workloadName = "basicWorkload"
    changeState = ChangeState(config)

    nSubs = 1
    nJobs = 2
    cacheDir = os.path.join(self.testDir, 'CacheDir')

    jobGroupList = self.createJobGroups(nSubs = nSubs, nJobs = nJobs,
                                        task = workload.getTask("ReReco"),
                                        workloadSpec = os.path.join(self.testDir, 'workloadTest', workloadName),
                                        site = 'se.T2_US_UCSD')

    # Move all created jobs into the state machine before the pollers start
    for group in jobGroupList:
        changeState.propagate(group.jobs, 'created', 'new')

    jobSubmitter = JobSubmitter(config = config)
    jobTracker = JobTracker(config = config)
    jobStatus = JobStatusLite(config = config)

    jobSubmitter.prepareToStart()
    jobTracker.prepareToStart()
    jobStatus.prepareToStart()

    # What should happen here:
    # 1) The JobSubmitter should submit the jobs
    # 2) Because of the ridiculously short time on pending jobs
    #    the JobStatus poller should mark the jobs as done
    #    and kill them.
    # 3) The JobTracker should realize there are finished jobs
    #
    # So at the end of several polling cycles, the jobs should all
    # be done, but be in the failed status (they timed out)

    time.sleep(20)

    myThread.workerThreadManager.terminateWorkers()

    getJobsAction = self.daoFactory(classname = "Jobs.GetAllJobs")
    result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
    self.assertEqual(len(result), 0)

    result = getJobsAction.execute(state = 'JobFailed', jobType = "Processing")
    self.assertEqual(len(result), nJobs * nSubs)
    return
def testE_FullChain(self):
    """
    _FullChain_

    Full test going through the chain; using polling cycles and everything
    """
    from WMComponent.JobSubmitter.JobSubmitter import JobSubmitter
    from WMComponent.JobStatusLite.JobStatusLite import JobStatusLite
    from WMComponent.JobTracker.JobTracker import JobTracker

    thisThread = threading.currentThread()

    # Refuse to run while the user still has condor jobs in flight
    runningCount = getCondorRunningJobs(self.user)
    self.assertEqual(
        runningCount, 0,
        "User currently has %i running jobs. Test will not continue" % (runningCount))

    testConfig = self.getConfig()
    testConfig.BossAir.pluginName = 'SimpleCondorPlugin'
    bossAir = BossAirAPI(config=testConfig, insertStates=True)

    testWorkload = self.createTestWorkload()
    specName = "basicWorkload"
    stateChanger = ChangeState(testConfig)

    subCount, jobCount = 1, 2
    cacheDir = os.path.join(self.testDir, 'CacheDir')
    specPath = os.path.join(self.testDir, 'workloadTest', specName)

    groups = self.createJobGroups(nSubs=subCount,
                                  nJobs=jobCount,
                                  task=testWorkload.getTask("ReReco"),
                                  workloadSpec=specPath,
                                  site='se.T2_US_UCSD')

    # Feed the freshly created jobs into the state machine
    for jobGroup in groups:
        stateChanger.propagate(jobGroup.jobs, 'created', 'new')

    submitter = JobSubmitter(config=testConfig)
    tracker = JobTracker(config=testConfig)
    statusPoller = JobStatusLite(config=testConfig)

    # Expected sequence once the pollers are running:
    # 1) JobSubmitter submits the jobs.
    # 2) The absurdly short pending-job timeout makes the JobStatus
    #    poller declare the jobs done and kill them.
    # 3) JobTracker then notices the finished jobs.
    # After several polling cycles every job should therefore be done,
    # but sitting in the failed state (they timed out).
    for daemon in (submitter, tracker, statusPoller):
        daemon.prepareToStart()

    time.sleep(20)

    thisThread.workerThreadManager.terminateWorkers()

    fetchJobs = self.daoFactory(classname="Jobs.GetAllJobs")

    stillExecuting = fetchJobs.execute(state='Executing', jobType="Processing")
    self.assertEqual(len(stillExecuting), 0)

    timedOut = fetchJobs.execute(state='JobFailed', jobType="Processing")
    self.assertEqual(len(timedOut), jobCount * subCount)
    return