def _wmAgentConfig(configFile):
    """
    Build a minimal WMAgent test Configuration and save it to ``configFile``.

    Couch connection details come from the COUCHURL/COUCHDB environment
    variables; everything else is a hard-coded test default.
    """
    config = Configuration()

    config.section_("JobStateMachine")
    # Warning: setting couchDB to None will cause the ERROR:
    # but that should be ignored, if you want to test couchDB
    # set the real couchDB information here
    config.JobStateMachine.couchurl = os.getenv("COUCHURL")
    config.JobStateMachine.couchDBName = os.getenv("COUCHDB")
    config.JobStateMachine.jobSummaryDBName = "wmagent_summary_test"
    config.JobStateMachine.summaryStatsDBName = "stat_summary_test"

    config.section_("Agent")
    # User specific parameter
    config.Agent.hostName = "cmssrv52.fnal.gov"
    # User specific parameter
    config.Agent.contact = "*****@*****.**"
    # User specific parameter
    config.Agent.teamName = "DMWM"
    # User specific parameter
    config.Agent.agentName = "WMAgentCommissioning"
    config.Agent.useMsgService = False
    config.Agent.useTrigger = False

    # BossAir setup
    config.section_("BossAir")
    config.BossAir.pluginNames = ['TestPlugin', 'CondorPlugin']
    config.BossAir.pluginDir = 'WMCore.BossAir.Plugins'

    # TaskArchiver setup (JobSubmitter needs this)
    config.component_("TaskArchiver")
    config.TaskArchiver.ReqMgr2ServiceURL = "https://cmsweb-dev.cern.ch/reqmgr2"

    saveConfigurationFile(config, configFile)
def configure(self, configfile, service):
    """
    Load ``configfile`` and derive a standalone DBS Configuration for the
    given ``service`` view, forcing the REST formatter onto the view.
    """
    sourceCfg = loadConfigurationFile(configfile)
    webSection = sourceCfg.section_("Webtools")
    appName = webSection.application
    appSection = sourceCfg.section_(appName)
    serviceCfg = getattr(appSection.views.active, service)
    coreDb = sourceCfg.CoreDatabase

    # Either we change formatter
    # OR change the 'Accept' type to application/json (which we don't know how to do at this moment)
    serviceCfg.formatter.object = "WMCore.WebTools.RESTFormatter"

    newConfig = Configuration()
    newConfig.section_("CoreDatabase")
    newConfig.CoreDatabase = coreDb

    newConfig.component_('DBS')
    dbs = newConfig.DBS
    dbs.application = appName
    dbs.model = serviceCfg.model
    dbs.formatter = serviceCfg.formatter
    dbs.version = serviceCfg.version
    dbs.default_expires = 300
    # DBS uses owner name, directly from app section at the moment (does not pick it from CoreDatabase)
    dbs.dbowner = coreDb.dbowner
    # Add the CoreDatabase section to DBS
    dbs.database = newConfig.CoreDatabase
    return newConfig
class AlertProcessorTest(unittest.TestCase):
    """
    Start/stop test for the AlertProcessor component.
    """

    def setUp(self):
        # Schema for WMBS, the agent database and resource control is needed
        # because AlertProcessor's sub-components talk to those tables.
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel = logging.DEBUG)
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMCore.WMBS", 'WMCore.Agent.Database',
                                                 "WMCore.ResourceControl"],
                                useDefault = False)
        self.testDir = self.testInit.generateWorkDir()
        self.config = Configuration()
        self.config.section_("Agent")
        self.config.Agent.useMsgService = False
        self.config.Agent.useTrigger = False
        self.config.component_("AlertProcessor")
        self.config.AlertProcessor.componentDir = self.testDir
        self.config.AlertProcessor.address = "tcp://127.0.0.1:5557"
        self.config.AlertProcessor.controlAddr = "tcp://127.0.0.1:5559"
        self.config.section_("CoreDatabase")
        self.config.CoreDatabase.socket = os.environ.get("DBSOCK")
        self.config.CoreDatabase.connectUrl = os.environ.get("DATABASE")
        self.config.AlertProcessor.section_("critical")
        self.config.AlertProcessor.section_("soft")
        self.config.AlertProcessor.critical.level = 5
        self.config.AlertProcessor.soft.level = 0
        self.config.AlertProcessor.soft.bufferSize = 3
        self.config.AlertProcessor.critical.section_("sinks")
        self.config.AlertProcessor.soft.section_("sinks")

    def tearDown(self):
        self.testInit.clearDatabase()
        self.testInit.delWorkDir()

    def testAlertProcessorBasic(self):
        alertProcessor = AlertProcessor(self.config)
        try:
            # alertProcessor.startComponent() causes the flow to stop, Harness.py
            # the method just calls prepareToStart() and waits for ever
            # alertProcessor.startDaemon() no good for this either ... puts everything
            # on background
            alertProcessor.prepareToStart()
        # fixed: 'except Exception, ex' is Python 2-only syntax; the 'as'
        # form works on Python 2.6+ and Python 3 (matches the other copy
        # of this test in the codebase)
        except Exception as ex:
            print(ex)
            self.fail(str(ex))
        logging.debug("AlertProcessor and its sub-components should be running now ...")
        logging.debug("Going to stop the component ...")
        # stop via component method
        try:
            alertProcessor.stopAlertProcessor()
        except Exception as ex:
            print(ex)
            self.fail(str(ex))
def _wmAgentConfig(configFile):
    """
    Create a WMAgent test Configuration (SimpleCondorPlugin flavour) and
    persist it to ``configFile``.

    Couch settings are read from the COUCHURL/COUCHDB environment
    variables; the remaining values are fixed test defaults.
    """
    config = Configuration()

    config.section_("JobStateMachine")
    # Warning: setting couchDB to None will cause the ERROR:
    # but that should be ignored, if you want to test couchDB
    # set the real couchDB information here
    config.JobStateMachine.couchurl = os.getenv("COUCHURL")
    config.JobStateMachine.couchDBName = os.getenv("COUCHDB")
    config.JobStateMachine.jobSummaryDBName = "wmagent_summary_test"
    config.JobStateMachine.summaryStatsDBName = "stat_summary_test"

    config.section_("Agent")
    # User specific parameter
    config.Agent.hostName = "cmssrv52.fnal.gov"
    # User specific parameter
    config.Agent.contact = "*****@*****.**"
    # User specific parameter
    config.Agent.teamName = "DMWM"
    # User specific parameter
    config.Agent.agentName = "WMAgentCommissioning"
    config.Agent.useMsgService = False
    config.Agent.useTrigger = False

    # BossAir setup
    config.section_("BossAir")
    config.BossAir.pluginNames = ['TestPlugin', 'SimpleCondorPlugin']
    config.BossAir.pluginDir = 'WMCore.BossAir.Plugins'

    # TaskArchiver setup (JobSubmitter needs this)
    config.component_("TaskArchiver")
    config.TaskArchiver.ReqMgr2ServiceURL = "https://cmsweb-dev.cern.ch/reqmgr2"

    saveConfigurationFile(config, configFile)
class AlertProcessorTest(unittest.TestCase):
    """
    Start/stop test for the AlertProcessor component.
    """

    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel=logging.DEBUG)
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=[
            "WMCore.WMBS", 'WMCore.Agent.Database', "WMCore.ResourceControl"
        ], useDefault=False)
        self.testDir = self.testInit.generateWorkDir()
        self.config = Configuration()
        self.config.section_("Agent")
        self.config.Agent.useMsgService = False
        self.config.Agent.useTrigger = False
        self.config.component_("AlertProcessor")
        self.config.AlertProcessor.componentDir = self.testDir
        self.config.AlertProcessor.address = "tcp://127.0.0.1:5557"
        self.config.AlertProcessor.controlAddr = "tcp://127.0.0.1:5559"
        self.config.section_("CoreDatabase")
        self.config.CoreDatabase.socket = os.environ.get("DBSOCK")
        self.config.CoreDatabase.connectUrl = os.environ.get("DATABASE")
        self.config.AlertProcessor.section_("critical")
        self.config.AlertProcessor.section_("soft")
        self.config.AlertProcessor.critical.level = 5
        self.config.AlertProcessor.soft.level = 0
        self.config.AlertProcessor.soft.bufferSize = 3
        self.config.AlertProcessor.critical.section_("sinks")
        self.config.AlertProcessor.soft.section_("sinks")

    def tearDown(self):
        self.testInit.clearDatabase()
        self.testInit.delWorkDir()

    def testAlertProcessorBasic(self):
        alertProcessor = AlertProcessor(self.config)
        try:
            # alertProcessor.startComponent() causes the flow to stop, Harness.py
            # the method just calls prepareToStart() and waits for ever
            # alertProcessor.startDaemon() no good for this either ... puts everything
            # on background
            alertProcessor.prepareToStart()
        except Exception as ex:
            # fixed: 'print ex' is Python 2-only statement syntax; the
            # parenthesised call works on both Python 2 and 3 and is
            # consistent with the 'except ... as' form already used here
            print(ex)
            self.fail(str(ex))
        logging.debug(
            "AlertProcessor and its sub-components should be running now ...")
        logging.debug("Going to stop the component ...")
        # stop via component method
        try:
            alertProcessor.stopAlertProcessor()
        except Exception as ex:
            print(ex)
            self.fail(str(ex))
def configure(self, configfile, service, dbinstance):
    """
    Build a DBS test Configuration for ``service`` running against
    ``dbinstance``.

    Reads the webtools config file, forces the REST formatter on the
    selected view, and fills in database/security settings either from
    the hard-coded migration-test secrets (when ``self.migration_test``
    is set) or from the instance block of the loaded configuration.
    """
    cfg = loadConfigurationFile(configfile)
    wconfig = cfg.section_("Webtools")
    app = wconfig.application
    appconfig = cfg.section_(app)
    dbsconfig = getattr(appconfig.views.active, service)
    # Either we change formatter
    # OR change the 'Accept' type to application/json (which we don't know how to do at the moment)
    dbsconfig.formatter.object="WMCore.WebTools.RESTFormatter"
    config = Configuration()
    config.component_('SecurityModule')
    config.SecurityModule.dangerously_insecure = True
    config.component_('DBS')
    config.DBS.application = app
    config.DBS.model = dbsconfig.model
    config.DBS.formatter = dbsconfig.formatter
    #Does not support instances
    #config.DBS.instances = cfg.dbs.instances
    #config.DBS.database = dbsconfig.database
    if self.migration_test:
        #Use one specific database cms_dbs3_dev_phys02@int2r for migration unittests
        from DBSSecrets import dbs3_dp2_i2
        config.DBS.section_('database')
        config.DBS.database.connectUrl = dbs3_dp2_i2['connectUrl']['writer']
        config.DBS.database.dbowner = dbs3_dp2_i2['databaseOwner']
        config.DBS.database.engineParameters = { 'pool_size' : 15, 'max_overflow' : 10, 'pool_timeout' : 200 }
        # fall back to a dummy version when the instance block has none
        version = getattr(dbsconfig.database.instances, dbinstance).version
        config.DBS.database.version = version if version else '3.99.98'
        config.DBS.section_('security')
        config.DBS.security.params = {}
    else:
        #Use dev/global from dbs configuration for the reader, writer and dao unittests
        dbconfig = getattr(dbsconfig.database.instances, dbinstance)
        config.DBS.section_('database')
        config.DBS.database.connectUrl = dbconfig.connectUrl
        config.DBS.database.dbowner = dbconfig.dbowner
        config.DBS.database.engineParameters = dbconfig.engineParameters
        config.DBS.database.version = dbconfig.version if dbconfig.version else '3.99.98'
        #config.DBS.database.instance = dbconfig.instance
        # security is optional: only copied when the instance defines it
        try:
            secconfig = getattr(dbsconfig.security.instances, dbinstance)
        except AttributeError:
            pass
        else:
            config.DBS.section_('security')
            config.DBS.security.params = secconfig.params
    config.DBS.default_expires = 900
    return config
def testC(self):
    """add components"""
    config = Configuration()
    config.component_("Component1")
    config.component_("Component2")
    config.component_("Component3")
    # self.failUnless is a deprecated unittest alias; assertIsNotNone
    # states the intent directly and gives a clearer failure message
    comp1 = getattr(config, "Component1", None)
    self.assertIsNotNone(comp1)
    comp2 = getattr(config, "Component2", None)
    self.assertIsNotNone(comp2)
    # Component3 was created above but previously never verified
    comp3 = getattr(config, "Component3", None)
    self.assertIsNotNone(comp3)
def testC(self):
    """add components"""
    config = Configuration()
    config.component_("Component1")
    config.component_("Component2")
    config.component_("Component3")
    # assertIsNotNone replaces assertTrue(x != None): comparing to None
    # with != is an anti-idiom (PEP 8) and hides the value on failure
    comp1 = getattr(config, "Component1", None)
    self.assertIsNotNone(comp1)
    comp2 = getattr(config, "Component2", None)
    self.assertIsNotNone(comp2)
    # Component3 was created above but previously never verified
    comp3 = getattr(config, "Component3", None)
    self.assertIsNotNone(comp3)
def getConfig(self):
    """
    _getConfig_

    Get a test configuration for the JobUpdater tests
    """
    config = Configuration()
    config.section_('Agent')
    config.Agent.agentName = 'testAgent'

    config.section_('CoreDatabase')
    # DATABASE must be set in the environment; DBSOCK may be absent (None)
    config.CoreDatabase.connectUrl = os.environ['DATABASE']
    config.CoreDatabase.socket = os.getenv('DBSOCK')

    # JobUpdater (comment previously mislabeled this as 'JobTracker')
    config.component_('JobUpdater')
    config.JobUpdater.reqMgrUrl = 'https://cmsweb-dev.cern.ch/reqmgr/reqMgr'

    # JobStateMachine
    config.section_('JobStateMachine')
    config.JobStateMachine.couchDBName = 'bogus'

    # BossAir
    config.section_('BossAir')
    config.BossAir.pluginNames = ['MockPlugin']
    config.BossAir.pluginDir = 'WMCore.BossAir.Plugins'
    config.BossAir.multicoreTaskTypes = ['MultiProcessing', 'MultiProduction']
    config.BossAir.nCondorProcesses = 1
    config.BossAir.section_('MockPlugin')
    # canned FWJR used by MockPlugin instead of real job reports
    config.BossAir.MockPlugin.fakeReport = os.path.join(getTestBase(),
                                                        'WMComponent_t/JobAccountant_t/fwjrs',
                                                        'MergedSkimSuccess.pkl')

    # WorkQueue
    config.component_('WorkQueueManager')
    config.WorkQueueManager.couchurl = os.environ['COUCHURL']
    config.WorkQueueManager.dbname = 'workqueue_t'
    config.WorkQueueManager.inboxDatabase = 'workqueue_inbox_t'
    return config
def workqueueConfig(couchdb = localCouchDb):
    """
    Build and return a GlobalQueue WorkQueueManager Configuration pointing
    at ``couchdb``.
    """
    config = Configuration()

    config.section_("Agent")
    config.Agent.hostName = gethostname()

    config.component_("WorkQueueManager")
    wqManager = config.WorkQueueManager
    wqManager.namespace = "WMComponent.WorkQueueManager.WorkQueueManager"
    wqManager.couchurl = couchdb
    wqManager.dbname = workqueueDBName
    wqManager.wmstatDBName = wmstatsDBName
    wqManager.inboxDatabase = workqueueInboxDbName
    wqManager.level = "GlobalQueue"
    wqManager.queueParams = {
        'WMStatsCouchUrl': "%s/%s" % (couchdb, wmstatsDBName),
        'QueueURL': '%s/%s' % (couchdb, workqueueDBName),
    }
    wqManager.reqMgrConfig = {'endpoint': reqMgrEndpoint}
    return config
def testE(self):
    """test save/load
    """
    testValues = ["string", 123, 123.456,
                  ["list", 789, 10.1],
                  {"dict1": "value", "dict2": 10.0}]
    config = Configuration()
    # build five documented sections and components, each carrying the
    # same five documented parameters
    for idx in range(5):
        config.section_("Section%s" % idx)
        config.component_("Component%s" % idx)
        sect = getattr(config, "Section%s" % idx)
        comp = getattr(config, "Component%s" % idx)
        sect.document_("This is Section%s" % idx)
        comp.document_("This is Component%s" % idx)
        for pos, value in enumerate(testValues):
            setattr(comp, "Parameter%s" % pos, value)
            setattr(sect, "Parameter%s" % pos, value)
            comp.document_("This is Parameter%s" % pos, "Parameter%s" % pos)
            sect.document_("This is Parameter%s" % pos, "Parameter%s" % pos)

    # exercise the three string renderings
    dummyStringSave = str(config)
    dummyDocumentSave = config.documentedString_()
    dummyCommentSave = config.commentedString_()

    # round-trip through all three file formats
    saveConfigurationFile(config, self.normalSave)
    saveConfigurationFile(config, self.docSave, document=True)
    saveConfigurationFile(config, self.commentSave, comment=True)
    dummyPlainConfig = loadConfigurationFile(self.normalSave)
    dummyDocConfig = loadConfigurationFile(self.docSave)
    dummyCommentConfig = loadConfigurationFile(self.commentSave)
def testE(self):
    """test save/load
    """
    testValues = [
        "string",
        123,
        123.456,
        ["list", 789, 10.1],
        {"dict1": "value", "dict2": 10.0},
    ]
    config = Configuration()
    for num in range(0, 5):
        secName = "Section%s" % num
        compName = "Component%s" % num
        config.section_(secName)
        config.component_(compName)
        section = getattr(config, secName)
        component = getattr(config, compName)
        section.document_("This is %s" % secName)
        component.document_("This is %s" % compName)
        for j, val in enumerate(testValues):
            parName = "Parameter%s" % j
            setattr(component, parName, val)
            setattr(section, parName, val)
            component.document_("This is %s" % parName, parName)
            section.document_("This is %s" % parName, parName)

    # exercise the string renderings, then round-trip through the three
    # on-disk formats
    stringSave = str(config)
    documentSave = config.documentedString_()
    commentSave = config.commentedString_()
    saveConfigurationFile(config, self.normalSave)
    saveConfigurationFile(config, self.docSave, document=True)
    saveConfigurationFile(config, self.commentSave, comment=True)
    plainConfig = loadConfigurationFile(self.normalSave)
    docConfig = loadConfigurationFile(self.docSave)
    commentConfig = loadConfigurationFile(self.commentSave)
def _generate_config(self, view, globalconf, instance=None, is_index=False):
    """
    _generate_config_

    Create the configuration for a page by combining its own
    configuration with the global one.
    """
    config = Configuration()
    view_config = config.component_(view._internal_name)
    view_config.application = self.app

    # global settings first, then the view's own settings on top so the
    # local values win on conflict
    for key, value in globalconf.items():
        setattr(view_config, key, value)
    for key, value in view.dictionary_().items():
        setattr(view_config, key, value)

    # TODO: remove bits we don't need
    if instance:
        # record the instance into the view's configuration
        view_config.instance = instance
        if hasattr(view, 'database') and hasattr(view.database, 'instances'):
            db_cfg = view.database.section_('instances')
            view_config.section_('database')
            view_config.database = db_cfg.section_(instance)
        if hasattr(view, 'security') and hasattr(view.security, 'instances'):
            security_cfg = view.security.section_('instances')
            view_config.section_('security')
            view_config.security = security_cfg.section_(instance)

    if 'database' in view_config.dictionary_():
        db_section = view_config.database
        if not isinstance(db_section, (str, bytes)):
            # empty database section: inherit the core database settings
            if not db_section.listSections_() and self.coreDatabase.listSections_():
                db_section.connectUrl = self.coreDatabase.connectUrl
                if hasattr(self.coreDatabase, "socket"):
                    db_section.socket = self.coreDatabase.socket
    return view_config
def _generate_config(self, view, globalconf, instance=None, is_index=False):
    """
    _generate_config_

    Create the configuration for a page by combining its own
    configuration with the global one.
    """
    config = Configuration()
    view_config = config.component_(view._internal_name)
    view_config.application = self.app
    view_dict = view.dictionary_()
    # iterate the mappings directly; the previous .keys() calls built a
    # needless intermediate list on Python 2
    for k in globalconf:
        # Add the global config to the view
        view_config.__setattr__(k, globalconf[k])
    for k in view_dict:
        # overwrite global if the local config is different
        view_config.__setattr__(k, view_dict[k])
    # TODO: remove bits we don't need
    if instance:
        # record the instance into the view's configuration
        view_config.instance = instance
        if hasattr(view, 'database') and hasattr(view.database, 'instances'):
            db_cfg = view.database.section_('instances')
            view_config.section_('database')
            view_config.database = db_cfg.section_(instance)
        if hasattr(view, 'security') and hasattr(view.security, 'instances'):
            security_cfg = view.security.section_('instances')
            view_config.section_('security')
            view_config.security = security_cfg.section_(instance)
    if 'database' in view_config.dictionary_():
        # NOTE(review): 'basestring' is Python 2-only and raises NameError
        # on Python 3; the py3 variant of this method uses (str, bytes).
        # Left unchanged here because replacing it would drop py2 unicode
        # handling -- confirm before porting.
        if not isinstance(view_config.database, basestring):
            if len(view_config.database.listSections_()) == 0:
                if len(self.coreDatabase.listSections_()) > 0:
                    view_config.database.connectUrl = self.coreDatabase.connectUrl
                    if hasattr(self.coreDatabase, "socket"):
                        view_config.database.socket = self.coreDatabase.socket
    return view_config
def getBaseConfiguration(self):
    """
    Return a base webtools Configuration for unit-test servers:
    insecure security module, a local-only CherryPy setup, and an empty
    'active' views section for tests to populate.
    """
    config = Configuration()
    config.component_('SecurityModule')
    config.SecurityModule.dangerously_insecure = True

    config.component_('Webtools')
    config.Webtools.application = 'UnitTests'
    config.Webtools.log_screen = False
    config.Webtools.environment = "development"
    config.Webtools.error_log_level = logging.WARNING
    config.Webtools.access_log_level = logging.DEBUG
    config.Webtools.host = "127.0.0.1"

    config.component_('UnitTests')
    config.UnitTests.admin = "Mr Unit Test"
    config.UnitTests.description = "Dummy server for unit tests"
    config.UnitTests.title = "Unit Tests"
    config.UnitTests.section_('views')
    # the call below creates the 'active' views section as a side effect;
    # the previously unused local binding of its return value was removed
    config.UnitTests.views.section_('active')
    return config
def getBaseConfiguration(self):
    """Assemble the common Configuration shared by the webtools unit tests."""
    config = Configuration()

    config.component_('SecurityModule')
    security = config.SecurityModule
    security.dangerously_insecure = True

    config.component_('Webtools')
    webtools = config.Webtools
    webtools.application = 'UnitTests'
    webtools.log_screen = False
    webtools.environment = "development"
    webtools.error_log_level = logging.WARNING
    webtools.access_log_level = logging.DEBUG
    webtools.host = "127.0.0.1"

    config.component_('UnitTests')
    unitTests = config.UnitTests
    unitTests.admin = "Mr Unit Test"
    unitTests.description = "Dummy server for unit tests"
    unitTests.title = "Unit Tests"
    unitTests.section_('views')
    # creates the (initially empty) 'active' views section
    activeViews = unitTests.views.section_('active')
    return config
######################## # the default config should work out of the box with minimal change # Under the '## User specific parameter' line need to be changed to make the config correctly ######################## from WMCore.Configuration import Configuration from os import environ, path import WMCore.WMInit config = Configuration() config.component_('Webtools') config.Webtools.application = 'AgentMonitoring' config.component_('AgentMonitoring') config.AgentMonitoring.templates = path.join( WMCore.WMInit.getWMBASE(), 'src/templates/WMCore/WebTools' ) ## User specific parameter: config.AgentMonitoring.admin = '*****@*****.**' config.AgentMonitoring.title = 'WMAgent Monitoring' config.AgentMonitoring.description = 'Monitoring of a WMAgentMonitoring' config.AgentMonitoring.section_('views') # These are all the active pages that Root.py should instantiate active = config.AgentMonitoring.views.section_('active') wmagent = active.section_('wmagent') # The class to load for this view/page wmagent.object = 'WMCore.WebTools.RESTApi' wmagent.templates = path.join( WMCore.WMInit.getWMBASE(), 'src/templates/WMCore/WebTools/') wmagent.section_('database') ## User specific parameter: wmagent.database.connectUrl = 'mysql://metson@localhost/wmagent'
# NOTE: 'config', 'bossAirPlugins', 'glideInAcctGroup', 'glideInAcctGroupUser',
# 'databaseUrl', 'globalLogLevel', 'couchURL', 'workqueueDBName' and
# 'workqueueInboxDbName' are defined earlier in this script, outside this
# excerpt.

# BossAir: batch-system submission layer
config.BossAir.pluginNames = bossAirPlugins
config.BossAir.nCondorProcesses = 1
config.BossAir.multicoreTaskTypes = ["MultiProcessing", "MultiProduction"]
config.BossAir.submitWMSMode = True
config.BossAir.acctGroup = glideInAcctGroup
config.BossAir.acctGroupUser = glideInAcctGroupUser

config.section_("CoreDatabase")
config.CoreDatabase.connectUrl = databaseUrl
#config.CoreDatabase.socket = databaseSocket

config.section_("DashboardReporter")
config.DashboardReporter.dashboardHost = "cms-wmagent-job.cern.ch"
config.DashboardReporter.dashboardPort = 8884

# Local work queue feeding this agent from the global queue
config.component_('WorkQueueManager')
config.WorkQueueManager.namespace = "WMComponent.WorkQueueManager.WorkQueueManager"
config.WorkQueueManager.componentDir = config.General.workDir + "/WorkQueueManager"
config.WorkQueueManager.level = 'LocalQueue'
config.WorkQueueManager.logLevel = globalLogLevel
config.WorkQueueManager.couchurl = couchURL
config.WorkQueueManager.dbname = workqueueDBName
config.WorkQueueManager.inboxDatabase = workqueueInboxDbName
config.WorkQueueManager.queueParams = {}
config.WorkQueueManager.queueParams["ParentQueueCouchUrl"] = "https://cmsweb.cern.ch/couchdb/workqueue"
# this has to be unique for different work queue. This is just place holder
config.WorkQueueManager.queueParams["QueueURL"] = "http://%s:5984" % (config.Agent.hostName)

config.component_("DBS3Upload")
config.DBS3Upload.namespace = "WMComponent.DBS3Buffer.DBS3Upload"
config.DBS3Upload.componentDir = config.General.workDir + "/DBS3Upload"
from WMCore.Configuration import Configuration

config = Configuration()

# web_server configuration
config.component_('web_server')
config.web_server.thread_pool = 150
config.web_server.socket_queue_size = 100
config.web_server.timeout_monitor = False
config.web_server.loglevel = 0
config.web_server.host = '0.0.0.0'
config.web_server.log_screen = True
config.web_server.url_base = '/das'
config.web_server.logfile = ''
config.web_server.port = 8211
# pid file lives under <deploy root>/state/das, derived from this file's path
config.web_server.pid = '%s/state/das/das_cli_server.pid' % __file__.rsplit(
    '/', 4)[0]
config.web_server.status_update = 2500
config.web_server.web_workers = 50
config.web_server.queue_limit = 1000
config.web_server.qtype = 'Queue'
config.web_server.adjust_input = True
config.web_server.dbs_daemon = False  # disable DBS daemon, it will run in das web server
config.web_server.hot_threshold = 3000
config.web_server.services = ['dbs_phedex']
config.web_server.check_clients = False  # set True when DAS ready

# cache requests configuration
config.component_('cacherequests')
config.cacherequests.Admin = 50
config.cacherequests.Unlimited = 10000
#!/usr/bin/env python """ _ACDCConfig_ ACDC config for a development deployment """ from WMCore.Configuration import Configuration acdcDatabase = "acdcserver" HOST = "cmsweb-dev.cern.ch" COUCH = "https://%s/couchdb" % HOST # Nothing after this point should need to be changed. config = Configuration() config.section_("Agent") config.Agent.hostName = HOST config.component_("ACDC") config.ACDC.couchurl = COUCH config.ACDC.dbname = acdcDatabase config.ACDC.cleaningInterval = 30
# NOTE: 'DBS', 'active', 'config' and 'DBSVersion' are defined earlier in
# this script, outside this excerpt.

# DBS writer REST view: model + formatter
DBS.section_('model')
DBS.model.object = 'dbs.web.DBSWriterModel'
DBS.section_('formatter')
# NOTE(review): presumably 'DBS' is active.DBS (section created earlier),
# so this sets the formatter on the same section -- confirm upstream
active.DBS.formatter.object = 'WMCore.WebTools.RESTFormatter'

# Migration server page/view
MIGRATE = active.section_('MIGRATE')
MIGRATE.object = 'WMCore.WebTools.RESTApi'
MIGRATE.section_('model')
MIGRATE.model.object = 'dbs.web.DBSMigrateModel'
MIGRATE.section_('formatter')
MIGRATE.formatter.object = 'WMCore.WebTools.RESTFormatter'
MIGRATE.version = DBSVersion
MIGRATE.nthreads = 4

# config migration mover
config.component_('DBSMigrationMover')
config.DBSMigrationMover.default_expires = 300
config.DBSMigrationMover.pollInterval = 1
config.DBSMigrationMover.namespace = "dbs.components.migration.DBSMigrationMover"
config.DBSMigrationMover.componentDir = config.General.workDir + "/Logs/MigrationMover"
config.DBSMigrationMover.workerThreads = 1

# Config insert buffer
config.component_('DBSInsertBuffer')
config.DBSInsertBuffer.default_expires = 300
config.DBSInsertBuffer.pollInterval = 1
config.DBSInsertBuffer.namespace = "dbs.components.insertbuffer.DBSInsertBuffer"
# fixed: missing path separator -- previously concatenated to
# '<workDir>Logs/DBSInsertBuffer', inconsistent with DBSMigrationMover above
config.DBSInsertBuffer.componentDir = config.General.workDir + "/Logs/DBSInsertBuffer"
config.DBSInsertBuffer.workerThreads = 1
from WMCore.Configuration import Configuration
from WMCore.WMBase import getWMBASE
import os.path
from os import environ

config = Configuration()

# This component has all the configuration of CherryPy
config.component_('Webtools')
# This is the application
config.Webtools.port = 8201
# INADDR_ANY: listen on all interfaces (be visible outside of localhost)
config.Webtools.host = '0.0.0.0'
config.Webtools.application = 'FileMover'

# This is the config for the application
config.component_('FileMover')
# Define the default location for templates for the app
# NOTE: hard-wired python2.6 site-packages path under FILEMOVER_ROOT
config.FileMover.templates = environ[
    'FILEMOVER_ROOT'] + '/lib/python2.6/site-packages/fm/web/templates'
config.FileMover.admin = '*****@*****.**'
config.FileMover.title = 'CMS FileMover Documentation'
config.FileMover.description = 'Documentation on the FileMover'
config.FileMover.index = "FileMover"

# phedex section
phedex = config.FileMover.section_('phedex')
phedex.url = 'https://cmsweb.cern.ch/phedex/datasvc/json/prod'
def reqMgrConfig(
        componentDir = basedir + "/var",
        installation = os.environ["WMCORE_ROOT"],
        port = 8240,
        user = None,
        reqMgrHost = "http://%s:%d" % (socket.gethostname().lower(), 8240),
        proxyBase = None,
        couchurl = os.getenv("COUCHURL"),
        sitedb = 'https://cmsweb.cern.ch/sitedb/json/index/CEtoCMSName?name',
        dbs3 = 'http://vocms09.cern.ch:8989/dbs',
        yuiroot = "/reqmgr/yuiserver/yui",
        configCouchDB = 'reqmgr_config_cache',
        workloadCouchDB = 'reqmgr_workload_cache',
        workloadSummaryCouchDB = "workloadsummary",
        wmstatCouchDB = "wmstats",
        connectURL = None,
        startup = "Root.py",
        addMonitor = True):
    """
    Build the full Request Manager web application Configuration.

    When ``startup`` is "Root.py" the production-style Webtools component
    is configured and the database URL is read from ReqMgrSecrets;
    otherwise a development webapp_ setup uses ``connectURL``.
    """
    config = Configuration()
    reqMgrHtml = os.path.join(installation, 'data/html/RequestManager')
    reqMgrTemplates = os.path.join(installation, 'data/templates/WMCore/WebTools/RequestManager')
    reqMgrJavascript = os.path.join(installation, 'data/javascript')
    globalOverviewTemplates = os.path.join(installation, 'data/templates/WMCore/WebTools/GlobalMonitor')
    globalOverviewJavascript = reqMgrJavascript
    globalOverviewHtml = os.path.join(installation, 'data/html')

    if startup == "Root.py":
        config.component_("Webtools")
        config.Webtools.host = '0.0.0.0'
        config.Webtools.port = port
        config.Webtools.application = "reqmgr"
        if proxyBase:
            # fixed: previously assigned the undefined name 'proxy_base',
            # which raised NameError whenever proxyBase was truthy; the
            # parameter is spelled 'proxyBase'
            config.Webtools.proxy_base = proxyBase
        config.Webtools.environment = 'production'
        config.component_('reqmgr')
        from ReqMgrSecrets import connectUrl
        config.section_("CoreDatabase")
        # read from Secrets file
        config.CoreDatabase.connectUrl = connectUrl
        config.reqmgr.section_('database')
        config.reqmgr.database.connectUrl = connectUrl
    else:
        config.webapp_("reqmgr")
        config.reqmgr.Webtools.host = '0.0.0.0'
        config.reqmgr.Webtools.port = port
        config.reqmgr.Webtools.environment = 'devel'
        config.reqmgr.database.connectUrl = connectURL

    config.reqmgr.componentDir = componentDir
    config.reqmgr.templates = reqMgrTemplates
    config.reqmgr.html = reqMgrHtml
    config.reqmgr.javascript = reqMgrJavascript
    config.reqmgr.admin = '*****@*****.**'
    config.reqmgr.title = 'CMS Request Manager'
    config.reqmgr.description = 'CMS Request Manager'
    config.reqmgr.couchUrl = couchurl
    config.reqmgr.configDBName = configCouchDB
    config.reqmgr.workloadDBName = workloadCouchDB
    config.reqmgr.wmstatDBName = wmstatCouchDB
    config.reqmgr.security_roles = ['Admin', 'Developer', 'Data Manager', 'developer', 'admin', 'data-manager']
    config.reqmgr.yuiroot = yuiroot
    config.reqmgr.dbs3 = dbs3

    views = config.reqmgr.section_('views')
    active = views.section_('active')
    active.section_('view')
    active.view.object = 'WMCore.HTTPFrontEnd.RequestManager.ReqMgrBrowser'
    active.section_('admin')
    active.admin.object = 'WMCore.HTTPFrontEnd.RequestManager.Admin'
    active.section_('approve')
    active.approve.object = 'WMCore.HTTPFrontEnd.RequestManager.Approve'
    active.section_('assign')
    active.assign.object = 'WMCore.HTTPFrontEnd.RequestManager.Assign'
    active.assign.sitedb = sitedb
    active.section_('closeout')
    active.closeout.object = 'WMCore.HTTPFrontEnd.RequestManager.CloseOut'
    active.section_('announce')
    active.announce.object = 'WMCore.HTTPFrontEnd.RequestManager.Announce'

    active.section_('reqMgr')
    active.reqMgr.section_('model')
    active.reqMgr.section_('formatter')
    active.reqMgr.object = 'WMCore.WebTools.RESTApi'
    active.reqMgr.model.object = 'WMCore.HTTPFrontEnd.RequestManager.ReqMgrRESTModel'
    active.reqMgr.default_expires = 0  # no caching
    active.reqMgr.formatter.object = 'WMCore.WebTools.RESTFormatter'
    active.reqMgr.templates = os.path.join(installation, 'data/templates/WMCore/WebTools')

    # deprecate the old interface
    active.section_('rest')
    active.rest.section_('model')
    active.rest.section_('formatter')
    active.rest.object = 'WMCore.WebTools.RESTApi'
    active.rest.model.object = 'WMCore.HTTPFrontEnd.RequestManager.ReqMgrRESTModel'
    active.rest.default_expires = 0  # no caching
    active.rest.formatter.object = 'WMCore.WebTools.RESTFormatter'
    active.rest.templates = os.path.join(installation, 'data/templates/WMCore/WebTools')

    active.section_('create')
    active.create.object = 'WMCore.HTTPFrontEnd.RequestManager.WebRequestSchema'
    active.create.requestor = user
    active.create.cmsswDefaultVersion = 'CMSSW_5_2_5'

    if addMonitor:
        active.section_('GlobalMonitor')
        active.GlobalMonitor.object = 'WMCore.HTTPFrontEnd.GlobalMonitor.GlobalMonitorPage'
        active.GlobalMonitor.templates = globalOverviewTemplates
        active.GlobalMonitor.javascript = globalOverviewJavascript
        active.GlobalMonitor.html = globalOverviewHtml
        active.GlobalMonitor.serviceLevel = 'RequestManager'
        active.section_('monitorSvc')
        active.monitorSvc.serviceURL = "%s/reqmgr/reqMgr" % reqMgrHost
        active.monitorSvc.serviceLevel = active.GlobalMonitor.serviceLevel
        active.monitorSvc.workloadSummaryCouchURL = "%s/%s" % (couchurl, workloadSummaryCouchDB)
        active.monitorSvc.section_('model')
        active.monitorSvc.section_('formatter')
        active.monitorSvc.object = 'WMCore.WebTools.RESTApi'
        active.monitorSvc.model.object = 'WMCore.HTTPFrontEnd.GlobalMonitor.GlobalMonitorRESTModel'
        active.monitorSvc.default_expires = 0  # no caching
        active.monitorSvc.formatter.object = 'WMCore.WebTools.RESTFormatter'
        active.monitorSvc.template = os.path.join(installation, 'data/templates/WMCore/WebTools')

    # yuiserver is configured unconditionally: yuiroot is always set on
    # config.reqmgr above
    active.section_('yuiserver')
    active.yuiserver.object = 'WMCore.WebTools.YUIServer'
    active.yuiserver.yuidir = os.getenv("YUI_ROOT")
    return config
class ProcessorTest(unittest.TestCase):
    """
    TestCase for Processor.

    Wires a Processor into a Receiver listening on local tcp sockets and
    drives it with control messages and Alerts; the FileSink and CouchSink
    variants additionally check where the processed Alerts end up.
    """
    def setUp(self):
        """
        Set up for tests.
        """
        # alert and control channel addresses the Receiver listens on
        self.addr = "tcp://127.0.0.1:5557"
        self.ctrl = "tcp://127.0.0.1:5559"
        # output files used by the FileSink test (removed in tearDown)
        self.softOutputFile = "/tmp/ProcessorTestSoftAlerts.json"
        self.criticalOutputFile = "/tmp/ProcessorTestCriticalAlerts.json"
        # minimal AlertProcessor configuration: level thresholds for the
        # 'critical' and 'soft' queues plus empty sink sections that the
        # individual tests fill in
        self.config = Configuration()
        self.config.component_("AlertProcessor")
        self.config.AlertProcessor.section_("critical")
        self.config.AlertProcessor.section_("soft")
        self.config.AlertProcessor.critical.level = 5
        self.config.AlertProcessor.soft.level = 1
        self.config.AlertProcessor.soft.bufferSize = 3
        self.config.AlertProcessor.critical.section_("sinks")
        self.config.AlertProcessor.soft.section_("sinks")

    def tearDown(self):
        # remove sink output files created by the FileSink test, if any
        for f in (self.criticalOutputFile, self.softOutputFile):
            if os.path.exists(f):
                os.remove(f)
        # testInit only exists when the CouchSink test ran
        if hasattr(self, "testInit"):
            self.testInit.tearDownCouch()

    def testProcessorBasic(self):
        # smoke test: the configuration renders and a Processor can be built
        str(self.config.AlertProcessor)
        p = Processor(self.config.AlertProcessor)

    def testProcessorWithReceiver(self):
        """
        Test startup and shutdown of processor in receiver.
        """
        processor = Processor(self.config.AlertProcessor)
        rec = Receiver(self.addr, processor, self.ctrl)
        rec.startReceiver() # non-blocking call

        # now sender tests control messages (register, unregister, shutdown)
        s = Sender(self.addr, self.ctrl, "Processor_t")
        s.register()
        s.unregister()
        s.sendShutdown()
        # wait until the Receiver is shut by sending the above control messages
        while rec.isReady():
            time.sleep(0.3)
            print "%s waiting for Receiver to shut ..." % inspect.stack()[0][3]

    def testProcessorWithReceiverAndFileSink(self):
        # add corresponding part of the configuration for FileSink(s)
        config = self.config.AlertProcessor
        config.critical.sinks.section_("file")
        config.critical.sinks.file.outputfile = self.criticalOutputFile
        config.soft.sinks.section_("file")
        config.soft.sinks.file.outputfile = self.softOutputFile
        processor = Processor(config)
        rec = Receiver(self.addr, processor, self.ctrl)
        rec.startReceiver() # non blocking call
        # run worker(), this time directly without Process as above,
        # worker will send 10 Alerts to Receiver
        worker(self.addr, self.ctrl, 10)
        # wait until the Receiver is shut by worker
        while rec.isReady():
            time.sleep(0.3)
            print "%s waiting for Receiver to shut ..." % inspect.stack()[0][3]

        # now check the FileSink output files for content:
        # the soft Alerts has threshold level set to 0 so Alerts
        # with level 1 and higher, resp. for critical the level
        # was above set to 5 so 6 and higher out of worker's 0 .. 9
        # (10 Alerts altogether) shall be present
        # NOTE(review): the comment above disagrees with setUp (soft.level
        # is 1, critical.level is 5) and with the assertions below (3 soft
        # alerts, critical levels 5..9) -- the assertions look authoritative,
        # the prose is stale; confirm against the Processor implementation.
        softSink = FileSink(config.soft.sinks.file)
        criticalSink = FileSink(config.critical.sinks.file)
        softList = softSink.load()
        criticalList = criticalSink.load()
        # check soft level alerts
        # levels 1 .. 4 went in (level 0 is, according to the config not considered)
        self.assertEqual(len(softList), 3)
        for a, level in zip(softList, range(1, 4)):
            self.assertEqual(a["Level"], level)
        # check 'critical' levels
        # only levels 5 .. 9 went in
        self.assertEqual(len(criticalList), 5)
        for a, level in zip(criticalList, range(5, 10)):
            self.assertEqual(a["Level"], level)

    def testProcessorWithReceiverAndCouchSink(self):
        # set up couch first
        self.testInit = TestInitCouchApp(__file__)
        self.testInit.setLogging()
        dbName = "couch_sink"
        self.testInit.setupCouch(dbName)

        # add corresponding part of the configuration for CouchSink(s)
        config = self.config.AlertProcessor
        config.critical.sinks.section_("couch")
        config.critical.sinks.couch.url = self.testInit.couchUrl
        config.critical.sinks.couch.database = self.testInit.couchDbName

        # just send the Alert into couch
        processor = Processor(config)
        rec = Receiver(self.addr, processor, self.ctrl)
        rec.startReceiver() # non blocking call
        # run worker(), this time directly without Process as above,
        # worker will send 10 Alerts to Receiver
        worker(self.addr, self.ctrl, 10)
        # wait until the Receiver is shut by worker
        while rec.isReady():
            time.sleep(0.3)
            print "%s waiting for Receiver to shut ..." % inspect.stack()[0][3]
# instance name : connecturls, {reader needed roles, writer needed roles} db_mapping = { 'int/global': [ dbs3_secrets, { 'reader': {}, 'writer': { 'dbs': 'operator', 'dataops': 'production-operator' } } ] } config = Configuration() config.component_('Webtools') config.Webtools.port = 8252 config.Webtools.thread_pool = 15 config.Webtools.log_screen = False config.Webtools.proxy_base = 'True' config.Webtools.application = 'dbs' config.Webtools.environment = 'production' config.component_('database') config.database.connectUrl = dbs3_secrets['connectUrl'] config.database.dbowner = dbs3_secrets['databaseOwner'] config.component_('dbs') config.dbs.title = 'DBS Server' config.dbs.description = 'CMS DBS Service' config.dbs.section_('views')
# AsyncStageOut / AsyncTransfer component configuration fragment.
# NOTE(review): workDirectory, databaseUrl, userEmail, agentName,
# serverHostName, teamName, couchUrl, files_database, requests_database,
# couchURLSource and database_src are defined earlier in the file -- confirm.
cache_area = "url_to_CS_cache"
ui_script = '/afs/cern.ch/cms/LCG/LCG-2/UI/cms_ui_env.sh'

config = Configuration()
config.section_('General')
config.General.workDir = workDirectory
config.section_("CoreDatabase")
config.CoreDatabase.connectUrl = databaseUrl

# agent identity
config.section_("Agent")
config.Agent.contact = userEmail
config.Agent.agentName = agentName
config.Agent.hostName = serverHostName
config.Agent.teamName = teamName

config.component_("AsyncTransfer")
config.AsyncTransfer.logLevel = 'INFO'  # used when initializing the component (by WMCore Harness class)
config.AsyncTransfer.log_level = logging.INFO  # used when starting the daemon (by ASO); overrides logLevel
config.AsyncTransfer.logMsgFormat = "%(asctime)s:%(levelname)s:%(module)s:%(name)s: %(message)s"
config.AsyncTransfer.namespace = "AsyncStageOut.AsyncTransfer"
config.AsyncTransfer.componentDir = config.General.workDir
config.AsyncTransfer.pollInterval = 10
config.AsyncTransfer.pollViewsInterval = 10
# couch endpoints and databases the component reads/writes
config.AsyncTransfer.couch_instance = couchUrl
config.AsyncTransfer.files_database = files_database
config.AsyncTransfer.requests_database = requests_database
config.AsyncTransfer.data_source = couchURLSource
config.AsyncTransfer.db_source = database_src
config.AsyncTransfer.pluginName = "CentralMonitoring"
config.AsyncTransfer.pluginDir = "AsyncStageOut.Plugins"
config.AsyncTransfer.max_files_per_transfer = 100
from WMCore.Configuration import Configuration

# Static user registry for the Users component: one section per user,
# each carrying identity data plus a nested permissions section.
config = Configuration()
user_registry = config.component_('Users')

# --- diego ---
diego = user_registry.section_('diego')
diego.fullname = 'Diego Gomes'
diego_perms = diego.section_('permissions')
diego_perms.section_('Admin')
diego_perms.Admin = 'T2_BR_UERJ'
diego_perms.section_('Developer')
diego_perms.Developer = 'CMS DMWM'
# diego's password is password123
diego.password = '******'
diego.dn = '/DC=org/DC=doegrids/OU=People/CN=Diego da Silva Gomes 849253'

# --- simon ---
simon = user_registry.section_('simon')
simon.fullname = 'Simon Metson'
simon_perms = simon.section_('permissions')
simon_perms.section_('Admin')
simon_perms.Admin = ['T1_CH_CERN', 'T2_UK_SGrid']
simon_perms.section_('L2')
simon_perms.L2 = 'CMS DMWM'
# simon's password is password
simon.password = '******'
simon.dn = '/grid/cms/cern/simon'
from WMCore.Configuration import Configuration

# DAS web service configuration.
config = Configuration()

# web_server configuration
config.component_('web_server')
config.web_server.thread_pool = 30
config.web_server.socket_queue_size = 15
config.web_server.loglevel = 0
config.web_server.host = '0.0.0.0'  # listen on all interfaces
config.web_server.log_screen = True
config.web_server.url_base = '/das'
config.web_server.logfile = ''
config.web_server.port = 8212
# PID file path derived from this config file's location (four components up)
config.web_server.pid = '%s/state/das/das_web_server.pid' % __file__.rsplit('/', 4)[0]
config.web_server.status_update = 2500
config.web_server.web_workers = 50
config.web_server.queue_limit = 1000
config.web_server.qtype = 'Queue'
config.web_server.adjust_input = True
# DBS daemon settings -- presumably a periodic DBS cache refresher; confirm
config.web_server.dbs_daemon = True
config.web_server.dbs_daemon_interval = 600
config.web_server.dbs_daemon_expire = 3600
config.web_server.hot_threshold = 3000
config.web_server.onhold_daemon = True
config.web_server.services = ['dbs_phedex']

# cache requests configuration
config.component_('cacherequests')
config.cacherequests.Admin = 50
# Application configuration skeleton; the SecurityModule block is kept
# commented out (credentials masked) until security is enabled.
config = Configuration()
# This is the Security config the application will use
#config.component_('SecurityModule')
#config.SecurityModule.enabled = True
#config.SecurityModule.oid_server = 'http://*****:*****@gmail.com'
# NOTE(review): this fragment starts inside a dict literal opened earlier in
# the file (per-instance thread counts -- presumably thread_mapping; confirm).
    'prod/phys02': 1, 'prod/phys03': 1, 'prod/caf': 1,
    'prod/test': 1, 'int/global': 1, 'int/phys01': 1,
    'int/phys02': 1, 'int/phys03': 1, 'dev/global': 1,
    'dev/phys01': 1, 'dev/phys02': 1, 'dev/phys03': 1}

config = Configuration()

# internal web server for the migration service
config.component_('web')
config.web.host = "127.0.0.1"
config.web.port = 8251
config.web.log_screen = True
config.web.thread_pool = 50

# DBS migration component: one database section per active instance
config.component_('dbsmigration')
config.dbsmigration.instances = view_mapping[VARIANT]['DBSMigrate']
config.dbsmigration.section_('database')
db_instances = config.dbsmigration.database.section_('instances')
for instance_name in config.dbsmigration.instances:
    db_config_section = db_instances.section_(instance_name)
    db_config_section.threads = thread_mapping[instance_name]
    db_config_section.dbowner = db_mapping[instance_name]['databaseOwner']
    # migration writes, so the 'writer' connect URL is used
    db_config_section.connectUrl = db_mapping[instance_name]['connectUrl']['writer']
    db_config_section.engineParameters = {'pool_size': 15, 'max_overflow': 10, 'pool_timeout': 200}
# External service endpoints used by the request manager frontend.
sitedb = 'https://cmsweb.cern.ch/sitedb/json/index/CEtoCMSName?name'
dbs3 = 'http://vocms09.cern.ch:8989/dbs'
yuiroot = "/an_reqmgr/yuiserver/yui"

# Resolve template/static asset paths relative to the WMCore installation.
INSTALL = getWMBASE()
TEMPLATES = os.path.normpath(os.path.join(INSTALL, '../../../data/templates/WMCore/WebTools'))
JAVASCRIPT_PATH = os.path.normpath(os.path.join(INSTALL, '../../../data/javascript'))
HTML_PATH = os.path.normpath(os.path.join(INSTALL, '../../../data/html'))
SECURITY_ROLES = ['Admin', 'Developer', 'Data Manager', 'developer', 'admin', 'data-manager', 'facops', 'FacOps']

#
# Beginning of the boilerplate configuration bits
#
config = Configuration()
config.component_("Webtools")
config.Webtools.host = '0.0.0.0'  # listen on all interfaces
config.Webtools.port = PORT  # NOTE(review): PORT is defined earlier in the file -- confirm
config.Webtools.application = "an_reqmgr"
config.Webtools.environment = 'production'
#config.Webtools.environment = 'development'
config.Webtools.proxy_base = 'True'
config.Webtools.thread_pool = 30
# reduce the per-thread stack size (many threads in the pool)
thread.stack_size(128*1024)

config.component_('SecurityModule')
# auth key deployed three path components above this config file
config.SecurityModule.key_file = "%s/auth/wmcore-auth/header-auth-key" % __file__.rsplit('/', 3)[0]

config.component_('an_reqmgr')
config.section_("CoreDatabase")
config.CoreDatabase.connectUrl = connectUrl
from WMCore.Configuration import Configuration
from os import environ

# CMS OpenID server configuration.
config = Configuration()

# This component has all the configuration of CherryPy
config.component_('Webtools')
config.Webtools.port = 8400
#config.Webtools.host = 'vocms33.cern.ch' #To open the socket in this interface
# This is the application
config.Webtools.application = 'Security'

#
# This is the config for the Security application
#
config.component_('Security')
# templates resolved from the WMCORE_ROOT environment variable
config.Security.templates = environ['WMCORE_ROOT'] + '/src/templates/WMCore/WebTools'
config.Security.admin = '*****@*****.**'
config.Security.title = 'CMS OpenID Server'
config.Security.description = 'CMS OpenID Server'
config.Security.index = 'oidserver'
# This is the URL the server should announce to users or consumers
# This is optional. Defaults to http://host:port/
#config.Security.server_url = 'https://'+config.Webtools.host+':8443/openid'
#config.Security.server_url = 'https://localhost:8443/openid'
# OpenID state is persisted on disk under store_path
config.Security.store = 'filestore'
config.Security.store_path = '/tmp/oidserver-store'
from WMCore.Configuration import Configuration

# DAS web service configuration (larger deployment: bigger thread pool and
# socket queue than the default variant, keyword-search server enabled).
config = Configuration()

# web_server configuration
config.component_('web_server')
config.web_server.thread_pool = 150
config.web_server.socket_queue_size = 100
config.web_server.timeout_monitor = True
config.web_server.loglevel = 0
config.web_server.host = '0.0.0.0'  # listen on all interfaces
config.web_server.log_screen = True
config.web_server.url_base = '/das'
config.web_server.logfile = ''
config.web_server.port = 8212
# PID file path derived from this config file's location (four components up)
config.web_server.pid = '%s/state/das/das_web_server.pid' % __file__.rsplit('/', 4)[0]
config.web_server.status_update = 2500
config.web_server.web_workers = 50
config.web_server.queue_limit = 1000
config.web_server.qtype = 'Queue'
config.web_server.adjust_input = True
config.web_server.dbs_daemon = True
config.web_server.dbs_daemon_interval = 600
config.web_server.dbs_daemon_expire = 3600
config.web_server.hot_threshold = 3000
config.web_server.services = ['dbs_phedex']
config.web_server.check_clients = False # set True when DAS ready
# keyword search runs on different port
config.web_server.kws_port = 8214
config.web_server.thread_pool_kws = 10
# DBS migration-related configuration fragment.
# NOTE(review): 'DBS', 'active', 'config' and 'DBSVersion' are defined
# earlier in the file -- confirm upstream.
DBS.model.object = 'dbs.web.DBSWriterModel'
DBS.section_('formatter')
active.DBS.formatter.object = 'WMCore.WebTools.RESTFormatter'

#Migration server page/view
MIGRATE = active.section_('MIGRATE')
MIGRATE.object = 'WMCore.WebTools.RESTApi'
MIGRATE.section_('model')
MIGRATE.model.object = 'dbs.web.DBSMigrateModel'
MIGRATE.section_('formatter')
MIGRATE.formatter.object = 'WMCore.WebTools.RESTFormatter'
MIGRATE.version = DBSVersion
MIGRATE.nthreads = 4

#config migration mover
config.component_('DBSMigrationMover')
config.DBSMigrationMover.default_expires = 300
config.DBSMigrationMover.pollInterval = 1
config.DBSMigrationMover.namespace = "dbs.components.migration.DBSMigrationMover"
config.DBSMigrationMover.componentDir = config.General.workDir + "/Logs/MigrationMover"
config.DBSMigrationMover.logLevel = 'DEBUG'
config.DBSMigrationMover.workerThreads = 1

#Config insert buffer
config.component_('DBSInsertBuffer')
config.DBSInsertBuffer.default_expires = 300
config.DBSInsertBuffer.pollInterval = 1
config.DBSInsertBuffer.namespace = "dbs.components.insertbuffer.DBSInsertBuffer"
# Fix: added the missing '/' separator so the directory follows the same
# "<workDir>/Logs/<component>" layout as DBSMigrationMover above
# (previously this produced "<workDir>Logs/DBSInsertBuffer").
config.DBSInsertBuffer.componentDir = config.General.workDir + "/Logs/DBSInsertBuffer"
config.DBSInsertBuffer.workerThreads = 1
from WMCore.Configuration import Configuration

# Trusts component: the list of servers this application accepts.
config = Configuration()
trusted_servers = config.component_('Trusts')
# only the default docs application running locally is trusted
trusted_servers.allowed = ['http://localhost:8080']
def getConfig(testDir):
    """
    Build a Configuration for the AlertGenerator test: agent identity,
    AlertProcessor thresholds, alert message channels and one section per
    resource poller (CPU, memory, disk, MySQL, CouchDB).

    :param testDir: directory used as componentDir for AlertProcessor
                    and AlertGenerator.
    :return: populated Configuration instance.
    """
    # common period (seconds) over which most pollers aggregate measurements
    periodAlertGeneratorPollers = 40 # [second]
    config = Configuration()
    config.section_("Agent")
    config.Agent.useMsgService = False
    config.Agent.useTrigger = False
    config.Agent.hostName = "localhost"
    config.Agent.teamName = "team1,team2,cmsdataops"
    config.Agent.agentName = "WMAgentCommissioning"
    # AlertProcessor values - values for Level soft, resp. critical
    # are also needed by this AlertGenerator test
    config.component_("AlertProcessor")
    config.AlertProcessor.componentDir = testDir
    config.AlertProcessor.section_("critical")
    config.AlertProcessor.section_("soft")
    config.AlertProcessor.critical.level = 5
    config.AlertProcessor.soft.level = 1
    # common 'Alert' section
    config.section_("Alert")
    # destination for the alert messages
    config.Alert.address = "tcp://127.0.0.1:6557"
    # control channel (internal alerts system commands)
    config.Alert.controlAddr = "tcp://127.0.0.1:6559"
    config.component_("AlertGenerator")
    config.AlertGenerator.componentDir = testDir
    config.AlertGenerator.logLevel = 'DEBUG'
    # configuration for overall machine load monitor: cpuPoller (percentage values)
    config.AlertGenerator.section_("cpuPoller")
    config.AlertGenerator.cpuPoller.soft = 70 # [percent]
    config.AlertGenerator.cpuPoller.critical = 90 # [percent]
    config.AlertGenerator.cpuPoller.pollInterval = 10 # [second]
    # period during which measurements are collected before evaluating for possible alert triggering
    config.AlertGenerator.cpuPoller.period = periodAlertGeneratorPollers # [second]
    # configuration for overall used physical memory monitor: memPoller (percentage of total physical memory)
    config.AlertGenerator.section_("memPoller")
    config.AlertGenerator.memPoller.soft = 70 # [percent]
    config.AlertGenerator.memPoller.critical = 90 # [percent]
    config.AlertGenerator.memPoller.pollInterval = 10 # [second]
    # period during which measurements are collected before evaluating for possible alert triggering
    config.AlertGenerator.memPoller.period = periodAlertGeneratorPollers # [second]
    # configuration for available disk space monitor: diskSpacePoller (percentage usage per partition)
    # (note: no aggregation period is configured for this poller)
    config.AlertGenerator.section_("diskSpacePoller")
    config.AlertGenerator.diskSpacePoller.soft = 70 # [percent]
    config.AlertGenerator.diskSpacePoller.critical = 90 # [percent]
    config.AlertGenerator.diskSpacePoller.pollInterval = 10 # [second]
    # configuration for particular components CPU usage: componentCPUPoller (percentage values)
    config.AlertGenerator.section_("componentsCPUPoller")
    config.AlertGenerator.componentsCPUPoller.soft = 40 # [percent]
    config.AlertGenerator.componentsCPUPoller.critical = 60 # [percent]
    config.AlertGenerator.componentsCPUPoller.pollInterval = 10 # [second]
    # period during which measurements are collected before evaluating for possible alert triggering
    config.AlertGenerator.componentsCPUPoller.period = periodAlertGeneratorPollers # [second]
    # configuration for particular components memory monitor: componentMemPoller (percentage of total physical memory)
    config.AlertGenerator.section_("componentsMemPoller")
    config.AlertGenerator.componentsMemPoller.soft = 40 # [percent]
    config.AlertGenerator.componentsMemPoller.critical = 60 # [percent]
    config.AlertGenerator.componentsMemPoller.pollInterval = 10 # [second]
    # period during which measurements are collected before evaluating for possible alert triggering
    config.AlertGenerator.componentsMemPoller.period = periodAlertGeneratorPollers # [second]
    # configuration for MySQL server CPU monitor: mysqlCPUPoller (percentage values)
    config.AlertGenerator.section_("mysqlCPUPoller")
    config.AlertGenerator.mysqlCPUPoller.soft = 40 # [percent]
    config.AlertGenerator.mysqlCPUPoller.critical = 60 # [percent]
    config.AlertGenerator.mysqlCPUPoller.pollInterval = 10 # [second]
    # period during which measurements are collected before evaluating for possible alert triggering
    config.AlertGenerator.mysqlCPUPoller.period = periodAlertGeneratorPollers # [second]
    # configuration for MySQL memory monitor: mysqlMemPoller (percentage values)
    config.AlertGenerator.section_("mysqlMemPoller")
    config.AlertGenerator.mysqlMemPoller.soft = 40 # [percent]
    config.AlertGenerator.mysqlMemPoller.critical = 60 # [percent]
    config.AlertGenerator.mysqlMemPoller.pollInterval = 10 # [second]
    # period during which measurements are collected before evaluating for possible alert triggering
    config.AlertGenerator.mysqlMemPoller.period = periodAlertGeneratorPollers # [second]
    # configuration for MySQL database size: mysqlDbSizePoller (gigabytes values)
    config.AlertGenerator.section_("mysqlDbSizePoller")
    config.AlertGenerator.mysqlDbSizePoller.soft = 1 # GB
    config.AlertGenerator.mysqlDbSizePoller.critical = 2 # GB
    config.AlertGenerator.mysqlDbSizePoller.pollInterval = 10 # [second]
    # configuration for CouchDB database size monitor: couchDbSizePoller (gigabytes values)
    config.AlertGenerator.section_("couchDbSizePoller")
    config.AlertGenerator.couchDbSizePoller.couchURL = os.getenv("COUCHURL", None)
    config.AlertGenerator.couchDbSizePoller.soft = 1 # GB
    config.AlertGenerator.couchDbSizePoller.critical = 2 # GB
    config.AlertGenerator.couchDbSizePoller.pollInterval = 10 # [second]
    # configuration for CouchDB CPU monitor: couchCPUPoller (percentage values)
    config.AlertGenerator.section_("couchCPUPoller")
    config.AlertGenerator.couchCPUPoller.couchURL = os.getenv("COUCHURL", None)
    config.AlertGenerator.couchCPUPoller.soft = 40 # [percent]
    config.AlertGenerator.couchCPUPoller.critical = 60 # [percent]
    config.AlertGenerator.couchCPUPoller.pollInterval = 10 # [second]
    # period during which measurements are collected before evaluating for possible alert triggering
    config.AlertGenerator.couchCPUPoller.period = periodAlertGeneratorPollers # [second]
    # configuration for CouchDB memory monitor: couchMemPoller (percentage values)
    config.AlertGenerator.section_("couchMemPoller")
    config.AlertGenerator.couchMemPoller.couchURL = os.getenv("COUCHURL", None)
    config.AlertGenerator.couchMemPoller.soft = 40 # [percent]
    config.AlertGenerator.couchMemPoller.critical = 60 # [percent]
    config.AlertGenerator.couchMemPoller.pollInterval = 10 # [second]
    # period during which measurements are collected before evaluating for possible alert triggering
    config.AlertGenerator.couchMemPoller.period = periodAlertGeneratorPollers # [second]
    # configuration for CouchDB HTTP errors poller: couchErrorsPoller (number of error occurrences)
    # (once certain threshold of the HTTP error counters is exceeded, poller keeps sending alerts)
    config.AlertGenerator.section_("couchErrorsPoller")
    config.AlertGenerator.couchErrorsPoller.couchURL = os.getenv("COUCHURL", None)
    config.AlertGenerator.couchErrorsPoller.soft = 100 # [number of error occurrences]
    config.AlertGenerator.couchErrorsPoller.critical = 200 # [number of error occurrences]
    config.AlertGenerator.couchErrorsPoller.observables = (404, 500) # HTTP status codes to watch over
    config.AlertGenerator.couchErrorsPoller.pollInterval = 10 # [second]
    return config
from WMCore.Configuration import Configuration
from os import environ, path

# WMStats service configuration: a Webtools (CherryPy) shell hosting the
# WMStats application, with its service endpoints and polling settings.
config = Configuration()

webtools = config.component_('Webtools')
webtools.application = 'WMStats'

wmstats = config.component_('WMStats')
wmstats.reqmgrURL = "https://cmsweb.cern.ch/reqmgr/reqMgr/"
wmstats.globalQueueURL = "https://cmsweb.cern.ch/couchdb/workqueue/"
wmstats.couchURL = "http://localhost:5984/wmstats/"
wmstats.pollInterval = 600
wmstats.port = 19999
from WMCore.Configuration import Configuration

# DAS web service configuration (keyword-search variant).
config = Configuration()

# web_server configuration
config.component_('web_server')
config.web_server.thread_pool = 30
config.web_server.socket_queue_size = 15
config.web_server.loglevel = 0
config.web_server.host = '0.0.0.0'  # listen on all interfaces
config.web_server.log_screen = True
config.web_server.url_base = '/das'
config.web_server.logfile = ''
config.web_server.port = 8212
# PID file path derived from this config file's location (four components up)
config.web_server.pid = '%s/state/das/das_web_server.pid' % __file__.rsplit('/', 4)[0]
config.web_server.status_update = 2500
config.web_server.web_workers = 50
config.web_server.queue_limit = 1000
config.web_server.qtype = 'Queue'
config.web_server.adjust_input = False # keyword search supersedes this, use True to disable KWS
config.web_server.dbs_daemon = True
config.web_server.dbs_daemon_interval = 600
config.web_server.dbs_daemon_expire = 3600
config.web_server.hot_threshold = 3000
config.web_server.onhold_daemon = False
config.web_server.services = ['dbs_phedex']
# keyword search runs on different port
config.web_server.kws_port = 8214
config.web_server.thread_pool_kws = 10
config.web_server.kws_host = '0.0.0.0'
from WMCore.Configuration import Configuration

# Configuration for a standalone WMAgent emulator.
config = Configuration()

# Agent: identity fields (mostly left unset for the emulator)
agent = config.section_('Agent')
agent.hostName = None
agent.contact = None
agent.teamName = "team_usa"
agent.agentName = None

# General: General Settings Section
general = config.section_('General')
general.workDir = '/home/test/application/WMAgentEmulator'

# CoreDatabase:
# dialect: Choose between oracle, mysql or sqlite
# socket: Set the socket file location for mysql (optional)
# config.CoreDatabase.connectUrl='mysql://*****:*****@cmssrv18.fnal.gov:3306/TestDB'
config.section_('CoreDatabase')

# WMAgents: the emulator component itself
emulator = config.component_('WMAgentEmulator')
emulator.componentDir = general.workDir + '/WMAgentEmulator'
emulator.namespace = "WMQuality.Emulators.WMAgents.WMAgentEmulator"
emulator.pollInterval = 10
# AsyncStageOut / AsyncTransfer component configuration fragment (variant
# with transfer pool sizing and retry settings).
# NOTE(review): workDirectory, databaseUrl, userEmail, agentName,
# serverHostName, teamName, couchUrl, files_database, requests_database,
# couchURLSource and database_src are defined earlier in the file -- confirm.
cache_area = "url_to_CS_cache"
ui_script = '/afs/cern.ch/cms/LCG/LCG-2/UI/cms_ui_env.sh'

config = Configuration()
config.section_('General')
config.General.workDir = workDirectory
config.section_("CoreDatabase")
config.CoreDatabase.connectUrl = databaseUrl

# agent identity
config.section_("Agent")
config.Agent.contact = userEmail
config.Agent.agentName = agentName
config.Agent.hostName = serverHostName
config.Agent.teamName = teamName

config.component_("AsyncTransfer")
config.AsyncTransfer.log_level = logging.INFO
config.AsyncTransfer.namespace = "AsyncStageOut.AsyncTransfer"
config.AsyncTransfer.componentDir = config.General.workDir
config.AsyncTransfer.pollInterval = 10
config.AsyncTransfer.pollViewsInterval = 10
# couch endpoints and databases the component reads/writes
config.AsyncTransfer.couch_instance = couchUrl
config.AsyncTransfer.files_database = files_database
config.AsyncTransfer.requests_database = requests_database
config.AsyncTransfer.data_source = couchURLSource
config.AsyncTransfer.db_source = database_src
config.AsyncTransfer.pluginName = "CentralMonitoring"
config.AsyncTransfer.pluginDir = "AsyncStageOut.Plugins"
config.AsyncTransfer.max_files_per_transfer = 100
config.AsyncTransfer.pool_size = 80
config.AsyncTransfer.max_retry = 3
# instance name : connecturls, {reader needed roles, writer needed roles} if VARIANT == 'prod': db_mapping = {'prod/global': [dbs3_pg_r, {'reader':{},'writer':{'dbs': 'operator', 'dataops': 'production-operator'}}]} elif VARIANT == 'preprod': db_mapping = {'int/global': [dbs3_ig_i2, {'reader':{},'writer':{'dbs': 'operator', 'dataops': 'production-operator'}}]} elif VARIANT == 'dev': db_mapping = {'dev/global': [dbs3_dg_i2, {'reader':{},'writer':{'dbs': 'operator', 'dataops': 'production-operator'}}]} elif VARIANT == 'k8s': db_mapping = {'int/global': [dbs3_k8sg_r,{'reader':{},'writer':{'dbs': 'operator', 'dataops': 'production-operator'}}]} elif VARIANT == 'k8s-dev': db_mapping = {'dev/global': [dbs3_p1_i2,{'reader':{},'writer':{}}]} else: db_mapping = {'dev/global': [dbs3_l2_i2,{'reader':{},'writer':{}}]} config = Configuration() config.component_('SecurityModule') config.SecurityModule.key_file = os.path.join(ROOTDIR, 'auth/wmcore-auth/header-auth-key') config.component_('Webtools') config.Webtools.port = 8252 config.Webtools.thread_pool = 30 # The maximum number of requests which will be queued up before # the server refuses to accept it (default -1, meaning no limit). config.Webtools.accepted_queue_size = -1 config.Webtools.accepted_queue_timeout = 1 # enable CherryPy statistics monitoring config.Webtools.cpstats = False config.Webtools.log_screen = False config.Webtools.proxy_base = 'True' config.Webtools.application = 'dbs' config.Webtools.environment = 'production'
# Application configuration skeleton (duplicate of the variant above); the
# SecurityModule block is kept commented out (credentials masked) until
# security is enabled.
config = Configuration()
# This is the Security config the application will use
#config.component_('SecurityModule')
#config.SecurityModule.enabled = True
#config.SecurityModule.oid_server = 'http://*****:*****@gmail.com'
# PromptSkimScheduler configuration fragment.
# NOTE(review): 'config' is created earlier in the file; the couchurl value
# below has its credentials masked in this copy.
config.General.workDir = "/storage/local/data1/wmagent/work"
config.section_("JobStateMachine")
config.JobStateMachine.couchurl = "http://*****:*****@localhost/WMAgentDB_sfoulkes"

config.component_("PromptSkimScheduler")
config.PromptSkimScheduler.namespace = "WMComponent.PromptSkimScheduler.PromptSkimScheduler"
config.PromptSkimScheduler.componentDir = config.General.workDir + "/PromptSkimScheduler"
config.PromptSkimScheduler.logLevel = "DEBUG"
config.PromptSkimScheduler.pollInterval = 10
config.PromptSkimScheduler.workloadCache = config.General.workDir + "/PromptSkimWorkloads"
# CMSSW runtime environment used by the skim jobs
config.PromptSkimScheduler.scramArch = "slc5_ia32_gcc434"
config.PromptSkimScheduler.cmsPath = "/uscmst1/prod/sw/cms"
# job splitting / merge thresholds
config.PromptSkimScheduler.filesPerJob = 1
config.PromptSkimScheduler.maxMergeEvents = 100000
config.PromptSkimScheduler.maxMergeSize = 4294967296
config.PromptSkimScheduler.minMergeSize = 500000000
config.PromptSkimScheduler.maxMergeFiles = 50
# external services
config.PromptSkimScheduler.phedexURL = "https://cmsweb.cern.ch/phedex/datasvc/json/prod/"
config.PromptSkimScheduler.t0astURL = "oracle://*****:*****@cmscald:1521"
# NOTE(review): this fragment starts inside a dict literal opened earlier in
# the file (per-instance thread counts -- presumably thread_mapping; confirm).
    'prod/phys03': 1, 'prod/caf': 1, 'prod/test': 1,
    'int/global': 1, 'int/phys01': 1, 'int/phys02': 1,
    'int/phys03': 1, 'dev/global': 1, 'dev/phys01': 1,
    'dev/phys02': 1, 'dev/phys03': 1
}

config = Configuration()

# internal web server for the migration service
config.component_('web')
config.web.host = "127.0.0.1"
config.web.port = 8251
config.web.log_screen = True
config.web.thread_pool = 50

# DBS migration component: one database section per active instance
config.component_('dbsmigration')
config.dbsmigration.instances = view_mapping[VARIANT]['DBSMigrate']
config.dbsmigration.section_('database')
db_instances = config.dbsmigration.database.section_('instances')
for instance_name in config.dbsmigration.instances:
    db_config_section = db_instances.section_(instance_name)
    db_config_section.threads = thread_mapping[instance_name]
    db_config_section.dbowner = db_mapping[instance_name]['databaseOwner']
    # migration writes, so the 'writer' connect URL is used
    db_config_section.connectUrl = db_mapping[instance_name]['connectUrl']['writer']
from WMCore.Configuration import Configuration
from WMCore.WMBase import getWMBASE
import os.path
from os import environ

# CMS FileMover web application configuration.
config = Configuration()
# This component has all the configuration of CherryPy
config.component_('Webtools')
# This is the application
config.Webtools.port = 8201
# INADDR_ANY: listen on all interfaces (be visible outside of localhost)
config.Webtools.host = '0.0.0.0'
config.Webtools.application = 'FileMover'
# This is the config for the application
config.component_('FileMover')
# Define the default location for templates for the app
config.FileMover.templates = environ['FILEMOVER_ROOT'] + '/lib/python2.6/site-packages/fm/web/templates'
config.FileMover.admin = '*****@*****.**'
config.FileMover.title = 'CMS FileMover Documentation'
config.FileMover.description = 'Documentation on the FileMover'
config.FileMover.index = "FileMover"
# phedex section
phedex = config.FileMover.section_('phedex')
phedex.url = 'https://cmsweb.cern.ch/phedex/datasvc/json/prod'
# sitedb section
DBS Server cmsweb pre-prod configuration file
"""
import os,logging,sys
from WMCore.Configuration import Configuration

# installation root: three path components above this config file
ROOTDIR = os.path.normcase(os.path.abspath(__file__)).rsplit('/', 3)[0]
DBSVERSION = os.getenv('DBS3_VERSION')
# the DBSSecrets module lives under <root>/auth/dbs
sys.path.append(os.path.join(ROOTDIR,'auth/dbs'))
from DBSSecrets import dbs3_l3_i2
from DBSSecrets import dbs3_l2_i2
from DBSSecrets import dbs3_l1_i2

config = Configuration()
config.component_('SecurityModule')
config.SecurityModule.key_file = os.path.join(ROOTDIR,'auth/wmcore-auth/header-auth-key')

# CherryPy / Webtools server settings
config.component_('Webtools')
config.Webtools.port = 8250
config.Webtools.log_screen = True
config.Webtools.proxy_base = 'True'
config.Webtools.application = 'dbs'
config.Webtools.environment = 'production'

# DBS application metadata; the 'views' section is populated later
config.component_('dbs')
config.dbs.templates = os.path.join(ROOTDIR,'apps/dbs/data/templates/WMCore/WebTools')
config.dbs.title = 'DBS Server'
config.dbs.description = 'CMS DBS Service'
config.dbs.section_('views')
config.dbs.admin = 'cmsdbs'
from WMCore.Configuration import Configuration

# DAS web service configuration (large deployment variant with
# keyword-search server enabled).
config = Configuration()

# web_server configuration
config.component_('web_server')
config.web_server.thread_pool = 150
config.web_server.socket_queue_size = 100
config.web_server.timeout_monitor = True
config.web_server.loglevel = 0
config.web_server.host = '0.0.0.0'  # listen on all interfaces
config.web_server.log_screen = True
config.web_server.url_base = '/das'
config.web_server.logfile = ''
config.web_server.port = 8212
# PID file path derived from this config file's location (four components up)
config.web_server.pid = '%s/state/das/das_web_server.pid' % __file__.rsplit('/', 4)[0]
config.web_server.status_update = 2500
config.web_server.web_workers = 50
config.web_server.queue_limit = 1000
config.web_server.qtype = 'Queue'
config.web_server.adjust_input = True
config.web_server.dbs_daemon = True
config.web_server.dbs_daemon_interval = 600
config.web_server.dbs_daemon_expire = 3600
config.web_server.hot_threshold = 3000
config.web_server.services = ['dbs_phedex']
config.web_server.check_clients = False # set True when DAS ready
# keyword search runs on different port
config.web_server.kws_port = 8214
config.web_server.thread_pool_kws = 10
config.web_server.kws_host = '0.0.0.0'
DBS Server default configuration file
"""
import os, logging, sys
from WMCore.Configuration import Configuration

# installation root: three path components above this config file
ROOTDIR = os.path.normcase(os.path.abspath(__file__)).rsplit("/", 3)[0]
DBSVERSION = os.getenv("DBS3_VERSION")
# the DBSSecrets module lives under <root>/auth/dbs
sys.path.append(os.path.join(ROOTDIR, "auth/dbs"))
from DBSSecrets import dbs3_l3_i2
from DBSSecrets import dbs3_l2_i2
from DBSSecrets import dbs3_l1_i2

config = Configuration()
config.component_("SecurityModule")
config.SecurityModule.key_file = os.path.join(ROOTDIR, "auth/wmcore-auth/header-auth-key")

# CherryPy / Webtools server settings
config.component_("Webtools")
config.Webtools.port = 8250
config.Webtools.log_screen = False
config.Webtools.proxy_base = "True"
config.Webtools.application = "dbs"
config.Webtools.environment = "production"

# DBS application metadata; the 'views' section is populated later
config.component_("dbs")
config.dbs.templates = os.path.join(ROOTDIR, "apps/dbs/data/templates/WMCore/WebTools")
config.dbs.title = "DBS Server"
config.dbs.description = "CMS DBS Service"
config.dbs.section_("views")
config.dbs.admin = "cmsdbs"
class TestChangeState(unittest.TestCase):
    """
    Unit tests for WMCore.JobStateMachine.ChangeState: state-transition
    validation, WMBS persistence, and CouchDB job/FWJR/summary documents.
    """

    def setUp(self):
        """
        _setUp_

        Create the WMBS schema, the job/fwjr/summary couch databases and a
        minimal JobStateMachine configuration.
        """
        self.transitions = Transitions()
        self.testInit = TestInitCouchApp(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setupCouch("changestate_t/jobs", "JobDump")
        self.testInit.setupCouch("changestate_t/fwjrs", "FWJRDump")
        self.testInit.setupCouch("job_summary", "WMStats")
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)
        self.couchServer = CouchServer(dburl=os.getenv("COUCHURL"))
        self.config = Configuration()
        self.config.component_("JobStateMachine")
        self.config.JobStateMachine.couchurl = os.getenv("COUCHURL")
        self.config.JobStateMachine.jobSummaryDBName = "job_summary"
        return

    def tearDown(self):
        """
        _tearDown_

        Cleanup the databases.
        """
        self.testInit.clearDatabase()
        self.testInit.tearDownCouch()
        return

    def _insertTestSite(self):
        """Register the test location so files and jobs can be placed there."""
        locationAction = self.daoFactory(classname="Locations.New")
        locationAction.execute("site1", seName="somese.cern.ch")

    def _createJobGroup(self, lfns, splitAlgo=None):
        """
        Create a workflow/fileset/subscription holding one file per entry in
        lfns and return the job group from a files_per_job=1 split.
        """
        testWorkflow = Workflow(spec="spec.xml", owner="Steve",
                                name="wf001", task="Test")
        testWorkflow.create()
        testFileset = Fileset(name="TestFileset")
        testFileset.create()
        for lfn in lfns:
            newFile = File(lfn=lfn, locations=set(["somese.cern.ch"]))
            newFile.create()
            testFileset.addFile(newFile)
        testFileset.commit()

        if splitAlgo is not None:
            testSubscription = Subscription(fileset=testFileset,
                                            workflow=testWorkflow,
                                            split_algo=splitAlgo)
        else:
            testSubscription = Subscription(fileset=testFileset,
                                            workflow=testWorkflow)
        testSubscription.create()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=testSubscription)
        return jobFactory(files_per_job=1)[0]

    @staticmethod
    def _stampJob(job, taskType):
        """Fill in the ownership fields ChangeState expects on every job."""
        job["user"] = "******"
        job["group"] = "DMWM"
        job["taskType"] = taskType

    def testCheck(self):
        """
        This is the test class for function Check from module ChangeState
        """
        change = ChangeState(self.config, "changestate_t")

        # Run through all good state transitions and assert that they work
        for state in self.transitions.keys():
            for dest in self.transitions[state]:
                change.check(dest, state)

        dummystates = ['dummy1', 'dummy2', 'dummy3', 'dummy4']

        # Then run through some bad state transitions and assertRaises(AssertionError)
        for state in self.transitions.keys():
            for dest in dummystates:
                self.assertRaises(AssertionError, change.check, dest, state)
        return

    def testRecordInCouch(self):
        """
        _testRecordInCouch_

        Verify that jobs, state transitions and fwjrs are recorded correctly.
        """
        change = ChangeState(self.config, "changestate_t")
        self._insertTestSite()

        testWorkflow = Workflow(spec="spec.xml", owner="Steve",
                                name="wf001", task="Test")
        testWorkflow.create()
        testFileset = Fileset(name="TestFileset")
        testFileset.create()
        testSubscription = Subscription(fileset=testFileset,
                                        workflow=testWorkflow,
                                        split_algo="FileBased")
        testSubscription.create()

        testFileA = File(lfn="SomeLFNA", events=1024, size=2048,
                         locations=set(["somese.cern.ch"]))
        testFileB = File(lfn="SomeLFNB", events=1025, size=2049,
                         locations=set(["somese.cern.ch"]))
        testFileA.create()
        testFileB.create()

        testFileset.addFile(testFileA)
        testFileset.addFile(testFileB)
        testFileset.commit()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=testSubscription)
        jobGroup = jobFactory(files_per_job=1)[0]

        assert len(jobGroup.jobs) == 2, \
            "Error: Splitting should have created two jobs."

        testJobA = jobGroup.jobs[0]
        self._stampJob(testJobA, "Merge")
        testJobB = jobGroup.jobs[1]
        self._stampJob(testJobB, "Processing")

        change.propagate([testJobA, testJobB], "new", "none")
        change.propagate([testJobA, testJobB], "created", "new")
        change.propagate([testJobA, testJobB], "executing", "created")

        testJobADoc = change.jobsdatabase.document(testJobA["couch_record"])

        for transition in testJobADoc["states"].itervalues():
            self.assertTrue(type(transition["timestamp"]) in
                            (types.IntType, types.LongType))

        assert testJobADoc["jobid"] == testJobA["id"], \
            "Error: ID parameter is incorrect."
        assert testJobADoc["name"] == testJobA["name"], \
            "Error: Name parameter is incorrect."
        assert testJobADoc["jobgroup"] == testJobA["jobgroup"], \
            "Error: Jobgroup parameter is incorrect."
        assert testJobADoc["workflow"] == testJobA["workflow"], \
            "Error: Workflow parameter is incorrect."
        assert testJobADoc["task"] == testJobA["task"], \
            "Error: Task parameter is incorrect."
        assert testJobADoc["owner"] == testJobA["owner"], \
            "Error: Owner parameter is incorrect."

        assert testJobADoc["mask"]["FirstEvent"] == testJobA["mask"]["FirstEvent"], \
            "Error: First event in mask is incorrect."
        assert testJobADoc["mask"]["LastEvent"] == testJobA["mask"]["LastEvent"], \
            "Error: Last event in mask is incorrect."
        assert testJobADoc["mask"]["FirstLumi"] == testJobA["mask"]["FirstLumi"], \
            "Error: First lumi in mask is incorrect."
        assert testJobADoc["mask"]["LastLumi"] == testJobA["mask"]["LastLumi"], \
            "Error: Last lumi in mask is incorrect."
        assert testJobADoc["mask"]["FirstRun"] == testJobA["mask"]["FirstRun"], \
            "Error: First run in mask is incorrect."
        # Bug fix: the original compared LastEvent against LastRun.
        assert testJobADoc["mask"]["LastRun"] == testJobA["mask"]["LastRun"], \
            "Error: Last run in mask is incorrect."

        assert len(testJobADoc["inputfiles"]) == 1, \
            "Error: Input files parameter is incorrect."

        testJobBDoc = change.jobsdatabase.document(testJobB["couch_record"])

        assert testJobBDoc["jobid"] == testJobB["id"], \
            "Error: ID parameter is incorrect."
        assert testJobBDoc["name"] == testJobB["name"], \
            "Error: Name parameter is incorrect."
        assert testJobBDoc["jobgroup"] == testJobB["jobgroup"], \
            "Error: Jobgroup parameter is incorrect."

        assert testJobBDoc["mask"]["FirstEvent"] == testJobB["mask"]["FirstEvent"], \
            "Error: First event in mask is incorrect."
        assert testJobBDoc["mask"]["LastEvent"] == testJobB["mask"]["LastEvent"], \
            "Error: Last event in mask is incorrect."
        assert testJobBDoc["mask"]["FirstLumi"] == testJobB["mask"]["FirstLumi"], \
            "Error: First lumi in mask is incorrect."
        assert testJobBDoc["mask"]["LastLumi"] == testJobB["mask"]["LastLumi"], \
            "Error: Last lumi in mask is incorrect."
        assert testJobBDoc["mask"]["FirstRun"] == testJobB["mask"]["FirstRun"], \
            "Error: First run in mask is incorrect."
        # Bug fix: the original compared LastEvent against LastRun.
        assert testJobBDoc["mask"]["LastRun"] == testJobB["mask"]["LastRun"], \
            "Error: Last run in mask is incorrect."

        assert len(testJobBDoc["inputfiles"]) == 1, \
            "Error: Input files parameter is incorrect."

        changeStateDB = self.couchServer.connectDatabase(dbname="changestate_t/jobs")
        allDocs = changeStateDB.document("_all_docs")

        self.assertEqual(len(allDocs["rows"]), 3,
                         "Error: Wrong number of documents.")

        couchJobDoc = changeStateDB.document("1")

        assert couchJobDoc["name"] == testJobA["name"], \
            "Error: Name is wrong"
        assert len(couchJobDoc["inputfiles"]) == 1, \
            "Error: Wrong number of input files."

        result = changeStateDB.loadView("JobDump", "jobsByWorkflowName")
        self.assertEqual(len(result["rows"]), 2,
                         "Error: Wrong number of rows.")
        for row in result["rows"]:
            couchJobDoc = changeStateDB.document(row["value"]["id"])
            self.assertEqual(couchJobDoc["_rev"], row["value"]["rev"],
                             "Error: Rev is wrong.")
        return

    def testUpdateFailedDoc(self):
        """
        _testUpdateFailedDoc_

        Verify that the update function will work correctly and not throw a 500
        error if the doc didn't make it into the database for some reason.
        """
        change = ChangeState(self.config, "changestate_t")
        self._insertTestSite()

        testWorkflow = Workflow(spec="spec.xml", owner="Steve",
                                name="wf001", task="Test")
        testWorkflow.create()
        testFileset = Fileset(name="TestFileset")
        testFileset.create()
        testSubscription = Subscription(fileset=testFileset,
                                        workflow=testWorkflow,
                                        split_algo="FileBased")
        testSubscription.create()

        testFileA = File(lfn="SomeLFNA", events=1024, size=2048,
                         locations=set(["somese.cern.ch"]))
        testFileA.create()
        testFileset.addFile(testFileA)
        testFileset.commit()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=testSubscription)
        jobGroup = jobFactory(files_per_job=1)[0]

        testJobA = jobGroup.jobs[0]
        self._stampJob(testJobA, "Merge")
        # Point the job at a couch record id before any document exists there.
        testJobA["couch_record"] = str(testJobA["id"])

        change.propagate([testJobA], "new", "none")
        testJobADoc = change.jobsdatabase.document(testJobA["couch_record"])

        self.assertTrue(testJobADoc.has_key("states"))
        self.assertTrue(testJobADoc["states"].has_key("1"))
        return

    def testPersist(self):
        """
        _testPersist_

        This is the test class for function Propagate from module ChangeState
        """
        change = ChangeState(self.config, "changestate_t")
        self._insertTestSite()
        jobGroup = self._createJobGroup(["File%s" % i for i in range(4)],
                                        splitAlgo="FileBased")

        assert len(jobGroup.jobs) == 4, \
            "Error: Splitting should have created four jobs."

        testJobA, testJobB, testJobC, testJobD = jobGroup.jobs
        for job in jobGroup.jobs:
            self._stampJob(job, "Processing")

        change.persist([testJobA, testJobB], "created", "new")
        change.persist([testJobC, testJobD], "new", "none")

        stateDAO = self.daoFactory(classname="Jobs.GetState")

        jobAState = stateDAO.execute(id=testJobA["id"])
        jobBState = stateDAO.execute(id=testJobB["id"])
        jobCState = stateDAO.execute(id=testJobC["id"])
        jobDState = stateDAO.execute(id=testJobD["id"])

        assert jobAState == "created" and jobBState == "created" and \
               jobCState == "new" and jobDState == "new", \
               "Error: Jobs didn't change state correctly."
        return

    def testRetryCount(self):
        """
        _testRetryCount_

        Verify that the retry count is incremented when we move out of the
        submitcooloff or jobcooloff state.
        """
        change = ChangeState(self.config, "changestate_t")
        self._insertTestSite()
        jobGroup = self._createJobGroup(["File%s" % i for i in range(4)],
                                        splitAlgo="FileBased")

        assert len(jobGroup.jobs) == 4, \
            "Error: Splitting should have created four jobs."

        testJobA, testJobB, testJobC, testJobD = jobGroup.jobs
        for job in jobGroup.jobs:
            self._stampJob(job, "Processing")

        # Leaving a cooloff state bumps the retry count; other transitions don't.
        change.persist([testJobA], "created", "submitcooloff")
        change.persist([testJobB], "created", "jobcooloff")
        change.persist([testJobC, testJobD], "new", "none")

        testJobA.load()
        testJobB.load()
        testJobC.load()
        testJobD.load()

        assert testJobA["retry_count"] == 1, \
            "Error: Retry count is wrong."
        assert testJobB["retry_count"] == 1, \
            "Error: Retry count is wrong."
        assert testJobC["retry_count"] == 0, \
            "Error: Retry count is wrong."
        assert testJobD["retry_count"] == 0, \
            "Error: Retry count is wrong."
        return

    def testJobSerialization(self):
        """
        _testJobSerialization_

        Verify that serialization of a job works when adding a FWJR.
        """
        change = ChangeState(self.config, "changestate_t")
        self._insertTestSite()
        jobGroup = self._createJobGroup(["SomeLFNC"])

        assert len(jobGroup.jobs) == 1, \
            "Error: Splitting should have created one job."

        testJobA = jobGroup.jobs[0]
        self._stampJob(testJobA, "Processing")

        change.propagate([testJobA], 'created', 'new')

        myReport = Report()
        reportPath = os.path.join(getTestBase(),
                                  "WMCore_t/JobStateMachine_t/Report.pkl")
        myReport.unpersist(reportPath)
        testJobA["fwjr"] = myReport

        change.propagate([testJobA], 'executing', 'created')

        changeStateDB = self.couchServer.connectDatabase(dbname="changestate_t/fwjrs")
        allDocs = changeStateDB.document("_all_docs")

        self.assertEqual(len(allDocs["rows"]), 2,
                         "Error: Wrong number of documents")

        result = changeStateDB.loadView("FWJRDump", "fwjrsByWorkflowName")
        self.assertEqual(len(result["rows"]), 1,
                         "Error: Wrong number of rows.")
        for row in result["rows"]:
            couchJobDoc = changeStateDB.document(row["value"]["id"])
            self.assertEqual(couchJobDoc["_rev"], row["value"]["rev"],
                             "Error: Rev is wrong.")

        # Pick out the one non-design document: the uploaded FWJR.
        for resultRow in allDocs["rows"]:
            if resultRow["id"] != "_design/FWJRDump":
                fwjrDoc = changeStateDB.document(resultRow["id"])
                break

        assert fwjrDoc["retrycount"] == 0, \
            "Error: Retry count is wrong."
        assert len(fwjrDoc["fwjr"]["steps"].keys()) == 2, \
            "Error: Wrong number of steps in FWJR."
        assert "cmsRun1" in fwjrDoc["fwjr"]["steps"].keys(), \
            "Error: cmsRun1 step is missing from FWJR."
        assert "stageOut1" in fwjrDoc["fwjr"]["steps"].keys(), \
            "Error: stageOut1 step is missing from FWJR."
        return

    def testDuplicateJobReports(self):
        """
        _testDuplicateJobReports_

        Verify that everything works correctly if a job report is added to the
        database more than once.
        """
        change = ChangeState(self.config, "changestate_t")
        self._insertTestSite()
        jobGroup = self._createJobGroup(["SomeLFNC"])

        assert len(jobGroup.jobs) == 1, \
            "Error: Splitting should have created one job."

        testJobA = jobGroup.jobs[0]
        self._stampJob(testJobA, "Processing")

        change.propagate([testJobA], 'created', 'new')

        myReport = Report()
        reportPath = os.path.join(getTestBase(),
                                  "WMCore_t/JobStateMachine_t/Report.pkl")
        myReport.unpersist(reportPath)
        testJobA["fwjr"] = myReport

        # Propagate the same transition twice; only one FWJR doc should result.
        change.propagate([testJobA], 'executing', 'created')
        change.propagate([testJobA], 'executing', 'created')

        changeStateDB = self.couchServer.connectDatabase(dbname="changestate_t/fwjrs")
        allDocs = changeStateDB.document("_all_docs")

        self.assertEqual(len(allDocs["rows"]), 2,
                         "Error: Wrong number of documents")

        for resultRow in allDocs["rows"]:
            if resultRow["id"] != "_design/FWJRDump":
                fwjrDoc = changeStateDB.document(resultRow["id"])
                break
        return

    def testJobKilling(self):
        """
        _testJobKilling_

        Test that we can successfully set jobs to the killed state
        """
        change = ChangeState(self.config, "changestate_t")
        self._insertTestSite()
        jobGroup = self._createJobGroup(["File%s" % i for i in range(4)],
                                        splitAlgo="FileBased")

        assert len(jobGroup.jobs) == 4, \
            "Error: Splitting should have created four jobs."

        testJobA, testJobB, testJobC, testJobD = jobGroup.jobs
        for job in jobGroup.jobs:
            self._stampJob(job, "Processing")

        # Move the jobs into a mix of states, then kill them all.
        change.persist([testJobA], "created", "new")
        change.persist([testJobB], "jobfailed", "executing")
        change.persist([testJobC, testJobD], "executing", "created")

        change.persist([testJobA], "killed", "created")
        change.persist([testJobB], "killed", "jobfailed")
        change.persist([testJobC, testJobD], "killed", "executing")

        for job in [testJobA, testJobB, testJobC, testJobD]:
            job.load()
            self.assertEqual(job['retry_count'], 99999)
            self.assertEqual(job['state'], 'killed')
        return

    def testFWJRInputFileTruncation(self):
        """
        _testFWJRInputFileTruncation_

        Test and see whether the ChangeState code can be used to automatically
        truncate the number of input files in a FWJR.

        Code stolen from the serialization test
        """
        # Force every cmsRun input file list to be truncated to nothing.
        self.config.JobStateMachine.maxFWJRInputFiles = 0
        change = ChangeState(self.config, "changestate_t")
        self._insertTestSite()
        jobGroup = self._createJobGroup(["SomeLFNC"])

        self.assertEqual(len(jobGroup.jobs), 1,
                         "Error: Splitting should have created one job.")

        testJobA = jobGroup.jobs[0]
        self._stampJob(testJobA, "Processing")

        change.propagate([testJobA], 'created', 'new')

        myReport = Report()
        reportPath = os.path.join(getTestBase(),
                                  "WMCore_t/JobStateMachine_t/Report.pkl")
        myReport.unpersist(reportPath)
        testJobA["fwjr"] = myReport

        change.propagate([testJobA], 'executing', 'created')

        changeStateDB = self.couchServer.connectDatabase(dbname="changestate_t/fwjrs")
        allDocs = changeStateDB.document("_all_docs")
        self.assertEqual(len(allDocs["rows"]), 2,
                         "Error: Wrong number of documents")

        result = changeStateDB.loadView("FWJRDump", "fwjrsByWorkflowName")
        self.assertEqual(len(result["rows"]), 1,
                         "Error: Wrong number of rows.")
        for row in result["rows"]:
            couchJobDoc = changeStateDB.document(row["value"]["id"])
            self.assertEqual(couchJobDoc["_rev"], row["value"]["rev"],
                             "Error: Rev is wrong.")

        for resultRow in allDocs["rows"]:
            if resultRow["id"] != "_design/FWJRDump":
                fwjrDoc = changeStateDB.document(resultRow["id"])
                break

        self.assertEqual(fwjrDoc["fwjr"]["steps"]['cmsRun1']['input']['source'], [])
        return

    def testJobSummary(self):
        """
        _testJobSummary_

        Verify that job summaries for jobs with a fwjr are created correctly
        and that status is updated when the updatesummary flag is enabled.
        """
        change = ChangeState(self.config, "changestate_t")
        self._insertTestSite()
        jobGroup = self._createJobGroup(["SomeLFNC"])

        assert len(jobGroup.jobs) == 1, \
            "Error: Splitting should have created one job."

        testJobA = jobGroup.jobs[0]
        self._stampJob(testJobA, "Analysis")

        change.propagate([testJobA], 'created', 'new')

        myReport = Report()
        reportPath = os.path.join(getTestBase(),
                                  "WMCore_t/JobStateMachine_t/Report.pkl")
        myReport.unpersist(reportPath)

        change.propagate([testJobA], 'executing', 'created')
        testJobA["fwjr"] = myReport
        change.propagate([testJobA], 'jobfailed', 'executing')

        changeStateDB = self.couchServer.connectDatabase(dbname="job_summary")
        allDocs = changeStateDB.document("_all_docs")

        self.assertEqual(len(allDocs["rows"]), 2,
                         "Error: Wrong number of documents")

        fwjrDoc = {'state': None}
        for resultRow in allDocs["rows"]:
            if resultRow["id"] != "_design/WMStats":
                fwjrDoc = changeStateDB.document(resultRow["id"])
                break

        self.assertEqual(fwjrDoc['state'], 'jobfailed',
                         "Error: summary doesn't have the expected job state")

        del testJobA["fwjr"]
        change.propagate([testJobA], 'jobcooloff', 'jobfailed',
                         updatesummary=True)
        return
# NOTE(review): fragment of a larger WMAgent configuration builder.  Names such
# as bossAirPlugins, databaseUrl, couchURL, workqueueDBName,
# workqueueInboxDbName, globalLogLevel, and the pre-existing config.General /
# config.Agent sections are defined earlier, outside this excerpt.

# BossAir: batch-system submission plugin layer.
config.section_("BossAir")
config.BossAir.pluginDir = "WMCore.BossAir.Plugins"
config.BossAir.pluginNames = bossAirPlugins
config.BossAir.nCondorProcesses = 1
config.BossAir.multicoreTaskTypes = ["MultiProcessing", "MultiProduction"]
config.BossAir.submitWMSMode = True

# Relational database connection for the agent.
config.section_("CoreDatabase")
config.CoreDatabase.connectUrl = databaseUrl
#config.CoreDatabase.socket = databaseSocket

# Dashboard job-monitoring endpoint.
config.section_("DashboardReporter")
config.DashboardReporter.dashboardHost = "cms-wmagent-job.cern.ch"
config.DashboardReporter.dashboardPort = 8884

# Local work-queue component, attached to the global queue at cmsweb.
config.component_('WorkQueueManager')
config.WorkQueueManager.namespace = "WMComponent.WorkQueueManager.WorkQueueManager"
config.WorkQueueManager.componentDir = config.General.workDir + "/WorkQueueManager"
config.WorkQueueManager.level = 'LocalQueue'
config.WorkQueueManager.logLevel = globalLogLevel
config.WorkQueueManager.couchurl = couchURL
config.WorkQueueManager.dbname = workqueueDBName
config.WorkQueueManager.inboxDatabase = workqueueInboxDbName
config.WorkQueueManager.queueParams = {}
config.WorkQueueManager.queueParams["ParentQueueCouchUrl"] = "https://cmsweb.cern.ch/couchdb/workqueue"
# this has to be unique for different work queue. This is just place holder
config.WorkQueueManager.queueParams["QueueURL"] = "http://%s:5984" % (config.Agent.hostName)

config.component_("DBS3Upload")
"""DAS web server configuration variant with per-role request-cache quotas."""
from WMCore.Configuration import Configuration

config = Configuration()

# web_server component: all settings applied in one data-driven pass.
config.component_('web_server')
_PID_FILE = '%s/state/das/das_web_server.pid' % __file__.rsplit('/', 4)[0]
for _key, _val in [
    ('thread_pool', 30),
    ('socket_queue_size', 15),
    ('loglevel', 0),
    ('host', '0.0.0.0'),
    ('log_screen', True),
    ('url_base', '/das'),
    ('logfile', ''),
    ('port', 8212),
    ('pid', _PID_FILE),
    ('status_update', 2500),
    ('number_of_workers', 8),
    ('queue_limit', 100),
    ('adjust_input', True),
    ('dbs_daemon', True),
    ('dbs_daemon_interval', 600),
    ('dbs_daemon_expire', 3600),
    ('hot_threshold', 3000),
    ('onhold_daemon', True),
]:
    setattr(config.web_server, _key, _val)

# cache requests configuration
config.component_('cacherequests')
config.cacherequests.Admin = 5000
config.cacherequests.Unlimited = 10000
config.cacherequests.ProductionAccess = 5000
def getConfig(testDir):
    """
    Build an AlertGenerator test configuration rooted at testDir.

    Sets up the Agent identity, the AlertProcessor levels this test also
    relies on, the common Alert messaging addresses, and one section per
    resource poller under AlertGenerator.

    :param testDir: directory used as componentDir for the alert components
    :return: populated WMCore Configuration instance
    """
    # Window (seconds) during which measurements are collected before
    # evaluating for possible alert triggering.
    periodAlertGeneratorPollers = 40

    config = Configuration()

    config.section_("Agent")
    config.Agent.useMsgService = False
    config.Agent.useTrigger = False
    config.Agent.hostName = "localhost"
    config.Agent.teamName = "team1,team2,cmsdataops"
    config.Agent.agentName = "WMAgentCommissioning"

    # AlertProcessor values - values for Level soft, resp. critical
    # are also needed by this AlertGenerator test
    config.component_("AlertProcessor")
    config.AlertProcessor.componentDir = testDir
    config.AlertProcessor.section_("critical")
    config.AlertProcessor.section_("soft")
    config.AlertProcessor.critical.level = 5
    config.AlertProcessor.soft.level = 0

    # common 'Alert' section
    config.section_("Alert")
    # destination for the alert messages
    config.Alert.address = "tcp://127.0.0.1:6557"
    # control channel (internal alerts system commands)
    config.Alert.controlAddr = "tcp://127.0.0.1:6559"

    config.component_("AlertGenerator")
    config.AlertGenerator.componentDir = testDir

    # (name, soft, critical, hasPeriod) for every poller.  Thresholds are
    # percentages, except the *DbSizePoller entries (GB) and
    # couchErrorsPoller (number of error occurrences).  hasPeriod marks the
    # pollers that collect over periodAlertGeneratorPollers before alerting.
    pollerTable = [
        ("cpuPoller",           70,  90,  True),   # overall machine CPU load
        ("memPoller",           70,  90,  True),   # overall physical memory
        ("diskSpacePoller",     70,  90,  False),  # per-partition disk usage
        ("componentsCPUPoller", 40,  60,  True),   # per-component CPU usage
        ("componentsMemPoller", 40,  60,  True),   # per-component memory
        ("mysqlCPUPoller",      40,  60,  True),   # MySQL server CPU
        ("mysqlMemPoller",      40,  60,  True),   # MySQL server memory
        ("mysqlDbSizePoller",    1,   2,  False),  # MySQL database size [GB]
        ("couchDbSizePoller",    1,   2,  False),  # CouchDB database size [GB]
        ("couchCPUPoller",      40,  60,  True),   # CouchDB CPU
        ("couchMemPoller",      40,  60,  True),   # CouchDB memory
        ("couchErrorsPoller",  100, 200,  False),  # CouchDB HTTP error counts
    ]
    for name, soft, critical, hasPeriod in pollerTable:
        config.AlertGenerator.section_(name)
        section = getattr(config.AlertGenerator, name)
        section.soft = soft
        section.critical = critical
        section.pollInterval = 10  # [second]
        if hasPeriod:
            section.period = periodAlertGeneratorPollers  # [second]

    # couchErrorsPoller additionally watches specific HTTP status codes;
    # once the counter thresholds are exceeded it keeps sending alerts.
    config.AlertGenerator.couchErrorsPoller.observables = (404, 500)

    return config
from WMCore.Configuration import Configuration

# Configuration for the WMAgentEmulator component used by the test harness.
config = Configuration()

# Agent: identity of this (emulated) agent instance; None values are
# placeholders filled in by the deployment/test environment.
config.section_('Agent')
agent = config.Agent
agent.hostName = None
agent.contact = None
agent.teamName = "team_usa"
agent.agentName = None

# General: common settings shared by all components.
config.section_('General')
config.General.workDir = '/home/test/application/WMAgentEmulator'

# CoreDatabase:
#   dialect: choose between oracle, mysql or sqlite
#   socket:  set the socket file location for mysql (optional)
# Example:
#   config.CoreDatabase.connectUrl = 'mysql://*****:*****@cmssrv18.fnal.gov:3306/TestDB'
config.section_('CoreDatabase')

# WMAgentEmulator component: where it lives and how often it polls.
config.component_('WMAgentEmulator')
emulator = config.WMAgentEmulator
emulator.componentDir = config.General.workDir + '/WMAgentEmulator'
emulator.namespace = "WMQuality.Emulators.WMAgents.WMAgentEmulator"
emulator.pollInterval = 10
# Global WorkQueue configuration pointing at the cmsweb-dev instance.
reqmgrCouchDB = "reqmgr_workload_cache"
HOST = "cmsweb-dev.cern.ch"
REQMGR = "https://cmsweb-dev.cern.ch/reqmgr/reqMgr"
COUCH = "https://cmsweb-dev.cern.ch/couchdb"
WEBURL = "%s/%s" % (COUCH, workqueueDBName)

# Point the WMCore cache at <repo root>/state/workqueue/cache.
root = __file__.rsplit('/', 4)[0]
cache_dir = os.path.join(root, 'state', 'workqueue', 'cache')
os.environ['WMCORE_CACHE_DIR'] = cache_dir

# Nothing after this point should need to be changed.
config = Configuration()

config.section_("Agent")
config.Agent.hostName = HOST

config.component_("WorkQueueManager")
wqm = config.WorkQueueManager
wqm.namespace = "WMComponent.WorkQueueManager.WorkQueueManager"
wqm.couchurl = COUCH
wqm.dbname = workqueueDBName
wqm.inboxDatabase = workqueueInboxDbName
wqm.wmstatDBName = wmstatDBName
wqm.level = "GlobalQueue"
wqm.queueParams = {
    'WMStatsCouchUrl': "%s/%s" % (COUCH, wmstatDBName),
    'QueueURL': WEBURL,
    'ReqMgrServiceURL': REQMGR,
}
wqm.reqMgrConfig = {'endpoint': REQMGR}
def reqMgrConfig(
        componentDir=basedir + "/var",
        installation=os.environ["WMCORE_ROOT"],
        port=8240,
        user=None,
        reqMgrHost="http://%s:%d" % (socket.gethostname().lower(), 8240),
        proxyBase=None,
        couchurl=os.getenv("COUCHURL"),
        yuiroot="/reqmgr/yuiserver/yui",
        configCouchDB='reqmgr_config_cache',
        workloadCouchDB='reqmgr_workload_cache',
        workloadSummaryCouchDB="workloadsummary",
        wmstatCouchDB="wmstats",
        acdcCouchDB="acdcserver",
        connectURL=None,
        startup="Root.py",
        addMonitor=True):
    """
    Build and return a Configuration object for the RequestManager web app.

    :param componentDir: working directory for the reqmgr component
    :param installation: WMCore install root (html/templates/javascript live here)
    :param port: port the web server listens on
    :param user: default requestor shown on the request-creation page
    :param reqMgrHost: base URL of this ReqMgr instance (used by the monitor)
    :param proxyBase: if set, configure the frontend proxy base (Root.py mode only)
    :param couchurl: base CouchDB URL
    :param yuiroot: URL root of the YUI javascript server
    :param configCouchDB: CouchDB name for config cache
    :param workloadCouchDB: CouchDB name for workload cache
    :param workloadSummaryCouchDB: CouchDB name for workload summaries
    :param wmstatCouchDB: CouchDB name for WMStats
    :param acdcCouchDB: CouchDB name for the ACDC server
    :param connectURL: database connect URL (wmcoreD mode only)
    :param startup: "Root.py" for CMS-web deployment, anything else for
                    localhost running via wmcoreD
    :param addMonitor: if True, also configure the GlobalMonitor views
    :return: populated Configuration instance
    """
    config = Configuration()

    # Static-content locations inside the installation tree.
    reqMgrHtml = os.path.join(installation, 'data/html/RequestManager')
    reqMgrTemplates = os.path.join(
        installation, 'data/templates/WMCore/WebTools/RequestManager')
    reqMgrJavascript = os.path.join(installation, 'data/javascript')
    globalOverviewTemplates = os.path.join(
        installation, 'data/templates/WMCore/WebTools/GlobalMonitor')
    globalOverviewJavascript = reqMgrJavascript
    globalOverviewHtml = os.path.join(installation, 'data/html')

    if startup == "Root.py":
        # CMS web mode of ReqMgr running.
        config.component_("Webtools")
        config.Webtools.host = '0.0.0.0'
        config.Webtools.port = port
        config.Webtools.application = "reqmgr"
        if proxyBase:
            # BUG FIX: previously assigned the undefined name `proxy_base`,
            # raising NameError whenever proxyBase was provided.
            config.Webtools.proxy_base = proxyBase
        config.Webtools.environment = 'production'
        config.component_('reqmgr')
        # Database credentials are read from the deployment-local
        # ReqMgrSecrets module, hence the deferred import.
        from ReqMgrSecrets import connectUrl
        config.section_("CoreDatabase")
        # read from Secrets file
        config.CoreDatabase.connectUrl = connectUrl
        config.reqmgr.section_('database')
        config.reqmgr.database.connectUrl = connectUrl
    else:
        # localhost, via wmcoreD ReqMgr running (startup == "wmcoreD").
        config.webapp_("reqmgr")
        config.reqmgr.Webtools.host = '0.0.0.0'
        config.reqmgr.Webtools.port = port
        config.reqmgr.Webtools.environment = 'devel'
        config.reqmgr.database.connectUrl = connectURL

    config.reqmgr.componentDir = componentDir
    config.reqmgr.templates = reqMgrTemplates
    config.reqmgr.html = reqMgrHtml
    config.reqmgr.javascript = reqMgrJavascript
    config.reqmgr.admin = '*****@*****.**'
    config.reqmgr.title = 'CMS Request Manager'
    config.reqmgr.description = 'CMS Request Manager'
    config.reqmgr.couchUrl = couchurl
    config.reqmgr.configDBName = configCouchDB
    config.reqmgr.workloadDBName = workloadCouchDB
    config.reqmgr.wmstatDBName = wmstatCouchDB
    config.reqmgr.acdcDBName = acdcCouchDB
    config.reqmgr.security_roles = ['Admin', 'Developer', 'Data Manager',
                                    'developer', 'admin', 'data-manager']
    config.reqmgr.yuiroot = yuiroot

    # Register the web views served by ReqMgr.
    views = config.reqmgr.section_('views')
    active = views.section_('active')
    active.section_('view')
    active.view.object = 'WMCore.HTTPFrontEnd.RequestManager.ReqMgrBrowser'
    active.section_('admin')
    active.admin.object = 'WMCore.HTTPFrontEnd.RequestManager.Admin'
    active.section_('approve')
    active.approve.object = 'WMCore.HTTPFrontEnd.RequestManager.Approve'
    active.section_('assign')
    active.assign.object = 'WMCore.HTTPFrontEnd.RequestManager.Assign'
    active.section_('closeout')
    active.closeout.object = 'WMCore.HTTPFrontEnd.RequestManager.CloseOut'
    active.section_('announce')
    active.announce.object = 'WMCore.HTTPFrontEnd.RequestManager.Announce'

    # Current REST entry point.
    active.section_('reqMgr')
    active.reqMgr.section_('model')
    active.reqMgr.section_('formatter')
    active.reqMgr.object = 'WMCore.WebTools.RESTApi'
    active.reqMgr.model.object = 'WMCore.HTTPFrontEnd.RequestManager.ReqMgrRESTModel'
    active.reqMgr.default_expires = 0  # no caching
    active.reqMgr.formatter.object = 'WMCore.WebTools.RESTFormatter'
    active.reqMgr.templates = os.path.join(installation,
                                           'data/templates/WMCore/WebTools')

    # Deprecate the old interface (kept for backwards compatibility).
    active.section_('rest')
    active.rest.section_('model')
    active.rest.section_('formatter')
    active.rest.object = 'WMCore.WebTools.RESTApi'
    active.rest.model.object = 'WMCore.HTTPFrontEnd.RequestManager.ReqMgrRESTModel'
    active.rest.default_expires = 0  # no caching
    active.rest.formatter.object = 'WMCore.WebTools.RESTFormatter'
    active.rest.templates = os.path.join(installation,
                                         'data/templates/WMCore/WebTools')

    active.section_('create')
    active.create.object = 'WMCore.HTTPFrontEnd.RequestManager.WebRequestSchema'
    active.create.requestor = user
    active.create.cmsswDefaultVersion = 'CMSSW_5_2_5'

    if addMonitor:
        # GlobalMonitor page plus the REST service backing it.
        active.section_('GlobalMonitor')
        active.GlobalMonitor.object = 'WMCore.HTTPFrontEnd.GlobalMonitor.GlobalMonitorPage'
        active.GlobalMonitor.templates = globalOverviewTemplates
        active.GlobalMonitor.javascript = globalOverviewJavascript
        active.GlobalMonitor.html = globalOverviewHtml
        active.GlobalMonitor.serviceLevel = 'RequestManager'

        active.section_('monitorSvc')
        active.monitorSvc.serviceURL = "%s/reqmgr/reqMgr" % reqMgrHost
        active.monitorSvc.serviceLevel = active.GlobalMonitor.serviceLevel
        active.monitorSvc.workloadSummaryCouchURL = "%s/%s" % (
            couchurl, workloadSummaryCouchDB)
        active.monitorSvc.section_('model')
        active.monitorSvc.section_('formatter')
        active.monitorSvc.object = 'WMCore.WebTools.RESTApi'
        active.monitorSvc.model.object = 'WMCore.HTTPFrontEnd.GlobalMonitor.GlobalMonitorRESTModel'
        active.monitorSvc.default_expires = 0  # no caching
        active.monitorSvc.formatter.object = 'WMCore.WebTools.RESTFormatter'
        active.monitorSvc.template = os.path.join(
            installation, 'data/templates/WMCore/WebTools')
    return config
# Global WorkQueue configuration (reqmgr2-aware cmsweb flavour).
COUCH = "https://%s/couchdb" % HOST
WEBURL = "%s/%s" % (COUCH, workqueueDBName)
LOG_DB_URL = "%s/wmstats_logdb" % COUCH
LOG_REPORTER = "global_workqueue"

# Point the WMCore cache at <repo root>/state/workqueue/cache.
root = __file__.rsplit('/', 4)[0]
cache_dir = os.path.join(root, 'state', 'workqueue', 'cache')
os.environ['WMCORE_CACHE_DIR'] = cache_dir

# Nothing after this point should need to be changed.
config = Configuration()

config.section_("Agent")
config.Agent.hostName = HOST

config.component_("WorkQueueManager")
wqmgr = config.WorkQueueManager
wqmgr.namespace = "WMComponent.WorkQueueManager.WorkQueueManager"
wqmgr.couchurl = COUCH
wqmgr.dbname = workqueueDBName
wqmgr.wmstatDBName = wmstatDBName
wqmgr.inboxDatabase = workqueueInboxDbName
wqmgr.level = "GlobalQueue"
wqmgr.queueParams = {
    'WMStatsCouchUrl': "%s/%s" % (COUCH, wmstatDBName),
    'QueueURL': WEBURL,
    'ReqMgrServiceURL': REQMGR2,
    'RequestDBURL': "%s/%s" % (COUCH, reqmgrCouchDB),
    'central_logdb_url': LOG_DB_URL,
    'log_reporter': LOG_REPORTER,
}
wqmgr.reqMgrConfig = {'endpoint': REQMGR}
# when reqmgr2 is ready change following to endpoint and reqmgr2_only to True
def configure(self, configfile, service, dbinstance):
    """
    Build a DBS test Configuration from an existing web-tools config file.

    Loads `configfile`, looks up the active view named `service`, forces
    its formatter to the generic REST formatter, and assembles a new
    Configuration with a SecurityModule plus a DBS component wired to the
    database instance `dbinstance`.
    """
    cfg = loadConfigurationFile(configfile)
    webtools = cfg.section_("Webtools")
    app = webtools.application
    appconfig = cfg.section_(app)
    dbsconfig = getattr(appconfig.views.active, service)

    # Either we change the formatter OR change the 'Accept' type to
    # application/json (which we don't know how to do at the moment).
    dbsconfig.formatter.object = "WMCore.WebTools.RESTFormatter"

    config = Configuration()
    config.component_('SecurityModule')
    config.SecurityModule.dangerously_insecure = True

    config.component_('DBS')
    dbs = config.DBS
    dbs.application = app
    dbs.model = dbsconfig.model
    dbs.formatter = dbsconfig.formatter
    # Instances are not supported here:
    #   config.DBS.instances = cfg.dbs.instances
    #   config.DBS.database = dbsconfig.database

    dbs.section_('database')
    if self.migration_test:
        # Use one specific database (cms_dbs3_dev_phys02@int2r) for the
        # migration unittests; credentials come from the local DBSSecrets.
        from DBSSecrets import dbs3_dp2_i2
        dbs.database.connectUrl = dbs3_dp2_i2['connectUrl']['writer']
        dbs.database.dbowner = dbs3_dp2_i2['databaseOwner']
        dbs.database.engineParameters = {'pool_size': 15,
                                         'max_overflow': 10,
                                         'pool_timeout': 200}
        version = getattr(dbsconfig.database.instances, dbinstance).version
        dbs.database.version = version or '3.99.98'
        dbs.section_('security')
        dbs.security.params = {}
    else:
        # Use dev/global from the dbs configuration for the reader,
        # writer and dao unittests.
        dbconfig = getattr(dbsconfig.database.instances, dbinstance)
        dbs.database.connectUrl = dbconfig.connectUrl
        dbs.database.dbowner = dbconfig.dbowner
        dbs.database.engineParameters = dbconfig.engineParameters
        dbs.database.version = dbconfig.version or '3.99.98'
        try:
            secconfig = getattr(dbsconfig.security.instances, dbinstance)
        except AttributeError:
            # No security settings for this instance — leave them out.
            pass
        else:
            dbs.section_('security')
            dbs.security.params = secconfig.params

    dbs.default_expires = 900
    return config