Example #1
def main():
    myPhedex = PhEDEx()
    config = loadConfigurationFile(os.environ["WMAGENT_CONFIG"])
    config.CoreDatabase.dialect = "mysql"
    init = WMInit()
    init.setDatabaseConnection(config.CoreDatabase.connectUrl, config.CoreDatabase.dialect, config.CoreDatabase.socket)
    myThread = threading.currentThread()
    daofactory = DAOFactory(package="WMComponent.PhEDExInjector.Database", logger=logging, dbinterface=myThread.dbi)

    getUninjectedDAO = daofactory(classname="GetUninjectedFiles")
    uninjectedFiles = getUninjectedDAO.execute()
    for location in uninjectedFiles:
        for dataset in uninjectedFiles[location]:
            for block in uninjectedFiles[location][dataset]:
                result = myPhedex.getReplicaInfoForFiles(dataset=dataset, block=block)
                phedexBlock = result["phedex"]["block"]
                if not phedexBlock:
                    continue
                phedexBlock = phedexBlock[0]
                filesInjected = [x["name"] for x in phedexBlock["file"]]
                for fileInfo in uninjectedFiles[location][dataset][block]["files"]:
                    lfn = fileInfo["lfn"]
                    if lfn in filesInjected:
                        print lfn
    return 0
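The nested structure returned by the GetUninjectedFiles DAO is only implied by the loops in Example #1. The sketch below spells that shape out; it is inferred solely from those loops, and every key and value is a hypothetical placeholder.

# Hedged sketch of the GetUninjectedFiles result shape, inferred only from
# the loops in Example #1; all names below are hypothetical placeholders.
uninjectedFiles = {
    "T1_US_FNAL_Disk": {                                      # location
        "/Primary/Processed-v1/RAW": {                        # dataset
            "/Primary/Processed-v1/RAW#block-uuid": {         # block
                "files": [{"lfn": "/store/data/file.root"}],
            },
        },
    },
}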
Example #2
def connectToDB():
    """
    _connectToDB_
    
    Connect to the database specified in the WMAgent config.
    """
    if not os.environ.has_key("WMAGENT_CONFIG"):
        print "Please set WMAGENT_CONFIG to point at your WMAgent configuration."
        sys.exit(1)
        
    if not os.path.exists(os.environ["WMAGENT_CONFIG"]):
        print "Can't find config: %s" % os.environ["WMAGENT_CONFIG"]
        sys.exit(1)

    wmAgentConfig = loadConfigurationFile(os.environ["WMAGENT_CONFIG"])
    
    if not hasattr(wmAgentConfig, "CoreDatabase"):
        print "Your config is missing the CoreDatabase section."
        sys.exit(1)

    socketLoc = getattr(wmAgentConfig.CoreDatabase, "socket", None)
    connectUrl = getattr(wmAgentConfig.CoreDatabase, "connectUrl", None)
    (dialect, junk) = connectUrl.split(":", 1)

    myWMInit = WMInit()
    myWMInit.setDatabaseConnection(dbConfig = connectUrl, dialect = dialect,
                                   socketLoc = socketLoc)
    return
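connectToDB() above derives the database dialect from the scheme of the connect URL by splitting at the first colon. A minimal sketch of that step, using made-up URLs (Examples #1 and #11 show the real dialects are "mysql" and "oracle"):

# Minimal sketch of the dialect extraction in connectToDB(); the connect
# URLs below are hypothetical examples, not values from the source.
for connectUrl in ("mysql://user:pass@localhost/wmagent",
                   "oracle://user:pass@cmsdb"):
    dialect, _ = connectUrl.split(":", 1)
    print(dialect)   # -> "mysql", then "oracle"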
Example #3
 def checkdn(self, username, dn):
     """
     is username associated with dn?
     """
     cfg = loadConfigurationFile(self.store)
     user = cfg.Users.section_(username)
     return user and user.dn == dn # The claimed id matches the provided DN 
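checkdn() loads its user store with loadConfigurationFile(self.store), and Examples #17 and #18 below read the same store for the password and profile fields. The sketch below shows what such a store could look like, built with the Configuration API used elsewhere in this listing; the user name and all values are invented, and the import path is assumed to be WMCore.Configuration.

# Hypothetical user store consistent with the attributes read in
# Examples #3, #17 and #18; every value here is made up.
from WMCore.Configuration import Configuration, saveConfigurationFile

users = Configuration()
users.section_("Users")
alice = users.Users.section_("alice")
alice.fullname = "Alice Example"
alice.dn = "/DC=ch/DC=cern/OU=Users/CN=alice"
alice.password = "xxj31ZMTZzkVA"       # crypt()-style hash compared in checkpass()
alice.section_("permissions")          # read via dictionary_() in Example #18

saveConfigurationFile(users, "/tmp/users_store.py")   # hypothetical path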
Example #4
def main():
    """
    _main_
    """
    if 'WMAGENT_CONFIG' not in os.environ:
        os.environ['WMAGENT_CONFIG'] = '/data/srv/wmagent/current/config/wmagent/config.py'

    config = loadConfigurationFile(os.environ["WMAGENT_CONFIG"])

    # Instantiating central reqmgr and local workqueue
    print "ReqMgr2 URL  : %s" % sanitizeURL(config.JobUpdater.reqMgr2Url)['url']
    print "WorkQueue URL: %s and dbname %s" % (sanitizeURL(config.WorkQueueManager.couchurl)['url'],
                                               config.WorkQueueManager.dbname)

    reqmgr2 = ReqMgr(config.JobUpdater.reqMgr2Url)
    workqueue = WorkQueue(config.WorkQueueManager.couchurl, config.WorkQueueManager.dbname)

    print "\nFirst attempt to update prio of wfs that are not in WMBS and only in local queue"
    priorityCache = {}
    workflowsToUpdate = {}
    workflowsToCheck = [x for x in workqueue.getAvailableWorkflows()]
    print "Retrieved %d workflows from workqueue" % len(workflowsToCheck)

    for workflow, priority in workflowsToCheck:
        if workflow not in priorityCache:
            try:
                priorityCache[workflow] = reqmgr2.getRequestByNames(workflow)[workflow]['RequestPriority']
            except Exception, ex:
                print "Couldn't retrieve the priority of request %s" % workflow
                print "Error: %s" % ex
                continue
        if priority != priorityCache[workflow]:
            workflowsToUpdate[workflow] = priorityCache[workflow]
Example #5
def echoInfo(configfile):
    cfg = loadConfigurationFile(configfile)
    # FIXME: Need to switch between different config files.
    """
    #This is used for config file of wmcoreD
    wconfig = cfg.CoreDatabase
    if sys.argv[2] == "database":
        print wconfig.connectUrl
    elif sys.argv[2] == "dbowner":
        print wconfig.dbowner
    else:
        print "Unknown config option: %s" % sys.argv[2]
    """
    #This is for root.py's configure file
    wconfig = cfg.section_("Webtools") 
    app = wconfig.application 
    appconfig = cfg.section_(app) 
    service = list(appconfig.views.active._internal_children)[0] 
    dbsconfig = getattr(appconfig.views.active, service)
    if sys.argv[2] == "database": 
        if 'database' in dbsconfig._internal_children: 
            print dbsconfig.database.connectUrl 
        else: 
            print dbsconfig.database 
    elif sys.argv[2] == "dbowner": 
        print dbsconfig.dbowner 
    else:
        print "Unknown config option: %s" % sys.argv[2] 
Example #6
def configure(configfile):
    cfg = loadConfigurationFile(configfile)
    web_cfg = cfg.web.dictionary_()

    ###configure cherry py
    cherrypy_cfg = {'server.host' : web_cfg.get('host', '127.0.0.1'),
                    'server.socket_port' : web_cfg.get('port', 8251),
                    'log.screen' : web_cfg.get('log_screen', False),
                    'server.thread_pool' : web_cfg.get('thread_pool', 10)}

    cherrypy.config.update(cherrypy_cfg)

    error_log_level = web_cfg.get('error_log_level', logging.WARNING)
    access_log_level = web_cfg.get("access_log_level", logging.INFO)
    cherrypy.log.error_log.setLevel(error_log_level)
    cherrypy.log.access_log.setLevel(access_log_level)

    migration_cfg = cfg.dbsmigration

    migration_config = {}

    for instance in migration_cfg.instances:
        instance_settings = getattr(migration_cfg.database.instances, instance)
        migration_config.setdefault('database', {}).update({instance :
                                                           {'threads' : instance_settings.threads,
                                                            'dbowner' : instance_settings.dbowner,
                                                            'engineParameters' : instance_settings.engineParameters,
                                                            'connectUrl' : instance_settings.connectUrl}})

    return migration_config
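The nested setdefault/update calls in configure() above build a single 'database' mapping keyed by instance name. A sketch of the resulting shape, with hypothetical instance names and settings:

# Hypothetical shape of the dict returned by configure() in Example #6;
# instance names and all values are placeholders.
migration_config = {
    'database': {
        'instance1': {'threads': 2,
                      'dbowner': 'owner1',
                      'engineParameters': {'pool_size': 15},
                      'connectUrl': 'oracle://reader:secret@cmsdb'},
        'instance2': {'threads': 1,
                      'dbowner': 'owner2',
                      'engineParameters': {'pool_size': 15},
                      'connectUrl': 'mysql://reader:secret@localhost/dbs'},
    },
}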
Example #7
    def configure(self, configfile, service):
        cfg = loadConfigurationFile(configfile)
        wconfig = cfg.section_("Webtools")
        app = wconfig.application
        appconfig = cfg.section_(app)
        dbsconfig = getattr(appconfig.views.active, service)
        databasecore = cfg.CoreDatabase

        # Either we change the formatter
        # OR change the 'Accept' type to application/json (which we don't know how to do at this moment)
        dbsconfig.formatter.object = "WMCore.WebTools.RESTFormatter"
        config = Configuration()

        config.section_("CoreDatabase")
        config.CoreDatabase = databasecore

        config.component_('DBS')
        config.DBS.application = app
        config.DBS.model       = dbsconfig.model
        config.DBS.formatter   = dbsconfig.formatter
        config.DBS.version     = dbsconfig.version
        config.DBS.default_expires = 300
        # DBS uses the owner name directly from the app section at the moment (does not pick it up from CoreDatabase)
        config.DBS.dbowner     = databasecore.dbowner
        # Add the CoreDatabase section to DBS
        config.DBS.database = config.CoreDatabase

        return config
Example #8
def main():
    """
    Retrieve the following information from the agent:
      1. number of jobs in condor
      2. list of distinct workflows in wmbs_workflow (and their status in reqmgr2)
      3. amount of wmbs jobs in each status
      4. list of workflows not fully injected
      5. list of subscriptions not finished
      6. amount of files available in wmbs
      7. amount of files acquired in wmbs
      8. list of blocks not closed in phedex/dbs
      9. list of files not uploaded to dbs
      10. list of files not injected into phedex, with parent block
      11. list of files not injected into phedex, without parent block
    """
    args = parseArgs()

    twiki = ('<pre>', '</pre>') if args.twiki else ('', '')
    print(twiki[0])

    os.environ['WMAGENT_CONFIG'] = '/data/srv/wmagent/current/config/wmagent/config.py'
    config = loadConfigurationFile(os.environ["WMAGENT_CONFIG"])

    gmtTimeNow = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
    print("Drain check time: %s\n" % gmtTimeNow)

    print("*** Amount of jobs in condor per workflow, sorted by condor job status:")
    pprint(getCondorJobs())

    getWMBSInfo(config)

    print(twiki[1])
    print("I'm done!")
    sys.exit(0)
Example #9
def main():
    """
    Whatever
    """
    if 'WMAGENT_CONFIG' not in os.environ:
        os.environ['WMAGENT_CONFIG'] = '/data/srv/wmagent/current/config/wmagent/config.py'
    config = loadConfigurationFile(os.environ["WMAGENT_CONFIG"])

    if len(sys.argv) != 2:
        print("You must provide a request name")
        sys.exit(1)

    reqName = sys.argv[1]

    globalWQBackend = WorkQueueBackend(config.WorkloadSummary.couchurl, db_name="workqueue")
    localWQBackend = WorkQueueBackend(config.WorkQueueManager.couchurl, db_name="workqueue")
    localWQInbox = WorkQueueBackend(config.WorkQueueManager.couchurl, db_name="workqueue_inbox")

    gqDocIDs = globalWQBackend.getElements(RequestName=reqName)
    localDocIDs = localWQBackend.getElements(RequestName=reqName)
    localInboxDocIDs = localWQInbox.getElements(RequestName=reqName)

    createElementsSummary(reqName, gqDocIDs, globalWQBackend.queueUrl)
    createElementsSummary(reqName, localDocIDs, localWQBackend.queueUrl)
    createElementsSummary(reqName, localInboxDocIDs, localWQInbox.queueUrl)
    
    sys.exit(0)
Example #10
def thrashCouch():
    """
    _thrashCouch_

    """
    jobs = {"new": set(), "created": set(), "executing": set(),
            "complete": set(), "success": set(), "cleanout": set()}

    config = loadConfigurationFile(os.environ["WMAGENT_CONFIG"])
    changeState = ChangeState(config)

    myReport = Report()
    myReport.unpersist(os.path.join(getWMBASE(), "test/python/WMComponent_t/JobAccountant_t/fwjrs/LoadTest00.pkl"))

    for i in range(500):
        jobs = createJobs()
        changeState.recordInCouch(jobs, "created", "new")
        changeState.recordInCouch(jobs, "executing", "created")
        changeState.recordInCouch(jobs, "complete", "executing")
        
        for job in jobs:
            job["fwjr"] = myReport
        
        changeState.recordInCouch(jobs, "success", "complete")
            
        for job in jobs:
            job["fwjr"] = None
        
        changeState.recordInCouch(jobs, "cleanout", "success")
        #time.sleep(10)
    return
Example #11
def main():
    config = loadConfigurationFile(os.environ['WMAGENT_CONFIG'])
    config.CoreDatabase.dialect = 'oracle'
    init = WMInit()
    init.setDatabaseConnection(config.CoreDatabase.connectUrl,
                               config.CoreDatabase.dialect)
    couchDB = Database('wmagent_jobdump/fwjrs', '')
    couchDB2 = Database('wmagent_jobdump/jobs', '')
    myThread = threading.currentThread()
    daofactory = DAOFactory(package = "WMCore.WMBS",
                            logger = logging,
                            dbinterface = myThread.dbi)
    getJobsDAO = daofactory(classname = "Jobs.GetAllJobs")
    completedJobs = getJobsDAO.execute(state = 'complete')
    candidates = []
    while len(completedJobs):
        candidates = []
        chunk = completedJobs[:500]
        completedJobs = completedJobs[500:]
        result = couchDB.loadView('FWJRDump', 'outputByJobID', keys = chunk)
        rows = result['rows']
        for entry in rows:
            candidates.append(entry['key'])
        for jobId in candidates:
            doc = couchDB2.document(str(jobId))
            last = max(map(int, doc['states'].keys()))
            lastState = doc['states'][str(last)]['newstate']
            if lastState == 'success':
                print jobId
Example #12
def main():
    """
    Whatever
    """
    if 'WMAGENT_CONFIG' not in os.environ:
        os.environ['WMAGENT_CONFIG'] = '/data/srv/wmagent/current/config/wmagent/config.py'
    config = loadConfigurationFile(os.environ["WMAGENT_CONFIG"])

    # Get local workqueue and workqueue_inbox docs
    localWQBackend = WorkQueueBackend(config.WorkQueueManager.couchurl, db_name="workqueue")
    localWQInboxDB = WorkQueueBackend(config.WorkQueueManager.couchurl, db_name="workqueue_inbox")
    wqDocIDs = localWQBackend.getElements()
    wqInboxDocIDs = localWQInboxDB.getElements()

    # Build and print a summary of these elements
    logging.info("************* LOCAL workqueue elements summary ************")
    foundStatus = createElementsSummary(wqInboxDocIDs, 'workqueue_inbox')
    foundStatus = createElementsSummary(wqDocIDs, 'workqueue')

    # Now investigate docs in the workqueue database
    for status in foundStatus:
        logging.info("\n************* workqueue elements summary by status: %s ************", status)
        elemByStatus = [x for x in wqDocIDs if x['Status'] == status]
        byStatusSummary(elemByStatus, localWQInboxDB=localWQInboxDB)

    # time to look up at central global queue
    logging.info("\n************* GLOBAL workqueue elements summary ************")
    globalWQBackend = WorkQueueBackend(config.WorkloadSummary.couchurl, db_name="workqueue")
    gqDocIDs = globalWQBackend.getElements(status='Available')
    _ = createElementsSummary(gqDocIDs, 'workqueue')
    #logging.info("Found %d 'Available' docs in global workqueue database", len(gqDocIDs))
    byStatusSummary(gqDocIDs)
    
    sys.exit(0)
Example #13
 def test02(self):
     """test02 : start migration and finish it"""
     cfg = loadConfigurationFile(writerconfig)
     setupDB(cfg)
     migrator = DBSMigrationEngine(cfg)
     migrator.setup("NONE")
     migrator.algorithm("NONE")
Example #14
    def configure(self, configfile, service, dbinstance):
        cfg = loadConfigurationFile(configfile)
        wconfig = cfg.section_("Webtools")
        app = wconfig.application
        
        appconfig = cfg.section_(app)
        dbsconfig = getattr(appconfig.views.active, service)

        # Either we change formatter
        # OR change the 'Accept' type to application/json (which we don't know how to do at the moment)
        dbsconfig.formatter.object="WMCore.WebTools.RESTFormatter"
        config = Configuration()
         
        config.component_('SecurityModule')
        config.SecurityModule.dangerously_insecure = True

        config.component_('DBS')
        config.DBS.application = app
        config.DBS.model       = dbsconfig.model
        config.DBS.formatter   = dbsconfig.formatter

        #Does not support instances
        #config.DBS.instances   = cfg.dbs.instances
        #config.DBS.database    = dbsconfig.database

        if self.migration_test:
            #Use one specific database cms_dbs3_dev_phys02@int2r for migration unittests
            from DBSSecrets import dbs3_dp2_i2
            config.DBS.section_('database')
            config.DBS.database.connectUrl = dbs3_dp2_i2['connectUrl']['writer']
            config.DBS.database.dbowner = dbs3_dp2_i2['databaseOwner']
            config.DBS.database.engineParameters = { 'pool_size' : 15, 'max_overflow' : 10, 'pool_timeout' : 200 }
            version = getattr(dbsconfig.database.instances, dbinstance).version
            config.DBS.database.version = version if version else '3.99.98'

            config.DBS.section_('security')
            config.DBS.security.params = {}

        else:
            #Use dev/global from dbs configuration for the reader, writer and dao unittests
            dbconfig = getattr(dbsconfig.database.instances, dbinstance)
            config.DBS.section_('database')
            config.DBS.database.connectUrl = dbconfig.connectUrl
            config.DBS.database.dbowner = dbconfig.dbowner
            config.DBS.database.engineParameters = dbconfig.engineParameters
            config.DBS.database.version = dbconfig.version if dbconfig.version else '3.99.98'
            #config.DBS.database.instance = dbconfig.instance

            try:
                secconfig = getattr(dbsconfig.security.instances, dbinstance)
            except AttributeError:
                pass
            else:
                config.DBS.section_('security')
                config.DBS.security.params = secconfig.params

        config.DBS.default_expires = 900

        return config
Example #15
    def loadConfig(self, configname, overrideargs = None):
        """
        Load the configuration file
        """
        ## If the configuration is already an object it doesn't need to be loaded from the file.
        if isinstance(configname, Configuration):
            self.configuration = configname
            valid, configmsg = self.validateConfig()
            if not valid:
                configmsg += "\nThe documentation about the CRAB configuration file can be found in"
                configmsg += " https://twiki.cern.ch/twiki/bin/view/CMSPublic/CRAB3ConfigurationFile"
                raise ConfigurationException(configmsg)
            return

        if not os.path.isfile(configname):
            raise ConfigurationException("CRAB configuration file %s not found." % (configname))
        self.logger.info("Will use CRAB configuration file %s" % (configname))
        try:
            self.logger.debug("Loading CRAB configuration file.")
            self.configuration = loadConfigurationFile(os.path.abspath(configname))
            ## Overwrite configuration parameters passed as arguments in the command line.
            if overrideargs:
                for singlearg in overrideargs:
                    ## The next line is needed, because we allow the config to be passed as argument
                    ## instead via the --config option.
                    if singlearg == configname: continue
                    if len(singlearg.split('=',1)) == 1:
                        self.logger.info("Wrong format in command-line argument '%s'. Expected format is <section-name>.<parameter-name>=<parameter-value>." % (singlearg))
                        if len(singlearg) > 1 and singlearg[0] == '-':
                            self.logger.info("If the argument '%s' is an option to the %s command, try 'crab %s %s [value for %s option (if required)] [arguments]'." \
                                             % (singlearg, self.__class__.__name__, self.__class__.__name__, singlearg, singlearg))
                        raise ConfigurationException("ERROR: Wrong command-line format.")
                    fullparname, parval = singlearg.split('=',1)
                    # now supporting just one sub params, eg: Data.inputFiles, User.email, ...
                    parnames = fullparname.split('.', 1)
                    if len(parnames) == 1:
                        self.logger.info("Wrong format in command-line argument '%s'. Expected format is <section-name>.<parameter-name>=<parameter-value>." % (singlearg))
                        raise ConfigurationException("ERROR: Wrong command-line format.")
                    self.configuration.section_(parnames[0])
                    type = configParametersInfo.get(fullparname, {}).get('type', 'undefined')
                    if type in ['undefined', 'StringType']:
                        setattr(getattr(self.configuration, parnames[0]), parnames[1], literal_eval("\'%s\'" % parval))
                        self.logger.debug("Overridden parameter %s with '%s'" % (fullparname, parval))
                    else:
                        setattr(getattr(self.configuration, parnames[0]), parnames[1], literal_eval("%s" % parval))
                        self.logger.debug("Overridden parameter %s with %s" % (fullparname, parval))
            valid, configmsg = self.validateConfig() ## Subclasses of SubCommand overwrite this method if needed.
        except RuntimeError as re:
            configmsg  = "Error while loading CRAB configuration:\n%s" % (self._extractReason(configname, re))
            configmsg += "\nPlease refer to https://twiki.cern.ch/twiki/bin/view/CMSPublic/CRAB3CommonErrors#Syntax_error_in_CRAB_configurati"
            configmsg += "\nSee the ./crab.log file for more details."
            configmsg += "\nThe documentation about the CRAB configuration file can be found in"
            configmsg += " https://twiki.cern.ch/twiki/bin/view/CMSPublic/CRAB3ConfigurationFile"
            raise ConfigurationException(configmsg)
        else:
            if not valid:
                configmsg += "\nThe documentation about the CRAB configuration file can be found in"
                configmsg += " https://twiki.cern.ch/twiki/bin/view/CMSPublic/CRAB3ConfigurationFile"
                raise ConfigurationException(configmsg)
Example #16
def main():
    parser = optparse.OptionParser()
    parser.add_option("-c", "--config", dest="config", help="TaskWorker configuration file.")
    opts, args = parser.parse_args()

    configuration = loadConfigurationFile( os.path.abspath(opts.config) )
    renewer = CRAB3ProxyRenewer(configuration)
    renewer.execute()
Example #17
 def checkpass(self, username, password):
     """
     is the password correct for username
     """
     cfg = loadConfigurationFile(self.store)
     user = cfg.Users.section_(username)
     return hasattr(user,'password') and user.password==crypt(password, 
                                                              user.password)
Example #18
 def load(self, user):
     cfg = loadConfigurationFile(self.store)
     user = cfg.Users.section_(user)
     # Cannot pass non str objects since the openid library does a call
     # for object.encode('UTF-8') when preparing the response to send
     return {'permissions'  : user.permissions.dictionary_(),
             'fullname'  : user.fullname,
             'dn'  : user.dn
             }
Example #19
    def test04(self):
        """test04: let's attempt a remigration of the same dataset; it should not raise any errors"""
        data = dict(
            migration_url="http://vocms09.cern.ch:8585/DBS",
            migration_input="/RelValSinglePiPt100/CMSSW_3_1_0_pre9_IDEAL_31X_v1/GEN-SIM-DIGI-RAW-HLTDEBUG",
        )
        migapi.insert("submit", data)

        cfg = loadConfigurationFile(writerconfig)
        setupDB(cfg)
        migrator = DBSMigrationEngine(cfg)
        migrator.setup("NONE")
        migrator.algorithm("NONE")
Example #20
    def loadConfig(self, configname, overrideargs = None):
        """
        Load the configuration file
        """
        # The configuration is already an object and doesn't need to be loaded from the file
        if isinstance(configname, Configuration):
            self.configuration = configname
            valid, configmsg = self.validateConfig()
            if not valid:
                raise ConfigurationException(configmsg)
            return

        if not os.path.isfile(configname):
            raise ConfigurationException("Configuration file '%s' not found" % configname)
        self.logger.info('Will use configuration file %s' % configname)
        try:
            self.logger.debug('Loading configuration')
            self.configuration = loadConfigurationFile(os.path.abspath(configname))
            ## Overwrite configuration parameters passed as arguments in the command line. 
            if overrideargs:
                for singlearg in overrideargs:
                    if singlearg == configname: continue
                    if len(singlearg.split('=',1)) == 1:
                        self.logger.info('Wrong format in command-line argument \'%s\'. Expected format is <section-name>.<parameter-name>=<parameter-value>.' % singlearg)
                        if len(singlearg) > 1 and singlearg[0] == '-':
                            self.logger.info('If the argument \'%s\' is an option to the %s command, try \'crab %s %s [value for %s option (if required)] [arguments]\'.' \
                                             % (singlearg, self.__class__.__name__, self.__class__.__name__, singlearg, singlearg))
                        raise ConfigurationException('ERROR: Wrong command-line format.')
                    fullparname, parval = singlearg.split('=',1)
                    # now supporting just one sub params, eg: Data.inputFiles, User.email, ...
                    parnames = fullparname.split('.', 1)
                    if len(parnames) == 1:
                        self.logger.info('Wrong format in command-line argument \'%s\'. Expected format is <section-name>.<parameter-name>=<parameter-value>' % singlearg)
                        raise ConfigurationException('ERROR: Wrong command-line format.')
                    self.configuration.section_(parnames[0])
                    type = 'undefined'
                    for k in parameters_mapping['on-server'].keys():
                        if fullparname in parameters_mapping['on-server'][k]['config']:
                            type = parameters_mapping['on-server'][k]['type']
                            break
                    if type in ['undefined','StringType']:
                        setattr(getattr(self.configuration, parnames[0]), parnames[1], literal_eval("\'%s\'" % parval))
                        self.logger.debug('Overridden parameter %s with \'%s\'' % (fullparname, parval))
                    else:
                        setattr(getattr(self.configuration, parnames[0]), parnames[1], literal_eval("%s" % parval))
                        self.logger.debug('Overridden parameter %s with %s' % (fullparname, parval))
            valid, configmsg = self.validateConfig() # subclasses of SubCommand override this if needed
        except RuntimeError, re:
            msg = self._extractReason(configname, re)
            raise ConfigurationException("Configuration syntax error:\n%s\nPlease refer to https://twiki.cern.ch/twiki/bin/view/CMSPublic/CRAB3CommonErrors#Configuration_syntax_error\nSee the ./crab.log file for more details" % msg)
Example #21
    def testE(self):
        """test save/load """
        testValues = [
            "string", 123, 123.456,
            ["list", 789, 10.1 ],
            { "dict1" : "value", "dict2" : 10.0 }
            ]
        config = Configuration()
        for x in range(0, 5):
            config.section_("Section%s" % x)
            config.component_("Component%s" % x)
            sect = getattr(config, "Section%s" % x)
            comp = getattr(config, "Component%s" % x)
            sect.document_("This is Section%s" % x)
            comp.document_("This is Component%s" % x)

            for i in range(0, 5):
                setattr(comp, "Parameter%s" % i, testValues[i])
                setattr(sect, "Parameter%s" % i, testValues[i])
                comp.document_("This is Parameter%s" % i,
                               "Parameter%s" %i)
                sect.document_("This is Parameter%s" %i,
                               "Parameter%s" %i)

        stringSave = str(config)
        documentSave = config.documentedString_()
        commentSave = config.commentedString_()

        saveConfigurationFile(config, self.normalSave)
        saveConfigurationFile(config, self.docSave, document = True)
        saveConfigurationFile(config, self.commentSave, comment = True)

        plainConfig = loadConfigurationFile(self.normalSave)

        docConfig = loadConfigurationFile(self.docSave)

        commentConfig = loadConfigurationFile(self.commentSave)
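A stripped-down version of the save/load round trip this test exercises, restricted to calls that appear in Example #21 itself; the file path and parameter are hypothetical and the import path is assumed to be WMCore.Configuration.

# Minimal save/load round trip using only calls shown in Example #21;
# the path and parameter below are hypothetical.
from WMCore.Configuration import Configuration, saveConfigurationFile, loadConfigurationFile

cfg = Configuration()
cfg.section_("Section0")
cfg.Section0.Parameter0 = "string"

saveConfigurationFile(cfg, "/tmp/roundtrip_config.py")
loaded = loadConfigurationFile("/tmp/roundtrip_config.py")
assert loaded.Section0.Parameter0 == "string"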
Example #22
    def insertRunConfig(self, runNumber):
        """
        _insertRunConfig_

        Insert a run config into T0AST.  Pull down the offline configuration as
        well as the online configuration and insert everything into T0AST.
        """
        logging.debug("Run %s does not exist in T0AST" % runNumber)
        logging.debug("Pulling from RunSummary/ConfDB")

        # transfer system sets these, so they should always be present
        versionAndHLTKey = ListRunConfig.retrieveVersionAndHLTKey(self.t0astDBConn,
                                                                  runNumber)                  
        onlineVersion = versionAndHLTKey[0][0]
        hltkey = versionAndHLTKey[0][1]
        logging.debug( "onlineVersion: %s hltkey: %s" % ( onlineVersion, hltkey ) )

        tier0Config = loadConfigurationFile(self.offlineConfDB)

        repackVersion = tier0Config.Global.RepackVersionMappings.get(onlineVersion, None)
        expressVersion = tier0Config.Global.ExpressVersionMappings.get(onlineVersion, None)

        InsertRunConfig.updateRepackExpressVersion(self.t0astDBConn, runNumber,
                                                   repackVersion, expressVersion)

        configuration = ConfDB.getConfiguration(runNumber, hltkey)
                
        if configuration == None:
            raise RuntimeError, "Could not retrieve HLT config for run %s" % runNumber

        InsertRunConfig.insertRunConfig(self.t0astDBConn, runNumber,
                                        configuration[0], configuration[1],
                                        tier0Config.Global.AcquisitionEra)

        InsertRunConfig.insertT0Config(self.t0astDBConn, runNumber,
                                       tier0Config.Global.Version)

        for streamName in configuration[1].keys():
            streamConfig = retrieveStreamConfig(tier0Config, streamName)
            InsertRunConfig.insertStreamConfig(self.t0astDBConn, runNumber,
                                               streamName, streamConfig)

            for datasetName in configuration[1][streamName]:
                datasetConfig = retrieveDatasetConfig(tier0Config, datasetName)
                InsertRunConfig.insertDatasetConfig(self.t0astDBConn, runNumber,
                                                    datasetConfig)
                        
        self.t0astDBConn.commit()
        return
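The offline configuration loaded in insertRunConfig() is itself a Configuration object, and Example #22 only touches a handful of attributes in its Global section. The fragment below sketches what those accesses imply; the era, versions, and mappings are invented, not the real Tier-0 configuration.

# Hypothetical fragment of the offline Tier-0 configuration read in
# Example #22; attribute names follow the accesses above, values are invented.
from WMCore.Configuration import Configuration

tier0Config = Configuration()
tier0Config.section_("Global")
tier0Config.Global.AcquisitionEra = "Run2099A"
tier0Config.Global.Version = "0.0.1"
# plain dicts, read with .get(onlineVersion, None) above
tier0Config.Global.RepackVersionMappings = {"CMSSW_1_2_3": "CMSSW_1_2_4"}
tier0Config.Global.ExpressVersionMappings = {"CMSSW_1_2_3": "CMSSW_1_2_4"}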
Example #23
def echoInfo(configfile):
    #import pdb
    #pdb.set_trace()
    cfg = loadConfigurationFile(configfile)
    wconfig = cfg.section_("Webtools")
    app = wconfig.application
    appconfig = cfg.section_(app)
    service = list(appconfig.views.active._internal_children)[0]
    dbsconfig = getattr(appconfig.views.active, service)   
    if sys.argv[2] == "database":
        if 'database' in dbsconfig._internal_children:
            print dbsconfig.database.connectUrl
        else:
            print dbsconfig.database
    elif sys.argv[2] == "dbowner":
        print dbsconfig.dbowner
    else:
        print "Unknown config option: %s" % sys.argv[2]
Example #24
    def loadConfig(self, configname, overrideargs = None):
        """
        Load the configuration file
        """

        if not os.path.isfile(configname):
            raise ConfigurationException("Configuration file '%s' not found" % configname)
        self.logger.info('Will use configuration file %s' % configname)
        try:
            self.logger.debug('Loading configuration')
            self.configuration = loadConfigurationFile(os.path.abspath(configname))
            if overrideargs:
                for singlearg in overrideargs:
                    if singlearg == configname: continue
                    if len(singlearg.split('=',1)) == 1:
                        self.logger.info('Wrong format in command-line argument \'%s\'. Expected format is <section-name>.<parameter-name>=<parameter-value>.' % singlearg)
                        if len(singlearg) > 1 and singlearg[0] == '-':
                            self.logger.info('If the argument \'%s\' is an option to the %s command, try \'crab %s %s [value for %s option (if required)] [arguments]\'.' \
                                             % (singlearg, self.__class__.__name__, self.__class__.__name__, singlearg, singlearg))
                        raise ConfigurationException('ERROR: Wrong command-line format.')
                    fullparname, parval = singlearg.split('=',1)
                    # now supporting just one sub params, eg: Data.inputFiles, User.email, ...
                    parnames = fullparname.split('.', 1)
                    if len(parnames) == 1:
                        self.logger.info('Wrong format in command-line argument \'%s\'. Expected format is <section-name>.<parameter-name>=<parameter-value>' % singlearg)
                        raise ConfigurationException('ERROR: Wrong command-line format.')
                    self.configuration.section_(parnames[0])
                    type = 'undefined'
                    for k in self.requestmapper.keys():
                        if self.requestmapper[k]['config'] == fullparname:
                            type = self.requestmapper[k]['type']
                            break
                    if type in ['undefined','StringType']:
                        setattr(getattr(self.configuration, parnames[0]), parnames[1], literal_eval("\'%s\'" % parval))
                        self.logger.debug('Overridden parameter %s with \'%s\'' % (fullparname, parval))
                    else:
                        setattr(getattr(self.configuration, parnames[0]), parnames[1], literal_eval("%s" % parval))
                        self.logger.debug('Overridden parameter %s with %s' % (fullparname, parval))
            valid, configmsg = self.validateConfig() # subclasses of SubCommand override this if needed
        except RuntimeError, re:
            msg = self._extractReason(configname, re)
            raise ConfigurationException("Configuration syntax error: \n %s.\nSee the ./crab.log file for more details" % msg)
Example #25
def killWorkflowAgent(WorkflowName):
    """
    Cancel work for a given workflow - delete in wmbs, delete from workqueue db, set canceled in inbox
    """
    # get configuration file path
    if not os.environ.has_key("WMAGENT_CONFIG"):
        os.environ["WMAGENT_CONFIG"] = "/data/srv/wmagent/current/config/wmagent/config.py"

    # load config
    wmConfig = loadConfigurationFile(os.environ["WMAGENT_CONFIG"])
    wqManager = wmConfig.section_("WorkQueueManager")

    couchUrl = wqManager.couchurl
    dbname = wqManager.dbname
    inboxDatabase = wqManager.inboxDatabase
    parentQueueCouchUrl = wqManager.queueParams["ParentQueueCouchUrl"]

    # Creates backend
    backend = WorkQueueBackend(couchUrl, dbname, inboxDatabase, parentQueueCouchUrl)

    args = {}
    args["RequestName"] = WorkflowName
    elements = backend.getElements(**args)

    # take wf from args in case no elements exist for workflow (i.e. work was negotiating)
    requestNames = set([x["RequestName"] for x in elements]) | set([wf for wf in [WorkflowName]])
    if not requestNames:
        print "Workflow is not at the backend"

    inbox_elements = []
    for wf in requestNames:
        inbox_elements.extend(backend.getInboxElements(WorkflowName=wf))

    print "Canceling work for workflow: %s" % (requestNames)
    for workflow in requestNames:
        try:
            connectToDB()
            jobDumpConfig = wmConfig
            bossAirConfig = wmConfig
            killWorkflow(workflow, jobDumpConfig, bossAirConfig)
        except Exception, ex:
            print "Aborting %s wmbs subscription failed: %s" % (workflow, str(ex))
Example #26
    def setUp(self):
        """
        _setUp_

        """
        super(WMBSHelperTest, self).setUp()

        self.testInit = TestInitCouchApp(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection(destroyAllDatabase=True)
        self.testInit.setupCouch("wmbshelper_t/jobs", "JobDump")
        self.testInit.setupCouch("wmbshelper_t/fwjrs", "FWJRDump")
        self.testInit.setupCouch("config_test", "GroupUser", "ConfigCache")
        os.environ["COUCHDB"] = "wmbshelper_t"
        self.testInit.setSchema(customModules=["WMCore.WMBS",
                                               "WMComponent.DBS3Buffer",
                                               "WMCore.BossAir",
                                               "WMCore.ResourceControl"],
                                useDefault=False)

        self.workDir = self.testInit.generateWorkDir()

        self.wmspec = self.createWMSpec()
        self.topLevelTask = getFirstTask(self.wmspec)
        self.inputDataset = self.topLevelTask.inputDataset()
        self.dataset = self.topLevelTask.getInputDatasetPath()
        self.dbs = DBSReader(self.inputDataset.dbsurl)
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=threading.currentThread().logger,
                                     dbinterface=threading.currentThread().dbi)

        self.configFile = EmulatorSetup.setupWMAgentConfig()
        self.config = loadConfigurationFile(self.configFile)

        self.config.component_("JobSubmitter")
        self.config.JobSubmitter.submitDir = self.workDir
        self.config.JobSubmitter.submitScript = os.path.join(getTestBase(),
                                                             'WMComponent_t/JobSubmitter_t',
                                                             'submit.sh')

        return
Example #27
    def getConfig(self):
        """
        _createConfig_
        General config file
        """
        config = loadConfigurationFile('configuration/Example.py')
        config.DBSPublisher.serviceCert = orig_x509_user_proxy
        config.DBSPublisher.serviceKey = orig_x509_user_proxy
        config.DBSPublisher.opsProxy = orig_x509_user_proxy
        config.DBSPublisher.algoName = 'FIFOPriority'
        config.DBSPublisher.pollInterval = 10
        config.DBSPublisher.publication_pool_size = 1
        config.DBSPublisher.componentDir = "test/data"
        config.DBSPublisher.namespace = 'AsyncStageOut.DBSPublisher'
        config.DBSPublisher.log_level = logging.DEBUG
        config.DBSPublisher.files_database = "asynctransfer_1"
        config.DBSPublisher.couch_instance = os.getenv("COUCHURL")
        config.DBSPublisher.publication_max_retry = 0
        config.DBSPublisher.serviceCert = orig_x509_user_proxy
        config.DBSPublisher.max_files_per_block = 10
        config.DBSPublisher.workflow_expiration_time = 3

        return config
Example #28
    def testKillWorkflow(self):
        """
        _testKillWorkflow_

        Verify that workflow killing works correctly.
        """
        configFile = EmulatorSetup.setupWMAgentConfig()

        config = loadConfigurationFile(configFile)

        baAPI = BossAirAPI(config = config)

        # Create nine jobs
        self.setupForKillTest(baAPI = baAPI)
        self.assertEqual(len(baAPI._listRunJobs()), 9)
        killWorkflow("Main", config, config)

        self.verifyFileKillStatus()
        self.verifyJobKillStatus()
        self.assertEqual(len(baAPI._listRunJobs()), 8)

        EmulatorSetup.deleteConfig(configFile)
        return
Example #29
    def loadConfig(self, configname, overrideargs=None):
        """
        Load the configuration file
        """

        if not os.path.isfile(configname):
            raise ConfigurationException("Configuration file '%s' not found" % configname)

        try:
            self.logger.debug('Loading configuration')
            self.configuration = loadConfigurationFile( os.path.abspath(configname))
            if overrideargs:
                for singlearg in overrideargs:
                    fullparname, parval = singlearg.split('=')
                    # now supporting just one sub params, eg: Data.inputFiles, User.email, ...
                    parnames = fullparname.split('.', 1)
                    self.configuration.section_(parnames[0])
                    setattr(getattr(self.configuration, parnames[0]), parnames[1], parval)
                    self.logger.debug('Overridden parameter %s with %s' % (fullparname, parval))
            valid, configmsg = self.validateConfig() # subclasses of SubCommand override this if needed
        except RuntimeError, re:
            msg = self._extractReason(configname, re)
            raise ConfigurationException("Configuration syntax error: \n %s.\nSee the crab.log file for more details" % msg)
Example #30
def update_software(config_file):
    """
    Retrieves CMSSW versions and ScramArchs from the CMS tag collector.
    
    """
    config = loadConfigurationFile(config_file)
    # source of the data
    tag_collector_url = config.views.data.tag_collector_url
    # store the data into CouchDB auxiliary database under "software" document
    couch_host = config.views.data.couch_host
    reqmgr_aux_db = config.views.data.couch_reqmgr_aux_db
    
    # get data from tag collector
    all_archs_and_versions = _get_all_scramarchs_and_versions(tag_collector_url)
    if not all_archs_and_versions:
        return
    
    # get data already stored in CouchDB    
    couchdb = Database(dbname=reqmgr_aux_db, url=couch_host)
    try:
        sw_already_stored = couchdb.document("software")
        del sw_already_stored["_id"]
        del sw_already_stored["_rev"]
    except CouchNotFoundError:
        logging.error("Document id software, does not exist, creating it ...")
        doc = Document(id="software", inputDict=all_archs_and_versions)
        couchdb.commitOne(doc)
        return
    
    # now compare recent data from tag collector and what we already have stored
    # sorting is necessary
    if sorted(all_archs_and_versions) != sorted(sw_already_stored):
        logging.debug("ScramArch/CMSSW releases changed, updating software document ...")
        doc = Document(id="software", inputDict=all_archs_and_versions)
        couchdb.commitOne(doc)
Example #31
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
from optparse import OptionParser
from WMCore.Services.WMStats.WMStatsWriter import WMStatsWriter
from WMCore.Configuration import loadConfigurationFile

if __name__ == "__main__":

    if "WMAGENT_CONFIG" not in os.environ:
        print("The WMAGENT_CONFIG environment variable needs to be set before this can run")
        sys.exit(1)

    wmagentConfig = loadConfigurationFile(os.environ["WMAGENT_CONFIG"])

    if hasattr(wmagentConfig, "AnalyticsDataCollector") and hasattr(wmagentConfig.AnalyticsDataCollector, "centralWMStatsURL"):
        wmstats = WMStatsWriter(wmagentConfig.AnalyticsDataCollector.centralWMStatsURL)
    else:
        print("AnalyticsDataCollector.centralWMStatsURL is not specified")
        sys.exit(1)

    parser = OptionParser()
    parser.set_usage("wmstats-request-status-change [agent_url:port]")

    parser.add_option("-r", "--request", dest = "request",
                      help = "request name")

    parser.add_option("-s", "--status", dest = "newstatus",
                      help = "set to new status")
Example #32
import os
import uuid
import time
import logging
import sys
import json
import traceback
import argparse
import re

import dbs.apis.dbsClient as dbsClient
from Publisher.utils import getProxy
from ServerUtilities import getHashLfn, encodeRequest, oracleOutputMapping
from RESTInteractions import HTTPRequests
from WMCore.Configuration import loadConfigurationFile

config = loadConfigurationFile(os.path.abspath('config.py'))

task_path = config.General.task_path


def Proxy(userDN, group, role, logger):
    """

    """
    userProxy = ''

    try:
        serviceCert = config.General.serviceCert
        serviceKey = config.General.serviceKey

        defaultDelegation = {
Example #33
def main():
    # Re-exec if we don't have unbuffered i/o. This is essential to get server
    # to output its logs synchronous to its operation, such that log output does
    # not remain buffered in the python server. This is particularly important
    # when infrequently accessed server redirects output to 'rotatelogs'.
    if 'PYTHONUNBUFFERED' not in os.environ:
        os.environ['PYTHONUNBUFFERED'] = "1"
        os.execvp("python", ["python"] + sys.argv)

    opt = ArgumentParser(usage=__doc__)
    opt.add_argument("-q",
                     "--quiet",
                     action="store_true",
                     dest="quiet",
                     default=False,
                     help="be quiet, don't print unnecessary output")
    opt.add_argument("-v",
                     "--verify",
                     action="store_true",
                     dest="verify",
                     default=False,
                     help="verify daemon is running, restart if not")
    opt.add_argument("-s",
                     "--status",
                     action="store_true",
                     dest="status",
                     default=False,
                     help="check if the server monitor daemon is running")
    opt.add_argument("-k",
                     "--kill",
                     action="store_true",
                     dest="kill",
                     default=False,
                     help="kill any existing already running daemon")
    opt.add_argument("-r",
                     "--restart",
                     action="store_true",
                     dest="restart",
                     default=False,
                     help="restart, kill any existing running daemon first")
    opt.add_argument(
        "-d",
        "--dir",
        dest="statedir",
        metavar="DIR",
        default=os.getcwd(),
        help="server state directory (default: current working directory)")
    opt.add_argument(
        "-l",
        "--log",
        dest="logfile",
        metavar="DEST",
        default=None,
        help="log to DEST, via pipe if DEST begins with '|', otherwise a file")
    opts, args = opt.parse_known_args()

    if len(args) != 1:
        print("%s: exactly one configuration file required" % sys.argv[0],
              file=sys.stderr)
        sys.exit(1)

    if not os.path.isfile(args[0]) or not os.access(args[0], os.R_OK):
        print("%s: %s: invalid configuration file" % (sys.argv[0], args[0]),
              file=sys.stderr)
        sys.exit(1)

    if not opts.statedir or \
            not os.path.isdir(opts.statedir) or \
            not os.access(opts.statedir, os.W_OK):
        print("%s: %s: invalid state directory" % (sys.argv[0], opts.statedir),
              file=sys.stderr)
        sys.exit(1)

    # Create server object.
    cfg = loadConfigurationFile(args[0])
    app = cfg.main.application.lower()
    server = RESTDaemon(cfg, opts.statedir)

    # Now actually execute the task.
    if opts.status:
        # Show status of running daemon, including exit code matching the
        # daemon status: 0 = running, 1 = not running, 2 = not running but
        # there is a stale pid file. If silent don't print out anything
        # but still return the right exit code.
        running, pid = server.daemon_pid()
        if running:
            if not opts.quiet:
                print("%s is %sRUNNING%s, PID %d" \
                      % (app, COLOR_OK, COLOR_NORMAL, pid))
            sys.exit(0)
        elif pid != None:
            if not opts.quiet:
                print("%s is %sNOT RUNNING%s, stale PID %d" \
                      % (app, COLOR_WARN, COLOR_NORMAL, pid))
            sys.exit(2)
        else:
            if not opts.quiet:
                print("%s is %sNOT RUNNING%s" \
                      % (app, COLOR_WARN, COLOR_NORMAL))
            sys.exit(1)

    elif opts.kill:
        # Stop any previously running daemon. If quiet squelch messages,
        # except removal of stale pid file cannot be silenced.
        server.kill_daemon(silent=opts.quiet)

    else:
        # We are handling a server start, in one of many possible ways:
        # normal start, restart (= kill any previous daemon), or verify
        # (= if daemon is running leave it alone, otherwise start).

        # Convert 'verify' to 'restart' if the server isn't running.
        if opts.verify:
            opts.restart = True
            if server.daemon_pid()[0]:
                sys.exit(0)

        # If restarting, kill any previous daemon, otherwise complain if
        # there is a daemon already running here. Starting overlapping
        # daemons is not supported because pid file would be overwritten
        # and we'd lose track of the previous daemon.
        if opts.restart:
            server.kill_daemon(silent=opts.quiet)
        else:
            running, pid = server.daemon_pid()
            if running:
                print(
                    "Refusing to start over an already running daemon, pid %d"
                    % pid,
                    file=sys.stderr)
                sys.exit(1)

        # If we are (re)starting and were given a log file option, convert
        # the logfile option to a list if it looks like a pipe request, i.e.
        # starts with "|", such as "|rotatelogs foo/bar-%Y%m%d.log".
        if opts.logfile:
            if opts.logfile.startswith("|"):
                server.logfile = re.split(r"\s+", opts.logfile[1:])
            else:
                server.logfile = opts.logfile

        # Actually start the daemon now.
        server.start_daemon()
Example #34
                      "--kill",
                      action="store_true",
                      dest="kill",
                      default=False,
                      help="Kill the daemon")
    parser.add_option("-t",
                      "--terminate",
                      action="store_true",
                      dest="terminate",
                      default=False,
                      help="Terminate the daemon (kill, wait, kill -9)")
    opts, args = parser.parse_args()

    if not opts.inifile:
        sys.exit('No configuration specified')
    cfg = loadConfigurationFile(opts.inifile)

    component = cfg.Webtools.application
    workdir = getattr(cfg.Webtools, 'componentDir', '/tmp/webtools')
    if workdir == None:
        workdir = '/tmp/webtools'
    root = Root(cfg)
    if opts.status:
        daemon = Details('%s/Daemon.xml' % workdir)

        if not daemon.isAlive():
            print("Component:%s Not Running" % component)
        else:
            print("Component:%s Running:%s" % (component, daemon['ProcessID']))
    elif opts.kill:
        daemon = Details('%s/Daemon.xml' % workdir)
Example #35
the AsyncTransfer database to transfer.

WARNING: this will create files in your user area and transfer them with FTS.
"""

import random
from WMCore.Database.CMSCouch import CouchServer
from WMCore.Configuration import loadConfigurationFile
from WMCore.Services.PhEDEx.PhEDEx import PhEDEx
from WMCore.Storage.TrivialFileCatalog import readTFC
import subprocess, os, errno
import logging
import traceback
import datetime

config = loadConfigurationFile(os.environ.get('WMAGENT_CONFIG')).AsyncTransfer
server = CouchServer(config.couch_instance)
db = server.connectDatabase(config.files_database)
proxy = config.serviceCert
emptyFile = config.ftscp
logging.basicConfig(level=config.log_level)
logger = logging.getLogger('AsyncTransfer-TransferDummyData')


def apply_tfc(site_file, site_tfc_map, site):
    """
    Take a CMS_NAME:lfn string and make a pfn
    """
    site_tfc_map[site] = get_tfc_rules(site)
    site, lfn = tuple(site_file.split(':'))
Example #36
def _getConfiguration():
    configFile = os.environ.get("EMULATOR_CONFIG")
    if configFile:
        return loadConfigurationFile(configFile)
    return None
Example #37
    def algorithm(self, parameters=None):
        """
        _algorithm_

        """
        logging.debug("Running Tier0Feeder algorithm...")
        myThread = threading.currentThread()

        findNewRunsDAO = self.daoFactory(classname="Tier0Feeder.FindNewRuns")
        findNewRunStreamsDAO = self.daoFactory(
            classname="Tier0Feeder.FindNewRunStreams")
        findNewExpressRunsDAO = self.daoFactory(
            classname="Tier0Feeder.FindNewExpressRuns")
        releaseExpressDAO = self.daoFactory(
            classname="Tier0Feeder.ReleaseExpress")
        feedStreamersDAO = self.daoFactory(
            classname="Tier0Feeder.FeedStreamers")
        markWorkflowsInjectedDAO = self.daoFactory(
            classname="Tier0Feeder.MarkWorkflowsInjected")

        tier0Config = None
        try:
            tier0Config = loadConfigurationFile(self.tier0ConfigFile)
        except:
            # usually happens when there are syntax errors in the configuration
            logging.exception(
                "Cannot load Tier0 configuration file, not configuring new runs and run/streams"
            )

        # only configure new runs and run/streams if we have a valid Tier0 configuration
        if tier0Config != None:

            #
            # we don't inject data if the Tier0Config is unreadable
            #
            # discover new data from StorageManager and inject into Tier0
            # (if the config specifies a list of runs do it only once)
            #
            # replays call data discovery only once (and ignore data status)
            #
            try:
                if tier0Config.Global.InjectRuns == None:
                    StorageManagerAPI.injectNewData(
                        self.dbInterfaceStorageManager,
                        self.dbInterfaceHltConf,
                        self.dbInterfaceSMNotify,
                        streamerPNN=tier0Config.Global.StreamerPNN,
                        minRun=tier0Config.Global.InjectMinRun,
                        maxRun=tier0Config.Global.InjectMaxRun)
                else:
                    injectRuns = set()
                    for injectRun in tier0Config.Global.InjectRuns:
                        if injectRun not in self.injectedRuns:
                            injectRuns.add(injectRun)
                    for injectRun in injectRuns:
                        StorageManagerAPI.injectNewData(
                            self.dbInterfaceStorageManager,
                            self.dbInterfaceHltConf,
                            self.dbInterfaceSMNotify,
                            streamerPNN=tier0Config.Global.StreamerPNN,
                            injectRun=injectRun)
                        self.injectedRuns.add(injectRun)
            except:
                # shouldn't happen, just a catch all insurance
                logging.exception(
                    "Something went wrong with data retrieval from StorageManager"
                )

            #
            # find new runs, setup global run settings and stream/dataset/trigger mapping
            #
            runHltkeys = findNewRunsDAO.execute(transaction=False)
            for run, hltkey in sorted(runHltkeys.items()):

                hltConfig = None

                # local runs have no hltkey and are configured differently
                if hltkey != None:

                    # retrieve HLT configuration and make sure it's usable
                    try:
                        hltConfig = self.getHLTConfigDAO.execute(
                            hltkey, transaction=False)
                        if hltConfig['process'] == None or len(
                                hltConfig['mapping']) == 0:
                            raise RuntimeError(
                                "HLTConfDB query returned no process or mapping"
                            )
                    except:
                        logging.exception(
                            "Can't retrieve hltkey %s for run %d" %
                            (hltkey, run))
                        continue

                try:
                    RunConfigAPI.configureRun(tier0Config, run, hltConfig)
                except:
                    logging.exception("Can't configure for run %d" % (run))

            #
            # find unconfigured run/stream with data
            # populate RunConfig, setup workflows/filesets/subscriptions
            #
            runStreams = findNewRunStreamsDAO.execute(transaction=False)
            for run in sorted(runStreams.keys()):
                for stream in sorted(runStreams[run]):
                    try:
                        RunConfigAPI.configureRunStream(
                            tier0Config, run, stream, self.specDirectory,
                            self.dqmUploadProxy)
                    except:
                        logging.exception(
                            "Can't configure for run %d and stream %s" %
                            (run, stream))

        #
        # stop and close runs based on RunSummary and StorageManager records
        #
        RunLumiCloseoutAPI.stopRuns(self.dbInterfaceStorageManager)
        RunLumiCloseoutAPI.closeRuns(self.dbInterfaceStorageManager)

        #
        # release runs for Express
        #
        runs = findNewExpressRunsDAO.execute(transaction=False)

        if len(runs) > 0:

            binds = []
            for run in runs:
                binds.append({'RUN': run})

            if self.getExpressReadyRunsDAO != None:
                runs = self.getExpressReadyRunsDAO.execute(binds=binds,
                                                           transaction=False)

            if len(runs) > 0:

                binds = []
                for run in runs:
                    binds.append({'RUN': run})

                releaseExpressDAO.execute(binds=binds, transaction=False)

        #
        # release runs for PromptReco
        # check PromptRecoStatus first, i.e. big red button
        #
        if self.getPromptRecoStatusT0DataSvc():
            RunConfigAPI.releasePromptReco(tier0Config, self.specDirectory,
                                           self.dqmUploadProxy)

        #
        # insert express and reco configs into Tier0 Data Service
        #
        if self.haveT0DataSvc:
            self.updateRunConfigT0DataSvc()
            self.updateRunStreamDoneT0DataSvc()
            self.updateExpressConfigsT0DataSvc()
            self.updateRecoConfigsT0DataSvc()
            self.updateRecoReleaseConfigsT0DataSvc()
            self.lockDatasetsT0DataSvc()

        #
        # mark express and repack workflows as injected if certain conditions are met
        # (we don't do it immediately to prevent the TaskArchiver from cleaning up too early)
        #
        markWorkflowsInjectedDAO.execute(self.dbInterfaceSMNotify != None,
                                         transaction=False)

        #
        # close stream/lumis for run/streams that are active (fileset exists and open)
        #
        RunLumiCloseoutAPI.closeLumiSections(self.dbInterfaceStorageManager)

        #
        # feed new data into existing filesets
        #
        try:
            myThread.transaction.begin()
            feedStreamersDAO.execute(conn=myThread.transaction.conn,
                                     transaction=True)
        except:
            logging.exception("Can't feed data, bailing out...")
            raise
        else:
            myThread.transaction.commit()

        #
        # run ended and run/stream fileset open
        #    => check for complete lumi_closed record, all lumis finally closed and all data feed
        #          => if all conditions satisfied, close the run/stream fileset
        #
        RunLumiCloseoutAPI.closeRunStreamFilesets()

        #
        # check and delete active split lumis
        #
        RunLumiCloseoutAPI.checkActiveSplitLumis()

        #
        # insert workflows into CouchDB for monitoring
        #
        self.feedCouchMonitoring()

        #
        # Update Couch when Repack and Express have closed input filesets (analog to old T0 closeout)
        #
        self.closeOutRealTimeWorkflows()

        #
        # send repacked notifications to StorageManager
        #
        if self.dbInterfaceSMNotify:
            StorageManagerAPI.markRepacked(self.dbInterfaceSMNotify)

        #
        # upload PCL conditions to DropBox
        #
        ConditionUploadAPI.uploadConditions(self.dropboxuser, self.dropboxpass,
                                            self.serviceProxy)

        return
Example #38
            self.logger.error(hte.headers)


if __name__ == '__main__':
    """ Simple main to execute the action standalone. You just need to set the task worker environment.
        The main is set up to work with the production task worker. If you want to use it on your own
        instance you need to change resthost, resturi, and twconfig.
    """
    resthost = 'cmsweb.cern.ch'
    dbinstance = 'prod'
    twconfig = '/data/srv/TaskManager/current/TaskWorkerConfig.py'

    logger = logging.getLogger()
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(module)s %(message)s", datefmt="%a, %d %b %Y %H:%M:%S %Z(%z)")
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)

    from WMCore.Configuration import loadConfigurationFile
    cfg = loadConfigurationFile(twconfig)

    resthost = 'cmsweb-testbed.cern.ch'
    dbinstance = 'dev'
    fmdc = FMDCleaner(cfg.TaskWorker.logsDir)
    crabserver = CRABRest(hostname=resthost, localcert=cfg.TaskWorker.cmscert,
                          localkey=cfg.TaskWorker.cmskey, retry=2,
                          logger=logger)
    crabserver.setDbInstance(dbInstance=dbinstance)
    fmdc._execute(crabserver)
Example #39
                      "--debug",
                      action="store_true",
                      dest="debug",
                      default=False,
                      help="print extra messages to stdout")
    parser.add_option("-q",
                      "--quiet",
                      action="store_true",
                      dest="quiet",
                      default=False,
                      help="don't print any messages to stdout")

    parser.add_option("--config",
                      dest="config",
                      default=None,
                      metavar="FILE",
                      help="configuration file path")

    (options, args) = parser.parse_args()

    # TODO: adapt this evaluation for ASO
    if not options.config:
        raise

    configuration = loadConfigurationFile(os.path.abspath(options.config))

    mw = Getter(configuration, quiet=options.quiet, debug=options.debug)
    signal.signal(signal.SIGINT, mw.quit_)
    signal.signal(signal.SIGTERM, mw.quit_)
    mw.algorithm()