def connectToDB():
    """
    _connectToDB_

    Connect to the database specified in the WMAgent config.
    """
    if "WMAGENT_CONFIG" not in os.environ:
        print("Please set WMAGENT_CONFIG to point at your WMAgent configuration.")
        sys.exit(1)

    if not os.path.exists(os.environ["WMAGENT_CONFIG"]):
        print("Can't find config: %s" % os.environ["WMAGENT_CONFIG"])
        sys.exit(1)

    wmAgentConfig = loadConfigurationFile(os.environ["WMAGENT_CONFIG"])

    if not hasattr(wmAgentConfig, "CoreDatabase"):
        print("Your config is missing the CoreDatabase section.")
        # Bail out here: the getattr calls below would raise AttributeError otherwise
        sys.exit(1)

    socketLoc = getattr(wmAgentConfig.CoreDatabase, "socket", None)
    connectUrl = getattr(wmAgentConfig.CoreDatabase, "connectUrl", None)

    # The dialect is the scheme portion of the connect URL, e.g. "mysql"
    (dialect, junk) = connectUrl.split(":", 1)

    myWMInit = WMInit()
    myWMInit.setDatabaseConnection(dbConfig=connectUrl, dialect=dialect,
                                   socketLoc=socketLoc)
    return
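# Hedged usage sketch (illustrative only, not WMCore code): connectToDB()
# validates WMAGENT_CONFIG and binds the connection to the current thread,
# so callers go through the thread's dbi handle afterwards. The config path
# below is a hypothetical example.
#
#     os.environ["WMAGENT_CONFIG"] = "/data/wmagent/config.py"
#     connectToDB()
#     myThread = threading.currentThread()
#     myThread.dbi.processData("SELECT 1")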
def __call__(self, filesetToProcess):
    """
    The algorithm itself
    """
    # Get configuration
    initObj = WMInit()
    initObj.setLogging()
    initObj.setDatabaseConnection(os.getenv("DATABASE"),
                                  os.getenv('DIALECT'), os.getenv("DBSOCK"))

    myThread = threading.currentThread()
    daofactory = DAOFactory(package="WMCore.WMBS",
                            logger=myThread.logger,
                            dbinterface=myThread.dbi)

    locationNew = daofactory(classname="Locations.New")
    getFileLoc = daofactory(classname="Files.GetLocation")
    fileInFileset = daofactory(classname="Files.InFileset")

    logging.debug("DBSFeeder is processing %s" % filesetToProcess.name)
    logging.debug("the filesetBase name %s" % (filesetToProcess.name).split(":")[0])

    LASTIME = filesetToProcess.lastUpdate

    # Get the start run if asked
    startRun = (filesetToProcess.name).split(":")[3]

    # Get the list of files
    tries = 1

    while True:
        try:
            blocks = self.dbsReader.getFiles((filesetToProcess.name).split(":")[0])
            now = time.time()
            logging.debug("DBS queries done ...")
            break
        except DBSReaderError as ex:
            # Fatal error: close the fileset and give up on it
            logging.error("DBS error: %s, cannot get files for %s" %
                          (str(ex), filesetToProcess.name))
            filesetToProcess.markOpen(False)
            return
        except DbsConnectionError as ex:
            # Connection error, retry
            logging.error("Unable to connect to DBS, retrying: " + str(ex))
            if tries > self.connectionAttempts:
                # Too many errors - bail out
                return
            tries = tries + 1
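# Hedged sketch (illustrative only) of the bounded-retry shape used above:
# fatal errors abandon the work item immediately, transient errors retry up
# to a limit. The fetch callable and the IOError stand-in are assumptions,
# not the DBS API.
import logging

def fetchWithRetry(fetch, maxAttempts=3):
    """Call fetch(); retry transient IOErrors, give up after maxAttempts."""
    for attempt in range(1, maxAttempts + 1):
        try:
            return fetch()
        except IOError as ex:  # stand-in for a transient connection error
            logging.error("Transient error (attempt %d/%d): %s",
                          attempt, maxAttempts, str(ex))
    return None  # too many errors - the caller decides what to do next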
def __init__(self, testClassName="Unknown Class"):
    self.testClassName = testClassName
    self.testDir = None
    self.currModules = []
    global hasDatabase
    self.hasDatabase = hasDatabase
    if self.hasDatabase:
        self.init = WMInit()
    self.deleteTmp = True
def testB_Database(self):
    """
    _Database_

    Testing the database stuff. Only works for the MySQL backend.
    """
    init = WMInit()
    url = os.environ.get("DATABASE")
    dialect = os.environ.get("DIALECT", "MySQL")
    sock = os.environ.get("DBSOCK", None)

    init.setDatabaseConnection(url, dialect, sock)

    try:
        # Initial clear should work
        myThread = threading.currentThread()
        init.clearDatabase()

        # Clearing one schema after another should work
        init.setSchema(modules=['WMCore.WMBS'])
        init.clearDatabase()
        init.setSchema(modules=['WMCore.WMBS'])
        init.clearDatabase()

        # Clearing a non-existent DB should work
        # Drop the database, and then make sure the database gets recreated
        a = myThread.dbi.engine.url.database
        dbName = myThread.dbi.processData(
            "SELECT DATABASE() AS dbname")[0].fetchall()[0][0]
        myThread.dbi.processData("DROP DATABASE %s" % dbName)
        dbName = myThread.dbi.processData(
            "SELECT DATABASE() AS dbname")[0].fetchall()[0][0]
        self.assertEqual(dbName, None)
        init.clearDatabase()
        dbName = myThread.dbi.processData(
            "SELECT DATABASE() AS dbname")[0].fetchall()[0][0]
        self.assertEqual(dbName, a)
        init.setSchema(modules=['WMCore.WMBS'])

        myThread.transaction.begin()
        myThread.transaction.processData("SELECT * FROM wmbs_job")
        init.clearDatabase()
        dbName = myThread.dbi.processData(
            "SELECT DATABASE() AS dbname")[0].fetchall()[0][0]
        self.assertEqual(dbName, a)
        myThread.transaction.begin()
        init.setSchema(modules=['WMCore.WMBS'])
        myThread.transaction.commit()
    except:
        init.clearDatabase()
        raise

    init.clearDatabase()
    return
else:
    # Derive a ProcessingVersion from the current time:
    # day of month plus HHMMSS
    stri = time.ctime().split()
    stri1 = stri[2]
    stri2 = stri[3].replace(":", "")
    arguments["ProcessingVersion"] = '%s_%s' % (stri1, stri2)

wmAgentConfig = loadConfigurationFile(os.environ["WMAGENT_CONFIG"])

if not hasattr(wmAgentConfig, "CoreDatabase"):
    print("Your config is missing the CoreDatabase section.")
    sys.exit(1)

socketLoc = getattr(wmAgentConfig.CoreDatabase, "socket", None)
connectUrl = getattr(wmAgentConfig.CoreDatabase, "connectUrl", None)
(dialect, junk) = connectUrl.split(":", 1)

myWMInit = WMInit()
myWMInit.setDatabaseConnection(dbConfig=connectUrl, dialect=dialect,
                               socketLoc=socketLoc)

workloadName = "CmsRunAnalysis-%s" % arguments["ProcessingVersion"]
workloadFile = "CmsRunAnalysis-%s.pkl" % arguments["ProcessingVersion"]
os.mkdir(workloadName)

cmsRunAna = AnalysisWorkloadFactory()
workload = cmsRunAna(workloadName, arguments)

taskMaker = TaskMaker(workload, os.path.join(os.getcwd(), workloadName))
taskMaker.skipSubscription = True
taskMaker.processWorkload()

workload.save(os.path.join(workloadName, workloadFile))
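# Hedged aside (illustrative only): the ctime()-based stamp above is an
# indirect way to spell "day-of-month_HHMMSS". time.strftime() expresses the
# same intent directly (note one difference: %d zero-pads single-digit days,
# which the ctime() split does not).
import time

def processingVersionStamp():
    """Return a day-of-month_HHMMSS stamp, e.g. '17_142301'."""
    return time.strftime("%d_%H%M%S")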
def testOracleDatabase(self):
    """
    Testing Oracle basic operations
    """
    dialect = os.environ.get("DIALECT", "MySQL")
    if dialect.lower() == 'mysql':
        # this test can only run for Oracle
        return

    init = WMInit()
    url = os.environ.get("DATABASE")
    init.setDatabaseConnection(url, dialect)

    selectDbName = "SELECT ora_database_name FROM DUAL"
    destroyDb = """DECLARE
        BEGIN
            execute immediate 'purge recyclebin';
            -- Tables
            FOR o IN (SELECT table_name name FROM user_tables) LOOP
                execute immediate 'drop table ' || o.name || ' cascade constraints';
            END LOOP;
            -- Sequences
            FOR o IN (SELECT sequence_name name FROM user_sequences) LOOP
                execute immediate 'drop sequence ' || o.name;
            END LOOP;
            -- Triggers
            FOR o IN (SELECT trigger_name name FROM user_triggers) LOOP
                execute immediate 'drop trigger ' || o.name;
            END LOOP;
            -- Synonyms
            FOR o IN (SELECT synonym_name name FROM user_synonyms) LOOP
                execute immediate 'drop synonym ' || o.name;
            END LOOP;
            -- Functions
            FOR o IN (SELECT object_name name FROM user_objects WHERE object_type = 'FUNCTION') LOOP
                execute immediate 'drop function ' || o.name;
            END LOOP;
            -- Procedures
            FOR o IN (SELECT object_name name FROM user_objects WHERE object_type = 'PROCEDURE') LOOP
                execute immediate 'drop procedure ' || o.name;
            END LOOP;
            execute immediate 'purge recyclebin';
        END;"""

    try:
        # Initial clear should work
        myThread = threading.currentThread()
        init.clearDatabase()

        # Clearing a non-existent DB should work
        init.clearDatabase()

        init.setSchema(modules=['WMCore.WMBS'])

        # Drop the database, and then make sure the database gets recreated
        a = myThread.dbi.engine.url.database
        self.assertEqual(myThread.dbi.engine.name, "oracle")
        self.assertIsNone(myThread.dbi.engine.url.database)
        self.assertEqual(myThread.dbi.engine.url.get_backend_name(), "oracle")
        self.assertEqual(myThread.dbi.engine.url.get_driver_name(), "cx_oracle")
        self.assertEqual(myThread.dbi.engine.url.host, "INT2R_NOLB")

        dbName = myThread.dbi.processData(selectDbName)[0].fetchall()[0][0]
        self.assertTrue(dbName)

        myThread.transaction.processData("SELECT * FROM wmbs_job")
        init.clearDatabase()
        dbName = myThread.dbi.processData(selectDbName)[0].fetchall()[0][0]
        self.assertTrue(dbName)
        myThread.dbi.processData(destroyDb)

        init.setSchema(modules=['WMCore.WMBS'])
        myThread.transaction.begin()
        myThread.transaction.processData("SELECT * FROM wmbs_job")
        init.clearDatabase()
        dbName = myThread.dbi.processData(selectDbName)[0].fetchall()[0][0]
        self.assertTrue(dbName)
        myThread.transaction.begin()
        init.setSchema(modules=['WMCore.WMBS'])
        myThread.transaction.commit()
    except:
        init.clearDatabase()
        raise
    else:
        init.clearDatabase()
def testMySQLDatabase(self):
    """
    Testing MySQL basic operations
    """
    dialect = os.environ.get("DIALECT", "MySQL")
    if dialect.lower() == 'oracle':
        # this test can only run for MySQL
        return

    init = WMInit()
    url = os.environ.get("DATABASE")
    sock = os.environ.get("DBSOCK", None)
    init.setDatabaseConnection(url, dialect, sock)

    selectDbName = "SELECT DATABASE() AS dbname"
    destroyDbName = "DROP DATABASE %s"

    try:
        # Initial clear should work
        myThread = threading.currentThread()
        init.clearDatabase()

        # Clearing a non-existent DB should work
        init.clearDatabase()

        init.setSchema(modules=['WMCore.WMBS'])

        # Drop the database, and then make sure the database gets recreated
        a = myThread.dbi.engine.url.database
        self.assertEqual(myThread.dbi.engine.name, "mysql")
        self.assertTrue(myThread.dbi.engine.url.database in
                        ("wmcore_unittest", "WMCore_unit_test"))
        self.assertEqual(myThread.dbi.engine.url.get_backend_name(), "mysql")
        self.assertEqual(myThread.dbi.engine.url.get_driver_name(), "mysqldb")
        self.assertEqual(myThread.dbi.engine.url.host, "localhost")

        dbName = myThread.dbi.processData(selectDbName)[0].fetchall()[0][0]
        self.assertEqual(dbName, a)
        myThread.dbi.processData(destroyDbName % dbName)
        dbName = myThread.dbi.processData(selectDbName)[0].fetchall()[0][0]
        self.assertEqual(dbName, None)
        init.clearDatabase()
        dbName = myThread.dbi.processData(selectDbName)[0].fetchall()[0][0]
        self.assertEqual(dbName, a)
        init.setSchema(modules=['WMCore.WMBS'])

        myThread.transaction.begin()
        myThread.transaction.processData("SELECT * FROM wmbs_job")
        init.clearDatabase()
        dbName = myThread.dbi.processData(selectDbName)[0].fetchall()[0][0]
        self.assertEqual(dbName, a)
        myThread.transaction.begin()
        init.setSchema(modules=['WMCore.WMBS'])
        myThread.transaction.commit()
    except:
        init.clearDatabase()
        raise
    else:
        init.clearDatabase()
    return
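# Hedged sketch (illustrative only) of the schema lifecycle both backend
# tests above follow: every path, success or failure, ends with a cleared
# database so the next test starts from a blank slate.
#
#     init = WMInit()
#     init.setDatabaseConnection(os.environ.get("DATABASE"), dialect)
#     try:
#         init.setSchema(modules=['WMCore.WMBS'])   # create the tables
#         ...                                       # exercise them
#     except:
#         init.clearDatabase()                      # clean up on failure
#         raise                                     # re-raise for the runner
#     else:
#         init.clearDatabase()                      # clean up on success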
sender = context.socket(zmq.PUSH)
sender.connect("tcp://localhost:%s" % outPort)

# Build the config
if not os.path.exists(configPath):
    # We can do nothing without a config file
    logging.error("Something in the way of the config path")
    sys.exit(1)

f = open(configPath, 'r')
config = cPickle.load(f)
f.close()

# Setup DB
wmInit = WMInit()
setupDB(config, wmInit)

# Create JSON handler
jsonHandler = JSONRequests()

wmFactory = WMFactory(name="slaveFactory", namespace=namespace)
slaveClass = wmFactory.loadObject(classname=slaveClassName, args=config)

logging.info("Have slave class")

while True:
    encodedInput = receiver.recv()
    try:
        input = jsonHandler.decode(encodedInput)
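# Hedged sketch (illustrative only, not WMCore code) of the ZeroMQ
# pull-process-push wiring the slave loop above relies on. The port numbers
# are hypothetical.
import zmq

def makeWorkerSockets(inPort=5557, outPort=5558):
    """Return (receiver, sender) sockets for a pull-process-push worker."""
    context = zmq.Context()
    receiver = context.socket(zmq.PULL)   # work items arrive here
    receiver.connect("tcp://localhost:%s" % inPort)
    sender = context.socket(zmq.PUSH)     # results are pushed downstream
    sender.connect("tcp://localhost:%s" % outPort)
    return receiver, sender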
def __call__(self, filesetToProcess):
    """
    The algorithm itself
    """
    global LOCK

    # Get configuration
    initObj = WMInit()
    initObj.setLogging()
    initObj.setDatabaseConnection(os.getenv("DATABASE"),
                                  os.getenv('DIALECT'), os.getenv("DBSOCK"))

    myThread = threading.currentThread()
    daofactory = DAOFactory(package="WMCore.WMBS",
                            logger=myThread.logger,
                            dbinterface=myThread.dbi)

    locationNew = daofactory(classname="Locations.New")
    getFileLoc = daofactory(classname="Files.GetLocation")

    logging.debug("the T0Feeder is processing %s" % filesetToProcess.name)
    logging.debug("the fileset name %s" % (filesetToProcess.name).split(":")[0])

    startRun = (filesetToProcess.name).split(":")[3]
    fileType = (filesetToProcess.name).split(":")[2]

    # URL builder
    primaryDataset = ((filesetToProcess.name).split(":")[0]).split('/')[1]
    processedDataset = ((filesetToProcess.name).split(":")[0]).split('/')[2]
    dataTier = (((filesetToProcess.name).split(":")[0]).split('/')[3]).split('-')[0]

    # First call to the T0 db for this fileset
    # Here add test for the closed fileset
    LASTIME = filesetToProcess.lastUpdate

    url = "/tier0/listfilesoverinterval/%s/%s/%s/%s/%s" % \
          (fileType, LASTIME, primaryDataset, processedDataset, dataTier)

    tries = 1
    while True:
        try:
            myRequester = JSONRequests(url="vocms52.cern.ch:8889")
            requestResult = myRequester.get(url + "/" + "?return_type=text/json%2Bdas")
            newFilesList = requestResult[0]["results"]
        except:
            logging.debug("T0Reader call error...")
            if tries == self.maxRetries:
                return
            else:
                tries += 1
                continue
        logging.debug("T0ASTRun queries done ...")
        now = time.time()
        filesetToProcess.last_update = now
        LASTIME = int(newFilesList['end_time']) + 1
        break

    # Process all files
    if len(newFilesList['files']):
        LOCK.acquire()

        try:
            locationNew.execute(siteName="caf.cern.ch", seName="caf.cern.ch")
        except Exception as e:
            logging.debug("Error when adding new location...")
            logging.debug(e)
            logging.debug(format_exc())

        for files in newFilesList['files']:
            # Assume parents aren't asked
            newfile = File(str(files['lfn']), size=files['file_size'],
                           events=files['events'])

            try:
                if newfile.exists() == False:
                    newfile.create()
                else:
                    newfile.loadData()

                # Add run test if it already exists
                for run in files['runs']:
                    if startRun != 'None' and int(startRun) <= int(run):
                        # ToDo: Distinguish between
                        # filesetA-RunX and filesetA-Run[0-9]*
                        # Rebuild the per-run fileset name:
                        # /Primary/Processed/Tier-Run<N>:<trailing fields>
                        baseName = (filesetToProcess.name).split(':')[0]
                        parts = baseName.split('/')
                        runFilesetName = '/'.join(parts[0:3] +
                                                  [parts[3].split('-')[0]]) + \
                                         '-Run' + str(run) + ":" + \
                                         ":".join((filesetToProcess.name).split(':')[1:])
                        filesetRun = Fileset(name=runFilesetName)

                        if filesetRun.exists() == False:
                            filesetRun.create()
                        else:
                            filesetRun.loadData()

                        # Add test for runs already there (for growing datasets) -
                        # to support files with different runs and lumis
                        if not newfile['runs']:
                            runSet = set()
                            runSet.add(Run(run, *files['runs'][run]))
                            newfile.addRunSet(runSet)

                        fileLoc = getFileLoc.execute(file=files['lfn'])
                        if 'caf.cern.ch' not in fileLoc:
                            newfile.setLocation("caf.cern.ch")

                        filesetRun.addFile(newfile)
                        logging.debug("new file created/loaded added by T0ASTRun...")
                        filesetRun.commit()
            except Exception as e:
                logging.debug("Error when adding new files in T0ASTRun...")
                logging.debug(e)
                logging.debug(format_exc())

            filesetToProcess.setLastUpdate(int(newFilesList['end_time']) + 1)
            filesetToProcess.commit()

        LOCK.release()
    else:
        logging.debug("nothing to do...")
        # For a re-opened or empty fileset, try until the purge time
        if (int(now) / 3600 - LASTIME / 3600) > self.reopenTime:
            filesetToProcess.setLastUpdate(time.time())
            filesetToProcess.commit()

    if LASTIME:
        myRequester = JSONRequests(url="vocms52.cern.ch:8889")
        requestResult = myRequester.get("/tier0/runs")

        for listRun in requestResult[0]:
            if int(startRun) <= int(listRun['run']):
                if listRun['status'] == 'CloseOutExport' or \
                   listRun['status'] == 'Complete' or \
                   listRun['status'] == 'CloseOutT1Skimming':

                    # Rebuild the per-run fileset name for this run
                    baseName = (filesetToProcess.name).split(':')[0]
                    parts = baseName.split('/')
                    closeFilesetName = '/'.join(parts[0:3] +
                                                [parts[3].split('-')[0]]) + \
                                       '-Run' + str(listRun['run']) + ":" + \
                                       ":".join((filesetToProcess.name).split(':')[1:])
                    closeFileset = Fileset(name=closeFilesetName)

                    if closeFileset.exists() != False:
                        closeFileset = Fileset(id=closeFileset.exists())
                        closeFileset.loadData()
                        if closeFileset.open == True:
                            closeFileset.markOpen(False)

    # Commit the fileset
    filesetToProcess.commit()

    # Test the purge time
    logging.debug("Test purge in T0ASTRun ...")
    filesetToProcess.load()
    LASTIME = filesetToProcess.lastUpdate

    if (int(now) / 3600 - LASTIME / 3600) > self.purgeTime:
        filesetToProcess.markOpen(False)
        logging.debug("Purge Done...")

    filesetToProcess.commit()
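# Hedged sketch (illustrative only): the feeders above pack their state into
# the fileset name as "<dataset path>:<...>:<fileType>:<startRun>"; the
# example values in the comments are made up.
def parseFilesetName(name):
    """Split a feeder fileset name into its dataset path and trailing fields."""
    fields = name.split(":")
    datasetPath = fields[0]                 # e.g. "/Primary/Processed/TIER-v1"
    fileType = fields[2]
    startRun = fields[3]                    # may be the literal string 'None'
    primary, processed, tier = datasetPath.split('/')[1:4]
    return datasetPath, fileType, startRun, primary, processed, tier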
def __call__(self, filesetToProcess):
    """
    The algorithm itself
    """
    # Get configuration
    initObj = WMInit()
    initObj.setLogging()
    initObj.setDatabaseConnection(os.getenv("DATABASE"),
                                  os.getenv('DIALECT'), os.getenv("DBSOCK"))

    myThread = threading.currentThread()
    daofactory = DAOFactory(package="WMCore.WMBS",
                            logger=myThread.logger,
                            dbinterface=myThread.dbi)

    lastFileset = daofactory(classname="Fileset.ListFilesetByTask")
    lastWorkflow = daofactory(classname="Workflow.LoadFromTask")
    subsRun = daofactory(classname="Subscriptions.LoadFromFilesetWorkflow")
    successJob = daofactory(classname="Subscriptions.SucceededJobs")
    allJob = daofactory(classname="Subscriptions.Jobs")
    fileInFileset = daofactory(classname="Files.InFileset")

    # Get the start run if asked
    startRun = (filesetToProcess.name).split(":")[3]

    logging.debug("the T0Feeder is processing %s" % filesetToProcess.name)
    logging.debug("the fileset name %s" % (filesetToProcess.name).split(":")[0])

    fileType = (filesetToProcess.name).split(":")[2]
    crabTask = filesetToProcess.name.split(":")[0]
    LASTIME = filesetToProcess.lastUpdate

    tries = 1
    while True:
        try:
            myRequester = JSONRequests(url="vocms52.cern.ch:8889")
            requestResult = myRequester.get("/tier0/runs")
        except:
            logging.debug("T0Reader call error...")
            if tries == self.maxRetries:
                return
            else:
                tries += 1
                continue
        logging.debug("T0ASTRunChain feeder queries done ...")
        now = time.time()
        break

    for listRun in requestResult[0]:
        if startRun != 'None' and int(listRun['run']) >= int(startRun):
            if listRun['status'] == 'CloseOutExport' or \
               listRun['status'] == 'Complete' or \
               listRun['status'] == 'CloseOutT1Skimming':

                crabWorkflow = lastWorkflow.execute(task=crabTask)
                crabFileset = lastFileset.execute(task=crabTask)
                crabrunFileset = Fileset(
                    name=crabFileset[0]["name"].split(':')[0].split('-Run')[0] +
                         '-Run' + str(listRun['run']) + ":" +
                         ":".join(crabFileset[0]['name'].split(':')[1:]))

                if crabrunFileset.exists() > 0:
                    crabrunFileset.load()
                    currSubs = subsRun.execute(crabrunFileset.id,
                                               crabWorkflow[0]['id'])

                    if currSubs:
                        listsuccessJob = successJob.execute(subscription=currSubs['id'])
                        listallJob = allJob.execute(subscription=currSubs['id'])

                        # Only harvest output once every job has succeeded
                        if len(listsuccessJob) == len(listallJob):
                            for currid in listsuccessJob:
                                currjob = Job(id=currid)
                                currjob.load()

                                logging.debug("Reading FJR %s" % currjob['fwjr_path'])
                                jobReport = readJobReport(currjob['fwjr_path'])

                                if len(jobReport) > 0:
                                    if jobReport[0].files:
                                        for newFile in jobReport[0].files:
                                            logging.debug("Output path %s" % newFile['LFN'])

                                            newFileToAdd = File(lfn=newFile['LFN'],
                                                                locations='caf.cern.ch')

                                            LOCK.acquire()
                                            if newFileToAdd.exists() == False:
                                                newFileToAdd.create()
                                            else:
                                                newFileToAdd.loadData()
                                            LOCK.release()

                                            listFile = fileInFileset.execute(filesetToProcess.id)
                                            if {'fileid': newFileToAdd['id']} not in listFile:
                                                filesetToProcess.addFile(newFileToAdd)
                                                filesetToProcess.setLastUpdate(now)
                                                filesetToProcess.commit()
                                                logging.debug("new file created/loaded and added by T0ASTRunChain...")
                                    elif jobReport[0].analysisFiles:
                                        for newFile in jobReport[0].analysisFiles:
                                            logging.debug("Output path %s" % newFile['LFN'])

                                            newFileToAdd = File(lfn=newFile['LFN'],
                                                                locations='caf.cern.ch')

                                            LOCK.acquire()
                                            if newFileToAdd.exists() == False:
                                                newFileToAdd.create()
                                            else:
                                                newFileToAdd.loadData()
                                            LOCK.release()

                                            listFile = fileInFileset.execute(filesetToProcess.id)
                                            if {'fileid': newFileToAdd['id']} not in listFile:
                                                logging.debug("%s loaded and added by T0ASTRunChain" % newFile['LFN'])
                                                filesetToProcess.addFile(newFileToAdd)
                                                filesetToProcess.setLastUpdate(now)
                                                filesetToProcess.commit()
                                                logging.debug("new file created/loaded added by T0ASTRunChain...")
                                    else:
                                        break  # Missed FJR - try next time

    # Commit the fileset
    logging.debug("Test purge in T0ASTRunChain ...")
    filesetToProcess.load()
    LASTIME = filesetToProcess.lastUpdate

    # For a re-opened or empty fileset, try until the purge time
    if (int(now) / 3600 - LASTIME / 3600) > self.reopenTime:
        filesetToProcess.setLastUpdate(time.time())
        filesetToProcess.commit()

    if (int(now) / 3600 - LASTIME / 3600) > self.purgeTime:
        filesetToProcess.markOpen(False)
        logging.debug("Purge Done...")
def __call__(self, filesetToProcess):
    """
    The algorithm itself
    """
    global LOCK

    # Get configuration
    initObj = WMInit()
    initObj.setLogging()
    initObj.setDatabaseConnection(os.getenv("DATABASE"),
                                  os.getenv('DIALECT'), os.getenv("DBSOCK"))

    myThread = threading.currentThread()
    daofactory = DAOFactory(package="WMCore.WMBS",
                            logger=myThread.logger,
                            dbinterface=myThread.dbi)

    locationNew = daofactory(classname="Locations.New")
    getFileLoc = daofactory(classname="Files.GetLocation")
    fileInFileset = daofactory(classname="Files.InFileset")

    logging.debug("the T0Feeder is processing %s" % filesetToProcess.name)
    logging.debug("the fileset name %s" % (filesetToProcess.name).split(":")[0])

    # Get the start run if asked
    startRun = (filesetToProcess.name).split(":")[3]
    fileType = (filesetToProcess.name).split(":")[2]
    LASTIME = filesetToProcess.lastUpdate

    # URL builder
    primaryDataset = ((filesetToProcess.name).split(":")[0]).split('/')[1]
    processedDataset = ((filesetToProcess.name).split(":")[0]).split('/')[2]
    dataTier = ((filesetToProcess.name).split(":")[0]).split('/')[3]

    url = "/tier0/listfilesoverinterval/%s/%s/%s/%s/%s" % \
          (fileType, LASTIME, primaryDataset, processedDataset, dataTier)

    tries = 1
    while True:
        try:
            myRequester = JSONRequests(url="vocms52.cern.ch:8889")
            requestResult = myRequester.get(url + "/" + "?return_type=text/json%2Bdas")
            newFilesList = requestResult[0]["results"]
        except:
            logging.debug("T0Reader call error...")
            if tries == self.maxRetries:
                return
            else:
                tries += 1
                continue
        logging.debug("T0 queries done ...")
        now = time.time()
        LASTIME = int(newFilesList['end_time']) + 1
        break

    # Process all files
    if len(newFilesList['files']):
        try:
            locationNew.execute(siteName="caf.cern.ch", seName="caf.cern.ch")
        except Exception as e:
            logging.debug("Error when adding new location...")
            logging.debug(e)
            logging.debug(format_exc())

        for files in newFilesList['files']:
            # Assume parents aren't asked
            newfile = File(str(files['lfn']), size=files['file_size'],
                           events=files['events'])

            try:
                # Hold the lock only around the create/load and location
                # update; try/finally guarantees exactly one release (the
                # original code released again in the except handler, which
                # could release an already-released lock).
                LOCK.acquire()
                try:
                    if newfile.exists() == False:
                        newfile.create()
                        for run in files['runs']:
                            runSet = set()
                            runSet.add(Run(run, *files['runs'][run]))
                            newfile.addRunSet(runSet)
                    else:
                        newfile.loadData()

                    fileLoc = getFileLoc.execute(file=files['lfn'])
                    if 'caf.cern.ch' not in fileLoc:
                        newfile.setLocation("caf.cern.ch")
                    # else:
                    #     logging.debug("File already associated to %s" % fileLoc)
                finally:
                    LOCK.release()

                if len(newfile["runs"]):
                    # Only add the file if none of its runs precede startRun
                    val = 0
                    for run in newfile['runs']:
                        if run.run < int(startRun):
                            val = 1
                            break

                    if not val:
                        listFile = fileInFileset.execute(filesetToProcess.id)
                        if {'fileid': newfile['id']} not in listFile:
                            filesetToProcess.addFile(newfile)
                            filesetToProcess.setLastUpdate(
                                int(newFilesList['end_time']) + 1)
                            filesetToProcess.commit()
                            logging.debug("new file created/loaded added by T0AST...")
            except Exception as e:
                logging.debug("Error when adding new files in T0AST...")
                logging.debug(e)
                logging.debug(format_exc())

        filesetToProcess.commit()
    else:
        logging.debug("nothing to do in T0AST...")
        # For a re-opened or empty fileset,
        # try until the purge time is reached
        if (int(now) / 3600 - LASTIME / 3600) > self.reopenTime:
            filesetToProcess.setLastUpdate(time.time())
            filesetToProcess.commit()

    # Test the purge time
    logging.debug("Test purge in T0AST ...")
    filesetToProcess.load()
    LASTIME = filesetToProcess.lastUpdate

    if (int(now) / 3600 - LASTIME / 3600) > self.purgeTime:
        filesetToProcess.markOpen(False)
        logging.debug("Purge Done...")

    filesetToProcess.commit()
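# Hedged aside (illustrative only): the val-flag loop above asks "do all of
# this file's runs sit at or above startRun?". any() states that directly.
def allRunsAtOrAbove(runs, startRun):
    """True when no run number falls below startRun."""
    return not any(run.run < int(startRun) for run in runs)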
def __call__(self, filesetToProcess):
    """
    The algorithm itself
    """
    # Get configuration
    initObj = WMInit()
    initObj.setLogging()
    initObj.setDatabaseConnection(os.getenv("DATABASE"),
                                  os.getenv('DIALECT'), os.getenv("DBSOCK"))

    myThread = threading.currentThread()
    daofactory = DAOFactory(package="WMCore.WMBS",
                            logger=myThread.logger,
                            dbinterface=myThread.dbi)

    locationNew = daofactory(classname="Locations.New")
    getFileLoc = daofactory(classname="Files.GetLocation")
    fileInFileset = daofactory(classname="Files.InFileset")

    logging.debug("DBSFeeder is processing %s" % filesetToProcess.name)
    logging.debug("the filesetBase name %s" % (filesetToProcess.name).split(":")[0])

    LASTIME = filesetToProcess.lastUpdate

    # Get the start run if asked
    startRun = (filesetToProcess.name).split(":")[3]

    # Get the list of files
    tries = 1
    while True:
        try:
            blocks = self.dbsReader.getFiles((filesetToProcess.name).split(":")[0])
            now = time.time()
            logging.debug("DBS queries done ...")
            break
        except DBSReaderError as ex:
            # Fatal error: close the fileset and give up on it
            logging.error("DBS error: %s, cannot get files for %s" %
                          (str(ex), filesetToProcess.name))
            filesetToProcess.markOpen(False)
            return
        except DbsConnectionError as ex:
            # Connection error, retry
            logging.error("Unable to connect to DBS, retrying: " + str(ex))
            if tries > self.connectionAttempts:
                # Too many errors - bail out
                return
            tries = tries + 1

    # Check for empty datasets
    if blocks == {}:
        logging.debug("DBS: Empty blocks - %s" % filesetToProcess.name)
        return filesetToProcess

    # Get all file blocks
    blockList = blocks.keys()

    # Process all file blocks
    for fileBlock in blockList:
        seList = blocks[fileBlock]['StorageElements']

        # Only add files for blocks with associated SEs
        if seList is None or seList == []:
            logging.info("fileblock %s - no SE's associated" % fileBlock)
            continue
        else:
            for loc in seList:
                locationNew.execute(siteName=loc, seName=loc)

        for files in blocks[fileBlock]['Files']:
            if startRun != 'None':
                if len(files['LumiList']):
                    for lumi in files['LumiList']:
                        if int(startRun) <= int(lumi['RunNumber']):
                            newfile = File(files['LogicalFileName'],
                                           size=files['FileSize'],
                                           events=files['NumberOfEvents'])
                            LOCK.acquire()
                            if newfile.exists() == False:
                                newfile.create()
                                filesetToProcess.addFile(newfile)
                                filesetToProcess.setLastUpdate(int(time.time()))
                                filesetToProcess.commit()

                                runSet = set()
                                runSet.add(Run(lumi['RunNumber'],
                                               *[lumi['LumiSectionNumber']]))
                                newfile.addRunSet(runSet)
                            else:
                                newfile.loadData()

                                listFile = fileInFileset.execute(filesetToProcess.id)
                                if {'fileid': newfile['id']} not in listFile:
                                    filesetToProcess.addFile(newfile)
                                    filesetToProcess.setLastUpdate(int(time.time()))
                                    filesetToProcess.commit()

                                # Only add the run if it isn't attached yet
                                val = 0
                                for run in newfile['runs']:
                                    if lumi['RunNumber'] == run.run:
                                        val = 1
                                        break

                                if not val:
                                    runSet = set()
                                    runSet.add(Run(lumi['RunNumber'],
                                                   *[lumi['LumiSectionNumber']]))
                                    newfile.addRunSet(runSet)

                            fileLoc = getFileLoc.execute(file=files['LogicalFileName'])
                            if fileLoc:
                                for loc in seList:
                                    if loc not in fileLoc:
                                        newfile.setLocation(loc)
                            else:
                                newfile.setLocation(seList)
                            LOCK.release()
            else:
                # Assume parents and LumiSection aren't asked
                newfile = File(files['LogicalFileName'],
                               size=files['FileSize'],
                               events=files['NumberOfEvents'])
                LOCK.acquire()
                if newfile.exists() == False:
                    newfile.create()

                    # Update fileset last update parameter
                    filesetToProcess.addFile(newfile)
                    logging.debug("new file created and added by DBS")
                    filesetToProcess.setLastUpdate(int(time.time()))
                    filesetToProcess.commit()
                else:
                    newfile.loadData()

                    listFile = fileInFileset.execute(filesetToProcess.id)
                    if {'fileid': newfile['id']} not in listFile:
                        filesetToProcess.addFile(newfile)
                        logging.debug("new file loaded and added by DBS")
                        filesetToProcess.setLastUpdate(int(time.time()))
                        filesetToProcess.commit()

                fileLoc = getFileLoc.execute(file=files['LogicalFileName'])
                if fileLoc:
                    for loc in seList:
                        if loc not in fileLoc:
                            newfile.setLocation(loc)
                else:
                    newfile.setLocation(seList)
                LOCK.release()

    filesetToProcess.load()
    LASTIME = filesetToProcess.lastUpdate

    # For a re-opened or empty fileset, try until the purge time
    if (int(now) / 3600 - LASTIME / 3600) > self.reopenTime:
        filesetToProcess.setLastUpdate(int(time.time()))
        filesetToProcess.commit()

    if (int(now) / 3600 - LASTIME / 3600) > self.purgeTime:
        filesetToProcess.markOpen(False)
        logging.debug("Purge Done...")

    filesetToProcess.commit()

    logging.debug("DBS feeder work done...")
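# Hedged aside (illustrative only): the reopen/purge checks above compare
# whole-hour buckets between "now" and the fileset's last update. A named
# helper makes that intent explicit (// is the explicit floor division the
# original relies on with Python 2 integers).
def hoursSince(lastUpdate, now):
    """Whole-hour difference used by the reopen/purge window checks."""
    return int(now) // 3600 - int(lastUpdate) // 3600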