def __init__(self, logger, connectUrl, owner):
    object.__init__(self)
    dbFactory = DBFactory(logger, connectUrl, options={})
    self.dbi = dbFactory.connect()
    self.dbFormatter = DBFormatter(logger, self.dbi)
    self.owner = owner
    self.sqlDict = {
        'trig': """
            SELECT TABLE_NAME, TRIGGER_BODY
            FROM USER_TRIGGERS
            WHERE TABLE_OWNER='%s'
            """ % (owner),
        'primaryKey': """
            SELECT cols.table_name table_name, cols.column_name primaryk
            FROM all_constraints cons, all_cons_columns cols
            WHERE cols.table_name = :table_name
            AND cons.OWNER='%s'
            AND cons.constraint_type = 'P'
            AND cons.constraint_name = cols.constraint_name
            AND cons.owner = cols.owner
            """ % (owner),
        'sequen': """
            SELECT INCREMENT_BY inc, CACHE_SIZE csz
            from USER_SEQUENCES
            where SEQUENCE_NAME=:seq_name
            """
    }
def setUp(self):
    """
    setup for test.
    """
    print "Assuming that DBSBuffer database is already created......!!!!!!!!!"
    if not DBSBufferInterface._setup_done:
        logging.basicConfig(level=logging.NOTSET,
                            format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                            datefmt='%m-%d %H:%M',
                            filename='%s.log' % __file__,
                            filemode='w')
        myThread = threading.currentThread()
        myThread.logger = logging.getLogger('DBSBufferInterface')
        myThread.dialect = 'MySQL'
        options = {}
        options['unix_socket'] = os.getenv("DBSOCK")
        dbFactory = DBFactory(myThread.logger, os.getenv("DATABASE"), options)
        myThread.dbi = dbFactory.connect()
        myThread.transaction = Transaction(myThread.dbi)
        #myThread.transaction.begin()
        #myThread.transaction.commit()
        DBSBufferInterface._setup_done = True
def __init__(self, store_source):
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                        datefmt='%m-%d %H:%M')
    self.logger = logging.getLogger('OIDDBOidStore')
    dbi = DBFactory(self.logger, store_source).connect()
    sqlstore.SQLStore.__init__(self, dbi.connection().connection)
def initialize(self, db_config):
    self.db_config = db_config
    self.sourceUrl = None
    self.migration_req_id = 0
    self.block_names = []
    self.migration_block_ids = []
    self.inserted = True
    dbowner = self.db_config.get('dbowner')
    connectUrl = self.db_config.get('connectUrl')
    dbFactory = DBFactory(MgrLogger, connectUrl, options={})
    self.dbi = dbFactory.connect()
    self.dbFormatter = DBFormatter(MgrLogger, self.dbi)
    self.dbsMigrate = DBSMigrate(MgrLogger, self.dbi, dbowner)
    self.DBSBlockInsert = DBSBlockInsert(MgrLogger, self.dbi, dbowner)
def __init__(self, config):
    """
    _init_
    """
    BaseWorkerThread.__init__(self)
    myThread = threading.currentThread()

    self.daoFactory = DAOFactory(package="T0.WMBS",
                                 logger=logging,
                                 dbinterface=myThread.dbi)

    self.tier0ConfigFile = config.Tier0Feeder.tier0ConfigFile
    self.specDirectory = config.Tier0Feeder.specDirectory
    self.dropboxuser = getattr(config.Tier0Feeder, "dropboxuser", None)
    self.dropboxpass = getattr(config.Tier0Feeder, "dropboxpass", None)

    self.transferSystemBaseDir = getattr(config.Tier0Feeder, "transferSystemBaseDir", None)
    if self.transferSystemBaseDir != None:
        if not os.path.exists(self.transferSystemBaseDir):
            self.transferSystemBaseDir = None

    self.dqmUploadProxy = getattr(config.Tier0Feeder, "dqmUploadProxy", None)
    self.serviceProxy = getattr(config.Tier0Feeder, "serviceProxy", None)

    self.localRequestCouchDB = RequestDBWriter(config.AnalyticsDataCollector.localT0RequestDBURL,
                                               couchapp=config.AnalyticsDataCollector.RequestCouchApp)

    hltConfConnectUrl = config.HLTConfDatabase.connectUrl
    dbFactoryHltConf = DBFactory(logging, dburl=hltConfConnectUrl, options={})
    dbInterfaceHltConf = dbFactoryHltConf.connect()
    daoFactoryHltConf = DAOFactory(package="T0.WMBS",
                                   logger=logging,
                                   dbinterface=dbInterfaceHltConf)
    self.getHLTConfigDAO = daoFactoryHltConf(classname="RunConfig.GetHLTConfig")

    storageManagerConnectUrl = config.StorageManagerDatabase.connectUrl
    dbFactoryStorageManager = DBFactory(logging, dburl=storageManagerConnectUrl, options={})
    self.dbInterfaceStorageManager = dbFactoryStorageManager.connect()

    self.getExpressReadyRunsDAO = None
    if hasattr(config, "PopConLogDatabase"):
        popConLogConnectUrl = getattr(config.PopConLogDatabase, "connectUrl", None)
        if popConLogConnectUrl != None:
            dbFactoryPopConLog = DBFactory(logging, dburl=popConLogConnectUrl, options={})
            dbInterfacePopConLog = dbFactoryPopConLog.connect()
            daoFactoryPopConLog = DAOFactory(package="T0.WMBS",
                                             logger=logging,
                                             dbinterface=dbInterfacePopConLog)
            self.getExpressReadyRunsDAO = daoFactoryPopConLog(classname="Tier0Feeder.GetExpressReadyRuns")

    self.haveT0DataSvc = False
    if hasattr(config, "T0DataSvcDatabase"):
        t0datasvcConnectUrl = getattr(config.T0DataSvcDatabase, "connectUrl", None)
        if t0datasvcConnectUrl != None:
            self.haveT0DataSvc = True
            dbFactoryT0DataSvc = DBFactory(logging, dburl=t0datasvcConnectUrl, options={})
            dbInterfaceT0DataSvc = dbFactoryT0DataSvc.connect()
            self.daoFactoryT0DataSvc = DAOFactory(package="T0.WMBS",
                                                  logger=logging,
                                                  dbinterface=dbInterfaceT0DataSvc)

    return
class List_t(unittest.TestCase):

    def setUp(self):
        """setup all necessary parameters"""
        dburl = os.environ["DBS_TEST_DBURL_READER"]
        self.logger = logging.getLogger("dbs test logger")
        self.dbowner = os.environ["DBS_TEST_DBOWNER_READER"]
        self.dbi = DBFactory(self.logger, dburl).connect()

    def test01(self):
        """dao.Oracle.File.List: Basic"""
        conn = self.dbi.connection()
        dao = FileList(self.logger, self.dbi, self.dbowner)
        result = dao.execute(conn, dataset="*")
        self.assertTrue(type(result) == list)
        self.assertEqual(len(result), 0)
        result = dao.execute(conn, block_name='*')
        self.assertTrue(type(result) == list)
        self.assertEqual(len(result), 0)
        result = dao.execute(conn, logical_file_name='*')
        self.assertTrue(type(result) == list)
        self.assertEqual(len(result), 0)
        conn.close()
def __init__(self, config = {}):
    """
    __DatabasePage__

    A page with a database connection (a WMCore.Database.DBFormatter) held
    in self.dbi. Look at the DBFormatter class for other handy helper
    methods, such as getBinds and formatDict.

    The DBFormatter class was originally intended to be extensively
    sub-classed, such that its subclasses followed the DAO pattern. For web
    tools we do not generally do this, and you will normally access the
    database interface directly:

        binds = {'id': 123}
        sql = "select * from table where id = :id"
        result = self.dbi.processData(sql, binds)
        return self.formatDict(result)

    Although following the DAO pattern is still possible and encouraged
    where appropriate. However, if you want to use the DAO pattern it may be
    better to *not* expose the DAO classes and have a normal DatabasePage
    exposed that passes the database connection to all the DAO's.
    """
    TemplatedPage.__init__(self, config)
    dbConfig = ConfigDBMap(config)
    conn = DBFactory(self, dbConfig.getDBUrl(), dbConfig.getOption()).connect()
    DBFormatter.__init__(self, self, conn)
    myThread = threading.currentThread()
    myThread.transaction = Transaction(conn)
    myThread.transaction.commit()
    return
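# A minimal sketch of the DAO-style alternative mentioned in the docstring
# above. It only assumes the DBFormatter behaviour visible in these snippets
# (self.dbi.processData plus formatDict); the class and table names here are
# hypothetical, not part of the original code.
from WMCore.Database.DBFormatter import DBFormatter

class GetById(DBFormatter):
    """Fetch a single row from a hypothetical 'mytable' by id."""

    sql = "SELECT id, name FROM mytable WHERE id = :id"

    def execute(self, rowId, conn=None, transaction=False):
        # processData takes the SQL plus a binds dict, as in the docstring above
        result = self.dbi.processData(self.sql, {'id': rowId},
                                      conn=conn, transaction=transaction)
        # formatDict turns the raw result into a list of dictionaries
        return self.formatDict(result)

# Usage sketch: GetById(logger, dbi).execute(123)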
def testAllURLs(self):
    """
    Checks all the url's we use in the tests are valid
    """
    urls = ['sqlite://',
            'sqlite:///:memory:',
            'sqlite:////absolute/path/to/database.txt',
            'sqlite:///relative/path/to/database.txt',
            'postgres://*****:*****@host:1234/database',
            'mysql://host/database',
            'mysql://username@host/database',
            'mysql://*****:*****@host:1234/database',
            'oracle://*****:*****@tnsName',
            'oracle://*****:*****@host:1234/sidname']
    for testurl in urls:
        try:
            dbf = DBFactory(self.logger, dburl=testurl)
            if self.urlTest(dbf.dburl, testurl):
                print("testAllURLs : ", testurl, " .... OK!")
            else:
                print("testAllURLs : ", testurl, " .... FAIL!")
        except Exception as e:
            print("testAllURLs : ", testurl, " .... EXCEPTION!", e)
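# A hedged illustration of the pattern the test above exercises: one of the
# listed URL forms passed straight to DBFactory and connected. The in-memory
# SQLite URL needs no server; the import path follows the WMCore layout
# assumed throughout these snippets.
import logging
from WMCore.Database.DBFactory import DBFactory

logger = logging.getLogger("dbfactory example")
dbf = DBFactory(logger, dburl='sqlite:///:memory:')
dbi = dbf.connect()                      # DBInterface, as used by the DAOs above
print(dbi.processData("SELECT 1"))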
def bufferFWJR(self, fwjr):
    """
    Mimics creation of component and handles JobSuccess messages.
    """
    # read the default config first.
    config = loadConfigurationFile(os.path.join(os.getenv('WMCOREBASE'),
                                                'src/python/WMComponent/DBSBuffer/DefaultConfig.py'))
    # some general settings that would come from the general default
    # config file
    config.Agent.contact = "*****@*****.**"
    config.Agent.teamName = "DBS"
    config.Agent.agentName = "DBS Buffer"
    config.section_("General")
    config.General.workDir = os.getenv("TESTDIR")
    config.section_("CoreDatabase")
    config.CoreDatabase.dialect = 'mysql'
    config.CoreDatabase.user = os.getenv("DBUSER")
    config.CoreDatabase.passwd = os.getenv("DBPASS")
    config.CoreDatabase.hostname = os.getenv("DBHOST")
    config.CoreDatabase.name = os.getenv("DBNAME")
    myThread = threading.currentThread()
    myThread.logger = logging.getLogger('DBSBufferInterface')
    myThread.dialect = 'MySQL'
    options = {}
    options['unix_socket'] = os.getenv("DBSOCK")
    dbFactory = DBFactory(myThread.logger, os.getenv("DATABASE"), options)
    testDBSBuffer = DBSBuffer(config)
    testDBSBuffer.prepareToStart()
    myThread.dbi = dbFactory.connect()
    myThread.transaction = Transaction(myThread.dbi)
    testDBSBuffer.handleMessage('JobSuccess', fwjr)
    while threading.activeCount() > 1:
        print('Currently: ' + str(threading.activeCount()) +
              ' Threads. Wait until all our threads have finished')
        time.sleep(1)
    DBSBufferInterface._teardown = True
def genericTest(self, testname=None, testoptions={}, testurl=None):
    try:
        dbf = DBFactory(self.logger, options=testoptions)
        if self.urlTest(dbf.dburl, testurl):
            print testname, " : ", testurl, " .... OK!"
        else:
            print testname, " : ", testurl, " .... FAIL!"
    except Exception as e:
        print testname, " : ", testurl, " .... EXCEPTION!", e
def __init__(self):
    #url = "oracle://*****:*****@host:port/sid"
    #url="oracle://*****:*****@uscmsdb03.fnal.gov:1521/cmscald"
    #self.owner="anzar"
    #url="mysql://*****:*****@cmssrv49.fnal.gov:3306/CMS_DBS3"
    url = "mysql://*****:*****@cmssrv49.fnal.gov:3306/CMS_DBS3_ANZ_2"
    self.owner = "__MYSQL__"
    self.logger = logging.getLogger("dbs test logger")
    self.dbi = DBFactory(self.logger, url).connect()
def test02(self):
    """business.DBSFile.listFileParents: Basic"""
    dburl = os.environ["DBS_TEST_DBURL_READER"]
    dbowner = os.environ["DBS_TEST_DBOWNER_READER"]
    dbi = DBFactory(self.logger, dburl).connect()
    bo = DBSFile(self.logger, dbi, dbowner)
    result = bo.listFileParents(logical_file_name='%')
    self.assertTrue(type(result) == list)
    self.assertEqual(len(result), 0)
def test03(self):
    """business.DBSPrimaryDataset.listPrimaryDatasets: Validation"""
    dbowner = os.environ["DBS_TEST_DBOWNER_READER"]
    dburl = os.environ["DBS_TEST_DBURL_READER"]
    dbi = DBFactory(self.logger, dburl).connect()
    bo = DBSPrimaryDataset(self.logger, dbi, dbowner)
    result = bo.listPrimaryDatasets(primary_ds_name=self.primary_ds_name)
    self.assertTrue(type(result) == list)
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0]["primary_ds_name"], self.primary_ds_name)
def test01(self):
    """business.DBSPrimaryDataset.insertPrimaryDatasets: Basic"""
    dburl = os.environ["DBS_TEST_DBURL_WRITER"]
    dbowner = os.environ["DBS_TEST_DBOWNER_WRITER"]
    dbi = DBFactory(self.logger, dburl).connect()
    bo = DBSPrimaryDataset(self.logger, dbi, dbowner)
    binput = {'primary_ds_name': self.primary_ds_name,
              'primary_ds_type': 'TEST'}
    bo.insertPrimaryDataset(binput)
def test02(self):
    """business.DBSDataset.listDatasets: Basic"""
    dburl = os.environ["DBS_TEST_DBURL_READER"]
    dbowner = os.environ["DBS_TEST_DBOWNER_READER"]
    dbi = DBFactory(self.logger, dburl).connect()
    bo = DBSDataset(self.logger, dbi, dbowner)
    bo.listDatasets()
    bo.listDatasets(dataset='%')
    result = bo.listDatasets("ThisDoesNotExist")
    self.assertTrue(type(result) == list)
    self.assertEqual(len(result), 0)
def main():
    dbFactoryT0AST = DBFactory(logging, dburl='oracle://*****:*****@GOOGLE', options={})
    dbInterfaceT0AST = dbFactoryT0AST.connect()
    dao = TheDAO(logging, dbInterfaceT0AST)
    runsDao = aSecondDAO(logging, dbInterfaceT0AST)
    runs = runsDao.execute()

    lumiFileHandle = open('/afs/cern.ch/user/d/dballest/public/t0/HILumis.txt', 'r')
    lumiOffsets = {}
    for line in lumiFileHandle:
        tokens = line.split()
        run = tokens[0]
        lumioff = tokens[1]
        lumiOffsets[run] = lumioff
    lumiFileHandle.close()

    rateInfo = {}
    for run in runs:
        results = dao.execute(run=run[0], lumiCountOffset=int(lumiOffsets.get(str(run[0]), 0)))
        if results:
            rateInfo[str(run[0])] = results

    theFile = '/afs/cern.ch/user/c/cmsprod/www/pdRates.txt'
    tmpFileHandle = open('/tmp/pdRates.tmp', 'w')
    tmpFileHandle.write('Rate information\n')
    currentTime = time.strftime('%d-%m-%y %H:%M %Z')
    tmpFileHandle.write('Updated on %s\n' % currentTime)
    for run in sorted(rateInfo.keys(), reverse=True):
        tmpFileHandle.write('Run: %s\n' % run)
        for PD in rateInfo[run]:
            tmpFileHandle.write('PD: %15s Event Rate: %4.2f Hz\n' % (PD, rateInfo[run][PD]))
        tmpFileHandle.write('=====================================================\n')
        tmpFileHandle.write('=====================================================\n')
    tmpFileHandle.close()
    shutil.move('/tmp/pdRates.tmp', theFile)
    return 0
def setDatabaseConnection(self, dbConfig, dialect, socketLoc=None):
    """
    Sets the default connection parameters, without having to worry much on
    what attributes need to be set. This is especially advantageous for
    developers of third party projects that want to use only parts of the
    WMCore lib.

    The class differentiates between different formats used by external
    projects. External project formats that are supported can be activated
    by setting the flavor flag.
    """
    myThread = threading.currentThread()
    if getattr(myThread, "dialect", None) != None:
        # Database is already initialized, we'll create a new
        # transaction and move on.
        if hasattr(myThread, "transaction"):
            if myThread.transaction != None:
                myThread.transaction.commit()
        myThread.transaction = Transaction(myThread.dbi)
        return

    options = {}
    if dialect.lower() == 'mysql':
        dialect = 'MySQL'
        if socketLoc != None:
            options['unix_socket'] = socketLoc
    elif dialect.lower() == 'oracle':
        dialect = 'Oracle'
    elif dialect.lower() == 'http':
        dialect = 'CouchDB'
    else:
        msg = "Unsupported dialect %s !" % dialect
        logging.error(msg)
        raise WMInitException(msg)

    myThread.dialect = dialect
    myThread.logger = logging
    myThread.dbFactory = DBFactory(logging, dbConfig, options)
    myThread.dbi = myThread.dbFactory.connect()

    # The transaction object will begin a transaction as soon as it is
    # initialized. I'd rather have the user handle that, so we'll commit
    # it here.
    myThread.transaction = Transaction(myThread.dbi)
    myThread.transaction.commit()
    return
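# A hedged usage sketch for the method above, assuming it lives on the usual
# WMInit-style helper; the class name, connect URL and socket path are
# illustrative, not taken from the original code.
from WMCore.WMInit import WMInit

wmInit = WMInit()
wmInit.setDatabaseConnection(dbConfig='mysql://user:pass@localhost/wmagent',
                             dialect='mysql',
                             socketLoc='/var/lib/mysql/mysql.sock')
# After this call the current thread carries dbi, dialect and an already
# committed Transaction, exactly as set up in the method body above.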
def test03(self):
    """business.DBSDataset.listDatasets: with parent_dataset, release_version,
    pset_hash, app_name, output_module_label"""
    dburl = os.environ["DBS_TEST_DBURL_READER"]
    dbowner = os.environ["DBS_TEST_DBOWNER_READER"]
    dbi = DBFactory(self.logger, dburl).connect()
    bo = DBSDataset(self.logger, dbi, dbowner)
    bo.listDatasets()
    bo.listDatasets(dataset='%')
    bo.listDatasets(dataset='%', release_version='%')
    bo.listDatasets(pset_hash='%')
    bo.listDatasets(app_name='%')
    bo.listDatasets(output_module_label='%')
    result = bo.listDatasets("ThisDoesNotExist")
    self.assertTrue(type(result) == list)
    self.assertEqual(len(result), 0)
def test02(self):
    """business.DBSOutputConfig.listOutputConfig: Basic"""
    dburl = os.environ["DBS_TEST_DBURL_READER"]
    dbowner = os.environ["DBS_TEST_DBOWNER_READER"]
    dbi = DBFactory(self.logger, dburl).connect()
    bo = DBSOutputConfig(self.logger, dbi, dbowner)
    bo.listOutputConfigs()
    bo.listOutputConfigs(dataset='%')
    bo.listOutputConfigs(logical_file_name='%')
    bo.listOutputConfigs(release_version='%')
    bo.listOutputConfigs(pset_hash='%')
    bo.listOutputConfigs(app_name='%')
    bo.listOutputConfigs(output_module_label='%')
    bo.listOutputConfigs(dataset='%', release_version='%')
class List_t(unittest.TestCase):

    def setUp(self):
        """setup all necessary parameters"""
        dburl = os.environ["DBS_TEST_DBURL_READER"]
        self.logger = logging.getLogger("dbs test logger")
        self.dbowner = os.environ["DBS_TEST_DBOWNER_READER"]
        self.dbi = DBFactory(self.logger, dburl).connect()

    def test01(self):
        """dao.Oracle.PrimaryDataset.List: Basic"""
        conn = self.dbi.connection()
        dao = PrimaryDatasetList(self.logger, self.dbi, self.dbowner)
        dao.execute(conn)
        dao.execute(conn, primary_ds_name="*")
        result = dao.execute(conn, "ThisDoesNotExist")
        self.assertTrue(type(result) == list)
        self.assertEqual(len(result), 0)
        conn.close()
def __init__(self, config):
    """
    _init_
    """
    BaseWorkerThread.__init__(self)
    myThread = threading.currentThread()

    self.daoFactory = DAOFactory(package = "T0.WMBS",
                                 logger = logging,
                                 dbinterface = myThread.dbi)

    self.tier0ConfigFile = config.Tier0Feeder.tier0ConfigFile
    self.specDirectory = config.Tier0Feeder.specDirectory
    self.dropboxuser = config.Tier0Feeder.dropboxuser
    self.dropboxpass = config.Tier0Feeder.dropboxpass

    self.transferSystemBaseDir = getattr(config.Tier0Feeder, "transferSystemBaseDir", None)
    if self.transferSystemBaseDir != None:
        if not os.path.exists(self.transferSystemBaseDir):
            self.transferSystemBaseDir = None

    self.dqmUploadProxy = config.WMBSService.proxy

    self.localSummaryCouchDB = WMStatsWriter(config.AnalyticsDataCollector.localWMStatsURL)

    hltConfConnectUrl = config.HLTConfDatabase.connectUrl
    dbFactoryHltConf = DBFactory(logging, dburl = hltConfConnectUrl, options = {})
    dbInterfaceHltConf = dbFactoryHltConf.connect()
    daoFactoryHltConf = DAOFactory(package = "T0.WMBS",
                                   logger = logging,
                                   dbinterface = dbInterfaceHltConf)
    self.getHLTConfigDAO = daoFactoryHltConf(classname = "RunConfig.GetHLTConfig")

    storageManagerConnectUrl = config.StorageManagerDatabase.connectUrl
    dbFactoryStorageManager = DBFactory(logging, dburl = storageManagerConnectUrl, options = {})
    self.dbInterfaceStorageManager = dbFactoryStorageManager.connect()

    self.getExpressReadyRunsDAO = None
    if hasattr(config, "PopConLogDatabase"):
        popConLogConnectUrl = getattr(config.PopConLogDatabase, "connectUrl", None)
        if popConLogConnectUrl != None:
            dbFactoryPopConLog = DBFactory(logging, dburl = popConLogConnectUrl, options = {})
            dbInterfacePopConLog = dbFactoryPopConLog.connect()
            daoFactoryPopConLog = DAOFactory(package = "T0.WMBS",
                                             logger = logging,
                                             dbinterface = dbInterfacePopConLog)
            self.getExpressReadyRunsDAO = daoFactoryPopConLog(classname = "Tier0Feeder.GetExpressReadyRuns")

    return
class List_t(unittest.TestCase):

    def setUp(self):
        """setup all necessary parameters"""
        dburl = os.environ["DBS_TEST_DBURL_READER"]
        self.logger = logging.getLogger("dbs test logger")
        self.dbowner = os.environ["DBS_TEST_DBOWNER_READER"]
        self.dbi = DBFactory(self.logger, dburl).connect()

    def test01(self):
        """dao.Oracle.OutputModuleConfig.List: Basic"""
        conn = self.dbi.connection()
        dao = OutputModuleConfigList(self.logger, self.dbi, self.dbowner)
        dao.execute(conn)
        dao.execute(conn, dataset = '%')
        dao.execute(conn, logical_file_name = '%')
        dao.execute(conn, release_version = '%')
        dao.execute(conn, pset_hash = '%')
        dao.execute(conn, app = '%')
        dao.execute(conn, output_label = '%')
        conn.close()
def testDBSFileList(self):
    """business.DBSFile.listFiles: Basic"""
    dburl = os.environ["DBS_TEST_DBURL_READER"]
    dbowner = os.environ["DBS_TEST_DBOWNER_READER"]
    dbi = DBFactory(self.logger, dburl).connect()
    bo = DBSFile(self.logger, dbi, dbowner)
    result = bo.listFiles('NoSuchFile')
    self.assertTrue(type(result) == list)
    self.assertEqual(len(result), 0)
    result = bo.listFiles(dataset='NoSuchDataset')
    self.assertTrue(type(result) == list)
    self.assertEqual(len(result), 0)
    result = bo.listFiles(block_name='NoSuchBlock')
    self.assertTrue(type(result) == list)
    self.assertEqual(len(result), 0)
    result = bo.listFiles(logical_file_name='NoSuchLFN')
    self.assertTrue(type(result) == list)
    self.assertEqual(len(result), 0)
def __init__(self, dburl, owner):
    threading.Thread.__init__(self)
    logger = logging.getLogger("dbs test logger")
    dbi = DBFactory(logger, dburl).connect()
    self.bo = DBSFile(logger, dbi, owner)
class DBUserStore(UserStore):

    def __init__(self, config):
        UserStore.__init__(self, config)
        logging.basicConfig(level = logging.DEBUG,
                            format = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                            datefmt = '%m-%d %H:%M')
        self.logger = logging.getLogger('OIDDBUserStore')
        self.conn = DBFactory(self.logger, config.source).connect()

    def load(self, user):
        """
        Build a dict like:
            {'permissions' : {role: [sites/groups]},
             'fullname' : user.fullname,
             'dn' : user.dn}
        """
        grpsql = """select contact.forename || ' ' || contact.SURNAME as fullname,
                    contact.dn, role.title, user_group.name
                    from contact, role, group_responsibility, user_group
                    where contact.username=:username
                    and contact.id = group_responsibility.contact
                    and group_responsibility.role = role.id
                    and user_group.id = group_responsibility.user_group"""
        sitesql = """select contact.forename || ' ' || contact.SURNAME as fullname,
                    contact.dn, role.title, siteinfo_v2.cms_name
                    from contact, role, site_responsibility, siteinfo_v2
                    where contact.username=:username
                    and contact.id = site_responsibility.contact
                    and site_responsibility.role = role.id
                    and siteinfo_v2.id = site_responsibility.site"""
        userdict = {}
        data = self.conn.processData([grpsql, sitesql],
                                     binds = [{'username': user}, {'username': user}])
        for d in data:
            for r in d.fetchall():
                if 'fullname' not in userdict.keys():
                    userdict['fullname'] = r[0]
                if 'dn' not in userdict.keys():
                    userdict['dn'] = r[1]
                if 'permissions' not in userdict.keys():
                    userdict['permissions'] = {r[2]: [r[3]]}
                else:
                    if r[2] in userdict['permissions'].keys():
                        userdict['permissions'][r[2]].append(r[3])
                    else:
                        userdict['permissions'][r[2]] = [r[3]]
                print r
        return userdict

    def checkpass(self, user, password):
        sql = 'select passwd from user_passwd where username = :username'
        try:
            data = self.conn.processData(sql, binds = {'username': user})
            if len(data) == 1:
                encpassword = data[0].fetchone()[0]
                return encpassword == crypt(password, encpassword)
        except Exception, e:
            self.logger.info(str(e))
        return False
def initInThread(self):
    """
    Default initialization of the harness including setting some diagnostic
    messages. This method is called when we call 'prepareToStart'
    """
    try:
        self.messages = {}

        compName = self.config.Agent.componentName
        compSect = getattr(self.config, compName, None)
        if not hasattr(compSect, "logFile"):
            if not getattr(compSect, 'componentDir', None):
                errorMessage = "No componentDir for log entries found!\n"
                errorMessage += "Harness cannot run without componentDir.\n"
                logging.error(errorMessage)
                raise HarnessException(errorMessage)
            compSect.logFile = os.path.join(compSect.componentDir, "ComponentLog")
        print('Log file is: ' + compSect.logFile)

        logHandler = RotatingFileHandler(compSect.logFile, "a", 1000000000, 3)
        logMsgFormat = getattr(compSect, "logMsgFormat",
                               "%(asctime)s:%(thread)d:%(levelname)s:%(module)s:%(message)s")
        logFormatter = logging.Formatter(logMsgFormat)
        logHandler.setFormatter(logFormatter)
        logLevelName = getattr(compSect, 'logLevel', 'INFO')
        logLevel = getattr(logging, logLevelName)
        logging.getLogger().addHandler(logHandler)
        logging.getLogger().setLevel(logLevel)
        self.logMsg = {'DEBUG': logging.DEBUG,
                       'ERROR': logging.ERROR,
                       'NOTSET': logging.NOTSET,
                       'CRITICAL': logging.CRITICAL,
                       'WARNING': logging.WARNING,
                       'INFO': logging.INFO,
                       'SQLDEBUG': logging.SQLDEBUG}
        if hasattr(compSect, "logLevel") and compSect.logLevel in self.logMsg:
            logging.getLogger().setLevel(self.logMsg[compSect.logLevel])
        WMLogging.sqldebug("wmcore level debug:")

        # If not previously set, force wmcore cache to current path
        if not os.environ.get('WMCORE_CACHE_DIR'):
            os.environ['WMCORE_CACHE_DIR'] = os.path.join(compSect.componentDir, '.wmcore_cache')

        logging.info(">>>Starting: " + compName + '<<<')

        # check which backend to use: MySQL, Oracle, etc... for core services.
        # we recognize there can be more than one database,
        # but we offer a default database that is used for core services.
        logging.info(">>>Initializing default database")
        logging.info(">>>Check if connection is through socket")
        myThread = threading.currentThread()
        myThread.logger = logging.getLogger()
        logging.info(">>>Setting config for thread: ")
        myThread.config = self.config

        logging.info(">>>Building database connection string")
        # check if there is a premade string, if not build it yourself.
        dbConfig = ConfigDBMap(self.config)
        dbStr = dbConfig.getDBUrl()
        options = dbConfig.getOption()
        # we only want one DBFactory per database so we will need
        # to pass this on in case we are using threads.
        myThread.dbFactory = DBFactory(myThread.logger, dbStr, options)
        myThread.sql_transaction = True
        if myThread.dbFactory.engine:
            myThread.dbi = myThread.dbFactory.connect()
            myThread.transaction = Transaction(myThread.dbi)
        else:
            myThread.dbi = myThread.config.CoreDatabase.connectUrl
            myThread.sql_transaction = False

        # Attach a worker manager object to the main thread
        if not hasattr(myThread, 'workerThreadManager'):
            myThread.workerThreadManager = WorkerThreadManager(self)
        else:
            myThread.workerThreadManager.terminateSlaves.clear()
        myThread.workerThreadManager.pauseWorkers()

        logging.info(">>>Initialize transaction dictionary")

        (connectDialect, dummy) = dbStr.split(":", 1)
        if connectDialect.lower() == 'mysql':
            myThread.dialect = 'MySQL'
        elif connectDialect.lower() == 'oracle':
            myThread.dialect = 'Oracle'

        logging.info("Harness part constructor finished")
    except Exception as ex:
        logging.critical("Problem instantiating " + str(ex))
        logging.error("Traceback: %s", str(traceback.format_exc()))
        raise
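# For reference, a sketch of the configuration the Harness code above expects
# to find. The Agent/componentDir attributes and the CoreDatabase.connectUrl
# fallback are visible in the method body; the socketFileLocation attribute
# and the example values are assumptions, so treat this as illustrative only.
from WMCore.Configuration import Configuration

config = Configuration()
config.section_("Agent")
config.Agent.componentName = "MyComponent"
config.component_("MyComponent")
config.MyComponent.componentDir = "/tmp/MyComponent"   # where ComponentLog goes
config.MyComponent.logLevel = "INFO"
config.section_("CoreDatabase")
config.CoreDatabase.connectUrl = "mysql://user:pass@localhost/wmagent"
config.CoreDatabase.socketFileLocation = "/var/lib/mysql/mysql.sock"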
        param['database'] = dbConfig['dbName']
        param['host'] = dbConfig['host']
        if dbConfig['portNr']:
            param['port'] = dbConfig['portNr']
        if dbConfig['socketFileLocation']:
            param['unix_socket'] = dbConfig['socketFileLocation']
            #// otherwise use default socket location /tmp
    except Exception, ex:
        msg = "Parameter missing \n"
        msg += str(ex)
        raise RuntimeError, msg

    #// Get Logger
    logger = logging.getLogger('MergeSensorDB')
    logger.setLevel(logging.ERROR)

    #// Initializing dbFactory
    dbFactory = DBFactory(logger, dburl = None, options = param)
    daoFactory = DAOFactory(package = 'MergeSensor.MergeSensorDB',
                            logger = logger,
                            dbinterface = dbFactory.connect())
    return daoFactory
    #//END getDAOFactory
def __init__(self, connectUrl):
    logger = logging.getLogger()
    dbFactory = DBFactory(logger, connectUrl, options={})
    self.dbi = dbFactory.connect()
    self.dbFormatter = DBFormatter(logger, self.dbi)
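# Hypothetical usage of a helper initialized as above, using only the
# processData/formatDict calls that appear in the other snippets here.
# 'SchemaHelper' is an invented class name, since the snippet only shows
# its __init__; the query is a trivial placeholder.
helper = SchemaHelper("sqlite:///:memory:")
result = helper.dbi.processData("SELECT 1 AS schema_version")
print(helper.dbFormatter.formatDict(result))   # -> [{'schema_version': 1}]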
def setUp(self):
    """setup all necessary variables"""
    url = "oracle://*****:*****@host:port/sid"
    self.logger = logging.getLogger("dbs test logger")
    self.dbi = DBFactory(self.logger, url).connect()
class BossLiteDBWMCore(object):
    """
    High level API class for DB queries through WMCore.
    It allows load/operate/update DB using free format queries
    """

    dbConfig = {'dialect': '???',
                'user': '******',
                'username': '******',
                'passwd': '???',
                'password': '******',
                'tnsName': '???',
                'host': '???',
                'port': '???',
                'sid': '???'}

    def __init__(self, database, dbConfig):
        """
        initialize the API instance
        """
        # get logger
        self.logger = logging.getLogger()

        # create an instance of database
        if isinstance(dbConfig, basestring):
            self.dbInstance = DBFactory(self.logger, dburl=dbConfig)
            self.dbConfig = dbConfig
        else:
            self.dbInstance = DBFactory(self.logger, options=dbConfig)
            self.dbConfig.update(dbConfig)

        # report error if not successful
        if self.dbInstance is None:
            self.logger.error("Failed to Initialize BossLiteDBWMCore")
            return

        # create a session and db access
        self.session = None

    ##########################################################################

    def connect(self):
        """
        recreate a session and db access
        """
        # create a session and db access
        if self.session is None:
            self.session = self.dbInstance.connect()

    ##########################################################################

    def close(self):
        """
        close session and db access
        """
        # Does "close" method exist for SQLAlchemy? Not present in DBFactory ...
        self.session.close()
        self.session = None

    ##########################################################################

    def reset(self):
        """
        reset session and db access
        """
        self.close()
        self.connect()

    ##########################################################################

    def commit(self):
        """
        commit
        """
        # empty method
        pass

    ##########################################################################

    def select(self, query):
        """
        execute a query.
        """
        # db connect
        self.session.connect()
        # -> WMCore.Database.ResultSet import ResultSet
        results = self.session.processData(query)
        if (results.rowcount > 0):
            formatter = DBFormatter(self.logger, self.session)
            out = formatter.format(results)
        else:
            out = None
        return out

    ##########################################################################

    def selectOne(self, query):
        """
        execute a query with only one result expected
        """
        # db connect
        self.session.connect()
        # execute query
        results = self.session.processData(query)
        if (results.rowcount > 0):
            formatter = DBFormatter(self.logger, self.session)
            out = formatter.formatOne(results)
        else:
            out = None
        return out

    ##########################################################################

    def modify(self, query):
        """
        execute a query which does not return such as insert/update/delete
        """
        # db connect
        self.connect()
        # run the query
        self.session.processData(query)

    ##########################################################################

    def updateDB(self, obj):
        """
        update any object table in the DB
        works for tasks, jobs, runningJobs
        """
        # db connect
        self.connect()
        # update
        obj.update(self.session)

    ##########################################################################

    def installDB(self, schemaLocation=None):
        """
        install database
        """
        if schemaLocation is not None:
            schemaLocation = expandvars(schemaLocation)
            self.dbConfig.update({'host': schemaLocation})
        daofactory = DAOFactory(package="WMCore.Services",
                                logger=self.logger,
                                dbInterface=self.session)
        mydao = daofactory(classname="BossLite." + self.dbConfig['dialect'] + ".Create")
        status = mydao.execute()
        # check creation...
        return status
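# A hedged usage sketch for the class above. The in-memory SQLite URL and the
# bl_task table are placeholders; note that select()/selectOne()/close()
# additionally rely on connect()/close() methods on the underlying session,
# exactly as written in the class body, so they are only described here.
db = BossLiteDBWMCore(None, "sqlite:///:memory:")
db.connect()                                    # session = DBFactory(...).connect()
db.modify("CREATE TABLE bl_task (id INTEGER, name VARCHAR(30))")
db.modify("INSERT INTO bl_task (id, name) VALUES (1, 'task-1')")
# select("SELECT id, name FROM bl_task") would return the rows through
# DBFormatter.format(), or None when nothing matches, as implemented above.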
def __init__(self, logger, connectUrl, ownerDBS3, ownerDBS2): object.__init__(self) dbFactory = DBFactory(logger, connectUrl, options={}) self.dbi = dbFactory.connect() self.dbFormatter = DBFormatter(logger, self.dbi) self.sqlPrimaryKey = { 'AcquisitionEras': 'acquisition_era_name', 'ApplicationExecutables': 'app_exec_id', 'Block': 'block_id', 'BlockParents': 'this_block_id', 'Dataset': 'dataset_id', 'DatasetAccessTypes': 'dataset_access_type_id', 'DatasetOutputModConfigs': 'ds_output_mod_conf_id', 'DatasetParents': 'this_dataset_id', 'DatasetRuns': 'dataset_run_id', 'DataTier': 'data_tier_id', 'Files': 'file_id', 'FileDataTypes': 'file_type_id', 'FileLumis': 'file_lumi_id', 'FileOutputModConfigs': 'file_output_config_id', 'FileParents': 'this_file_id', 'OriginSiteName': 'block_id', 'OutputModule': 'output_mod_config_id', 'ParametersetHashes': 'parameter_set_hash_id', 'PhysicsGroups': 'physics_group_id', 'PrimaryDS': 'primary_ds_id', 'PrimaryDSTypes': 'primary_ds_type_id', 'ProcessedDatasets': 'processed_ds_name', 'ReleaseVersions': 'release_version_id' } self.sqlDict = { 'AcquisitionEras': """SELECT ACQUISITION_ERA_NAME, START_DATE, END_DATE CREATION_DATE, CREATE_BY, DESCRIPTION FROM( SELECT DISTINCT AE.ACQUISITION_ERA_NAME, AE.START_DATE, AE.END_DATE, AE.CREATION_DATE, AE.CREATE_BY, AE.DESCRIPTION FROM {ownerDBS3}.ACQUISITION_ERAS AE UNION ALL SELECT DISTINCT PCD.AQUISITIONERA ACQUISITION_ERA_NAME, 0 START_DATE, NULL END_DATE, NULL CREATION_DATE, NULL CREATE_BY, NULL DESCRIPTION FROM {ownerDBS2}.PROCESSEDDATASET PCD WHERE AQUISITIONERA IS NOT NULL ) GROUP BY ACQUISITION_ERA_NAME, START_DATE, END_DATE, CREATION_DATE, CREATE_BY, DESCRIPTION HAVING COUNT(*) <> 2 ORDER BY ACQUISITION_ERA_NAME """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'ApplicationExecutables': """SELECT APP_EXEC_ID, APP_NAME FROM( SELECT AE.APP_EXEC_ID, AE.APP_NAME FROM {ownerDBS3}.APPLICATION_EXECUTABLES AE UNION ALL SELECT AE2.ID APP_EXEC_ID, AE2.EXECUTABLENAME APP_NAME FROM {ownerDBS2}.APPEXECUTABLE AE2 ) GROUP BY APP_EXEC_ID, APP_NAME HAVING COUNT(*) <> 2 ORDER BY APP_EXEC_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'Block': """SELECT BLOCK_ID, BLOCK_NAME, DATASET_ID, PATH, OPEN_FOR_WRITING, ORIGIN_SITE_NAME, BLOCK_SIZE, FILE_COUNT, CREATION_DATE, CREATE_BY, LAST_MODIFICATION_DATE, LAST_MODIFIED_BY FROM( SELECT BL.BLOCK_ID, BL.BLOCK_NAME, BL.DATASET_ID, DS.DATASET PATH, BL.OPEN_FOR_WRITING, BL.ORIGIN_SITE_NAME, BL.BLOCK_SIZE, BL.FILE_COUNT, BL.CREATION_DATE, BL.CREATE_BY, BL.LAST_MODIFICATION_DATE, BL.LAST_MODIFIED_BY FROM {ownerDBS3}.BLOCKS BL JOIN {ownerDBS3}.DATASETS DS ON BL.DATASET_ID=DS.DATASET_ID UNION ALL SELECT BL2.ID BLOCK_ID, BL2.NAME BLOCK_NAME, BL2.DATASET DATASET_ID, BL2.PATH, BL2.OPENFORWRITING OPEN_FOR_WRITING, 'UNKNOWN' ORIGIN_SITE_NAME, BL2.BLOCKSIZE BLOCK_SIZE, BL2.NUMBEROFFILES FILE_COUNT, BL2.CREATIONDATE CREATION_DATE, PS1.DISTINGUISHEDNAME CREATE_BY, BL2.LASTMODIFICATIONDATE LAST_MODIFICATION_DATE, PS2.DISTINGUISHEDNAME LAST_MODIFIED_BY FROM {ownerDBS2}.BLOCK BL2 JOIN {ownerDBS2}.PERSON PS1 ON BL2.CREATEDBY=PS1.ID JOIN {ownerDBS2}.PERSON PS2 ON BL2.LASTMODIFIEDBY=PS2.ID JOIN {ownerDBS2}.PROCESSEDDATASET DS ON DS.ID=BL2.DATASET JOIN {ownerDBS2}.PRIMARYDATASET PD on DS.PRIMARYDATASET=PD.ID JOIN {ownerDBS2}.DATATIER DT ON DS.DATATIER=DT.ID ) GROUP BY BLOCK_ID, BLOCK_NAME, DATASET_ID, PATH, OPEN_FOR_WRITING, ORIGIN_SITE_NAME, BLOCK_SIZE, FILE_COUNT, CREATION_DATE, CREATE_BY, 
LAST_MODIFICATION_DATE, LAST_MODIFIED_BY HAVING COUNT(*) <> 2 ORDER BY BLOCK_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'BlockParents': """SELECT THIS_BLOCK_ID, PARENT_BLOCK_ID FROM ( SELECT BP.THIS_BLOCK_ID, BP.PARENT_BLOCK_ID FROM {ownerDBS3}.BLOCK_PARENTS BP UNION ALL SELECT THISBLOCK this_block_id, ITSPARENT parent_block_id FROM {ownerDBS2}.BLOCKPARENT) GROUP BY THIS_BLOCK_ID,PARENT_BLOCK_ID HAVING COUNT(*) <> 2 ORDER BY THIS_BLOCK_ID, PARENT_BLOCK_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'DataTier': """SELECT DATA_TIER_ID, DATA_TIER_NAME, CREATION_DATE, CREATE_BY FROM( SELECT DT.DATA_TIER_ID, DT.DATA_TIER_NAME, DT.CREATION_DATE, DT.CREATE_BY FROM {ownerDBS3}.DATA_TIERS DT UNION ALL SELECT DT.ID DATA_TIER_ID, DT.NAME DATA_TIER_NAME, DT.CREATIONDATE CREATION_DATE, PS.DISTINGUISHEDNAME CREATE_BY FROM {ownerDBS2}.DATATIER DT JOIN {ownerDBS2}.PERSON PS ON PS.ID=DT.CREATEDBY ) GROUP BY DATA_TIER_ID, DATA_TIER_NAME, CREATION_DATE, CREATE_BY HAVING COUNT(*) <> 2 ORDER BY data_tier_id """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'Dataset': """SELECT DATASET_ID, DATASET, XTCROSSSECTION, CREATION_DATE, CREATE_BY, LAST_MODIFICATION_DATE, LAST_MODIFIED_BY, PRIMARY_DS_NAME, PRIMARY_DS_TYPE, PROCESSED_DS_NAME, DATA_TIER_NAME, DATASET_ACCESS_TYPE, ACQUISITION_ERA_NAME, PROCESSING_ERA_ID, PHYSICS_GROUP_NAME, PREP_ID FROM( SELECT D.DATASET_ID, D.DATASET, D.XTCROSSSECTION, D.CREATION_DATE, D.CREATE_BY, D.LAST_MODIFICATION_DATE, D.LAST_MODIFIED_BY, P.PRIMARY_DS_NAME, PDT.PRIMARY_DS_TYPE, PD.PROCESSED_DS_NAME, DT.DATA_TIER_NAME, DP.DATASET_ACCESS_TYPE, AE.ACQUISITION_ERA_NAME, D.PROCESSING_ERA_ID, PH.PHYSICS_GROUP_NAME, D.PREP_ID FROM {ownerDBS3}.DATASETS D JOIN {ownerDBS3}.PRIMARY_DATASETS P ON P.PRIMARY_DS_ID = D.PRIMARY_DS_ID JOIN {ownerDBS3}.PRIMARY_DS_TYPES PDT ON PDT.PRIMARY_DS_TYPE_ID = P.PRIMARY_DS_TYPE_ID JOIN {ownerDBS3}.PROCESSED_DATASETS PD ON PD.PROCESSED_DS_ID = D.PROCESSED_DS_ID JOIN {ownerDBS3}.DATA_TIERS DT ON DT.DATA_TIER_ID = D.DATA_TIER_ID JOIN {ownerDBS3}.DATASET_ACCESS_TYPES DP on DP.DATASET_ACCESS_TYPE_ID= D.DATASET_ACCESS_TYPE_ID LEFT OUTER JOIN {ownerDBS3}.ACQUISITION_ERAS AE ON AE.ACQUISITION_ERA_ID = D.ACQUISITION_ERA_ID LEFT OUTER JOIN {ownerDBS3}.PHYSICS_GROUPS PH ON PH.PHYSICS_GROUP_ID = D.PHYSICS_GROUP_ID UNION ALL SELECT DS.ID DATASET_ID, '/' || PD2.NAME || '/' || DS.NAME || '/' || DT2.NAME DATASET, DS.XTCROSSSECTION, DS.CREATIONDATE CREATION_DATE, PS1.DISTINGUISHEDNAME CREATE_BY, DS.LASTMODIFICATIONDATE LAST_MODIFICATION_DATE, PS2.DISTINGUISHEDNAME LAST_MODIFIED_BY, PD2.NAME PRIMARY_DS_NAME, PT.TYPE PRIMARY_DS_TYPE, DS.NAME PROCESSED_DS_NAME, DT2.NAME DATA_TIER_NAME, ST.STATUS DATASET_ACCESS_TYPE, DS.AQUISITIONERA ACQUISITION_ERA_NAME, NULL PROCESSING_ERA_ID, PG.PHYSICSGROUPNAME physics_group_name, NULL PREP_ID FROM {ownerDBS2}.PROCESSEDDATASET DS JOIN {ownerDBS2}.DATATIER DT2 ON DS.DATATIER=DT2.ID JOIN {ownerDBS2}.PRIMARYDATASET PD2 ON PD2.ID=DS.PRIMARYDATASET JOIN {ownerDBS2}.PHYSICSGROUP PG ON PG.ID=DS.PHYSICSGROUP JOIN {ownerDBS2}.PROCDSSTATUS ST ON ST.ID=DS.STATUS JOIN {ownerDBS2}.PERSON PS1 ON DS.CREATEDBY=PS1.ID JOIN {ownerDBS2}.PERSON PS2 ON DS.LASTMODIFIEDBY=PS2.ID JOIN {ownerDBS2}.PRIMARYDSTYPE PT ON PT.ID=PD2.TYPE ) GROUP BY DATASET_ID, DATASET, XTCROSSSECTION, CREATION_DATE, CREATE_BY, LAST_MODIFICATION_DATE, LAST_MODIFIED_BY, PRIMARY_DS_NAME, PRIMARY_DS_TYPE, PROCESSED_DS_NAME, 
DATA_TIER_NAME, DATASET_ACCESS_TYPE, ACQUISITION_ERA_NAME, PROCESSING_ERA_ID, PHYSICS_GROUP_NAME, PREP_ID HAVING COUNT(*) <> 2 ORDER BY DATASET_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## ## Some datatypes are not existing anymore in DBS3 'DatasetAccessTypes': """SELECT DATASET_ACCESS_TYPE_ID, DATASET_ACCESS_TYPE FROM( SELECT DAT.DATASET_ACCESS_TYPE_ID, DAT.DATASET_ACCESS_TYPE FROM {ownerDBS3}.DATASET_ACCESS_TYPES DAT UNION ALL SELECT PDS.ID DATASET_ACCESS_TYPE_ID, PDS.STATUS DATASET_ACCESS_TYPE FROM {ownerDBS2}.PROCDSSTATUS PDS WHERE PDS.ID!=3 AND PDS.ID!=4 AND PDS.ID!=21 AND PDS.ID!=61 ) GROUP BY DATASET_ACCESS_TYPE_ID, DATASET_ACCESS_TYPE HAVING COUNT(*) <> 2 ORDER BY DATASET_ACCESS_TYPE_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'DatasetOutputModConfigs': """SELECT DS_OUTPUT_MOD_CONF_ID, DATASET_ID, OUTPUT_MOD_CONFIG_ID FROM( SELECT DOMC.DS_OUTPUT_MOD_CONF_ID, DOMC.DATASET_ID, DOMC.OUTPUT_MOD_CONFIG_ID FROM {ownerDBS3}.DATASET_OUTPUT_MOD_CONFIGS DOMC UNION ALL SELECT PA.ID ds_output_mod_conf_id, PA.DATASET dataset_id, PA.ALGORITHM output_mod_config_id FROM {ownerDBS2}.PROCALGO PA ) GROUP BY DS_OUTPUT_MOD_CONF_ID, DATASET_ID, OUTPUT_MOD_CONFIG_ID HAVING COUNT(*) <> 2 ORDER BY DS_OUTPUT_MOD_CONF_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'DatasetParents': """SELECT THIS_DATASET_ID, PARENT_DATASET_ID FROM( SELECT DP.THIS_DATASET_ID, DP.PARENT_DATASET_ID FROM {ownerDBS3}.DATASET_PARENTS DP UNION ALL SELECT DP2.THISDATASET this_dataset_id, DP2.ITSPARENT parent_dataset_id FROM {ownerDBS2}.PROCDSPARENT DP2 ) GROUP BY THIS_DATASET_ID, PARENT_DATASET_ID HAVING COUNT(*) <> 2 ORDER BY this_dataset_id,parent_dataset_id """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'File': """ SELECT FILE_ID, LOGICAL_FILE_NAME, IS_FILE_VALID, DATASET_ID, DATASET, BLOCK_ID, BLOCK_NAME, FILE_TYPE_ID, FILE_TYPE, CHECK_SUM, EVENT_COUNT, FILE_SIZE, BRANCH_HASH_ID, ADLER32, MD5, AUTO_CROSS_SECTION, CREATION_DATE, CREATE_BY, LAST_MODIFICATION_DATE, LAST_MODIFIED_BY FROM ( SELECT F.FILE_ID, F.LOGICAL_FILE_NAME, F.IS_FILE_VALID, F.DATASET_ID, D.DATASET, F.BLOCK_ID, B.BLOCK_NAME, F.FILE_TYPE_ID, FT.FILE_TYPE, F.CHECK_SUM, F.EVENT_COUNT, F.FILE_SIZE, F.BRANCH_HASH_ID, F.ADLER32, F.MD5, F.AUTO_CROSS_SECTION, F.CREATION_DATE, F.CREATE_BY, F.LAST_MODIFICATION_DATE, F.LAST_MODIFIED_BY FROM {ownerDBS3}.FILES F JOIN {ownerDBS3}.FILE_DATA_TYPES FT ON FT.FILE_TYPE_ID = F.FILE_TYPE_ID JOIN {ownerDBS3}.DATASETS D ON D.DATASET_ID = F.DATASET_ID JOIN {ownerDBS3}.BLOCKS B ON B.BLOCK_ID = F.BLOCK_ID UNION ALL SELECT FS2.ID file_id, FS2.LOGICALFILENAME logical_file_name, CASE WHEN FST.STATUS='VALID' THEN 1 ELSE 0 END AS IS_FILE_VALID, FS2.DATASET dataset_id, '/' || PD2.NAME || '/' || DS2.NAME || '/' || DT2.NAME dataset, FS2.BLOCK block_id, BL2.NAME block_name, FS2.FILETYPE file_type_id, FT2.TYPE file_type, FS2.CHECKSUM check_sum, FS2.NUMBEROFEVENTS event_count, FS2.FILESIZE file_size, FS2.FILEBRANCH branch_hash_id, FS2.ADLER32, FS2.MD5, FS2.AUTOCROSSSECTION auto_cross_section, FS2.CREATIONDATE creation_date, PS12.DISTINGUISHEDNAME create_by, FS2.LASTMODIFICATIONDATE last_modification_date, PS22.DISTINGUISHEDNAME last_modified_by FROM {ownerDBS2}.FILES FS2 JOIN {ownerDBS2}.PROCESSEDDATASET DS2 ON DS2.ID=FS2.DATASET JOIN {ownerDBS2}.PRIMARYDATASET PD2 on DS2.PRIMARYDATASET=PD2.ID JOIN {ownerDBS2}.DATATIER DT2 
ON DS2.DATATIER=DT2.ID JOIN {ownerDBS2}.PERSON PS12 ON FS2.CREATEDBY=PS12.ID JOIN {ownerDBS2}.PERSON PS22 ON FS2.LASTMODIFIEDBY=PS22.ID JOIN {ownerDBS2}.BLOCK BL2 ON FS2.BLOCK=BL2.ID JOIN {ownerDBS2}.FILETYPE FT2 ON FT2.ID=FS2.FILETYPE JOIN {ownerDBS2}.FILESTATUS FST ON FST.ID=FS2.FILESTATUS ) GROUP BY FILE_ID, LOGICAL_FILE_NAME, IS_FILE_VALID, DATASET_ID, DATASET, BLOCK_ID, BLOCK_NAME, FILE_TYPE_ID, FILE_TYPE, CHECK_SUM, EVENT_COUNT, FILE_SIZE, BRANCH_HASH_ID, ADLER32, MD5, AUTO_CROSS_SECTION, CREATION_DATE, CREATE_BY, LAST_MODIFICATION_DATE, LAST_MODIFIED_BY HAVING COUNT(*) <> 2 ORDER BY FILE_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'FileDataTypes': """SELECT FILE_TYPE_ID, FILE_TYPE FROM( SELECT FDT.FILE_TYPE_ID, FDT.FILE_TYPE FROM {ownerDBS3}.FILE_DATA_TYPES FDT UNION ALL SELECT FDT2.ID FILE_TYPE_ID, FDT2.TYPE FILE_TYPE FROM {ownerDBS2}.FILETYPE FDT2 ) GROUP BY FILE_TYPE_ID, FILE_TYPE HAVING COUNT(*) <> 2 ORDER BY FILE_TYPE_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'FileLumis': """SELECT RUN_NUM,LUMI_SECTION_NUM,FILE_ID FROM (SELECT FL.RUN_NUM,FL.LUMI_SECTION_NUM,FL.FILE_ID FROM {ownerDBS3}.FILE_LUMIS FL UNION ALL SELECT RU.RUNNUMBER RUN_NUM, LU.LUMISECTIONNUMBER LUMI_SECTION_NUM, FRL.FILEID FILE_ID FROM {ownerDBS2}.FILERUNLUMI FRL JOIN {ownerDBS2}.RUNS RU ON FRL.RUN=RU.ID JOIN {ownerDBS2}.LUMISECTION LU ON FRL.LUMI=LU.ID ) GROUP BY RUN_NUM,LUMI_SECTION_NUM,FILE_ID HAVING COUNT(*) <> 2 ORDER BY FILE_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'FileLumisMinMax': """SELECT MIN(FRL.FILEID) AS MIN_ID, MAX(FRL.FILEID) AS MAX_ID FROM {ownerDBS2}.FILERUNLUMI FRL """.format(ownerDBS2=ownerDBS2), ############################################## 'FileLumisSplited': """SELECT RUN_NUM,LUMI_SECTION_NUM,FILE_ID FROM (SELECT FL.RUN_NUM,FL.LUMI_SECTION_NUM,FL.FILE_ID FROM {ownerDBS3}.FILE_LUMIS FL UNION ALL SELECT RU.RUNNUMBER RUN_NUM, LU.LUMISECTIONNUMBER LUMI_SECTION_NUM, FRL.FILEID file_id FROM {ownerDBS2}.FILERUNLUMI FRL JOIN {ownerDBS2}.RUNS RU ON FRL.RUN=RU.ID JOIN {ownerDBS2}.LUMISECTION LU ON FRL.LUMI=LU.ID ) WHERE FILE_ID >= :min_id AND FILE_ID <= :max_id GROUP BY RUN_NUM,LUMI_SECTION_NUM,FILE_ID HAVING COUNT(*) <> 2 ORDER BY FILE_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'FileOutputModConfigs': """SELECT FILE_OUTPUT_CONFIG_ID,FILE_ID,OUTPUT_MOD_CONFIG_ID FROM (SELECT FOMC.FILE_OUTPUT_CONFIG_ID,FOMC.FILE_ID,FOMC.OUTPUT_MOD_CONFIG_ID FROM {ownerDBS3}.FILE_OUTPUT_MOD_CONFIGS FOMC UNION ALL SELECT FA.ID file_output_config_id, FA.FILEID file_id, FA.ALGORITHM output_mod_config_id FROM {ownerDBS2}.FILEALGO FA ) GROUP BY FILE_OUTPUT_CONFIG_ID,FILE_ID,OUTPUT_MOD_CONFIG_ID HAVING COUNT(*) <> 2 ORDER BY FILE_OUTPUT_CONFIG_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'FileParents': """SELECT THIS_FILE_ID,PARENT_FILE_ID FROM (SELECT FP.THIS_FILE_ID,FP.PARENT_FILE_ID FROM {ownerDBS3}.FILE_PARENTS FP UNION ALL SELECT FP2.THISFILE this_file_id, FP2.ITSPARENT parent_file_id FROM {ownerDBS2}.FILEPARENTAGE FP2) GROUP BY THIS_FILE_ID,PARENT_FILE_ID HAVING COUNT(*) <> 2 ORDER BY THIS_FILE_ID,PARENT_FILE_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'OriginSiteName': """SELECT BLOCK_ID, ORIGIN_SITE_NAME FROM (SELECT DISTINCT B3.BLOCK_ID, B3.ORIGIN_SITE_NAME 
FROM {ownerDBS3}.BLOCKS B3 WHERE B3.ORIGIN_SITE_NAME!='UNKNOWN' UNION ALL SELECT DISTINCT B1.ID AS BLOCK_ID, SE1.SENAME AS ORIGIN_SITE_NAME FROM {ownerDBS2}.BLOCK B1 JOIN {ownerDBS2}.SEBLOCK SEB1 ON B1.ID=SEB1.BLOCKID JOIN {ownerDBS2}.STORAGEELEMENT SE1 ON SEB1.SEID=SE1.ID WHERE B1.ID IN (SELECT B2.ID FROM {ownerDBS2}.BLOCK B2 JOIN {ownerDBS2}.SEBLOCK SEB2 ON B2.ID=SEB2.BLOCKID JOIN {ownerDBS2}.STORAGEELEMENT SE2 ON SEB2.SEID=SE2.ID WHERE B2.ID=B1.ID GROUP BY B2.ID HAVING COUNT(B2.ID)=1) ) GROUP BY BLOCK_ID, ORIGIN_SITE_NAME HAVING COUNT(*)<>2 ORDER BY BLOCK_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'OutputModule': """SELECT OUTPUT_MOD_CONFIG_ID, APP_NAME, RELEASE_VERSION, PARAMETER_SET_HASH_ID, PSET_HASH, pset_name, OUTPUT_MODULE_LABEL, GLOBAL_TAG, SCENARIO, CREATION_DATE, CREATE_BY FROM( SELECT O.OUTPUT_MOD_CONFIG_ID, AE.APP_NAME, RV.RELEASE_VERSION, PSH.PARAMETER_SET_HASH_ID, PSH.PSET_HASH, PSH.PSET_NAME, O.OUTPUT_MODULE_LABEL, O.GLOBAL_TAG, O.SCENARIO, O.CREATION_DATE, O.CREATE_BY FROM {ownerDBS3}.OUTPUT_MODULE_CONFIGS O JOIN {ownerDBS3}.APPLICATION_EXECUTABLES AE ON O.APP_EXEC_ID=AE.APP_EXEC_ID JOIN {ownerDBS3}.RELEASE_VERSIONS RV ON O.RELEASE_VERSION_ID=RV.RELEASE_VERSION_ID JOIN {ownerDBS3}.PARAMETER_SET_HASHES PSH ON O.PARAMETER_SET_HASH_ID=PSH.PARAMETER_SET_HASH_ID UNION ALL SELECT DISTINCT AC.ID OUTPUT_MOD_CONFIG_ID, APPEX.EXECUTABLENAME APP_NAME, APPVER.VERSION RELEASE_VERSION, AC.PARAMETERSETID PARAMETER_SET_HASH_ID, QPS.HASH PSET_HASH, QPS.NAME PSET_NAME, TO_CHAR(AC.APPLICATIONFAMILY) OUTPUT_MODULE_LABEL, CASE WHEN (SELECT COUNT(DISTINCT PDS.GLOBALTAG) FROM {ownerDBS2}.PROCALGO PA INNER JOIN {ownerDBS2}.PROCESSEDDATASET PDS ON PA.DATASET = PDS.ID INNER JOIN {ownerDBS2}.ALGORITHMCONFIG AC2 on AC2.ID = PA.ALGORITHM WHERE PDS.GLOBALTAG IS NOT NULL ) = 1 THEN (SELECT DISTINCT PDS.GLOBALTAG FROM {ownerDBS2}.PROCALGO PA LEFT JOIN {ownerDBS2}.PROCESSEDDATASET PDS ON PA.DATASET = PDS.ID WHERE PDS.GLOBALTAG IS NOT NULL AND AC.ID = PA.ALGORITHM) ELSE 'UNKNOWN' END AS GLOBAL_TAG, NULL SCENARIO, AC.CREATIONDATE CREATION_DATE, PS.DISTINGUISHEDNAME CREATE_BY FROM {ownerDBS2}.ALGORITHMCONFIG AC JOIN {ownerDBS2}.APPEXECUTABLE APPEX ON APPEX.ID=AC.EXECUTABLENAME JOIN {ownerDBS2}.APPVERSION APPVER ON APPVER.ID=AC.APPLICATIONVERSION JOIN {ownerDBS2}.PERSON PS ON PS.ID=AC.CREATEDBY JOIN {ownerDBS2}.QUERYABLEPARAMETERSET QPS ON QPS.ID=AC.PARAMETERSETID ) GROUP BY OUTPUT_MOD_CONFIG_ID, APP_NAME, RELEASE_VERSION, PARAMETER_SET_HASH_ID, PSET_HASH, PSET_NAME, OUTPUT_MODULE_LABEL, GLOBAL_TAG, SCENARIO, CREATION_DATE, CREATE_BY HAVING COUNT(*) <> 2 ORDER BY OUTPUT_MOD_CONFIG_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'ParametersetHashes': """ SELECT PARAMETER_SET_HASH_ID, PSET_HASH, PSET_NAME FROM( SELECT PH.PARAMETER_SET_HASH_ID, PH.PSET_HASH, PH.PSET_NAME FROM {ownerDBS3}.PARAMETER_SET_HASHES PH UNION ALL SELECT QP.ID PARAMETER_SET_HASH_ID, QP.HASH PSET_HASH, QP.NAME PSET_NAME FROM {ownerDBS2}.QUERYABLEPARAMETERSET QP ) GROUP BY PARAMETER_SET_HASH_ID, PSET_HASH, PSET_NAME HAVING COUNT(*) <> 2 ORDER BY PARAMETER_SET_HASH_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'PhysicsGroups': """SELECT PHYSICS_GROUP_ID, PHYSICS_GROUP_NAME FROM( SELECT PG.PHYSICS_GROUP_ID, PG.PHYSICS_GROUP_NAME FROM {ownerDBS3}.PHYSICS_GROUPS PG UNION ALL SELECT PG2.ID PHYSICS_GROUP_ID, PG2.PHYSICSGROUPNAME PHYSICS_GROUP_NAME FROM 
{ownerDBS2}.PHYSICSGROUP PG2 ) GROUP BY PHYSICS_GROUP_ID, PHYSICS_GROUP_NAME HAVING COUNT(*) <> 2 ORDER BY PHYSICS_GROUP_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'PrimaryDS': """SELECT PRIMARY_DS_ID, PRIMARY_DS_NAME, CREATION_DATE, CREATE_BY, PRIMARY_DS_TYPE FROM( SELECT P.PRIMARY_DS_ID, P.PRIMARY_DS_NAME, P.CREATION_DATE, P.CREATE_BY, PT.PRIMARY_DS_TYPE FROM {ownerDBS3}.PRIMARY_DATASETS P JOIN {ownerDBS3}.PRIMARY_DS_TYPES PT ON PT.PRIMARY_DS_TYPE_ID = P.PRIMARY_DS_TYPE_ID UNION ALL SELECT PD.ID PRIMARY_DS_ID, PD.NAME PRIMARY_DS_NAME, PD.CREATIONDATE CREATION_DATE, PS.DISTINGUISHEDNAME CREATE_BY, PT2.TYPE PRIMARY_DS_TYPE FROM {ownerDBS2}.PRIMARYDATASET PD JOIN {ownerDBS2}.PERSON PS ON PS.ID=PD.CREATEDBY JOIN {ownerDBS2}.PRIMARYDSTYPE PT2 ON PT2.ID=PD.TYPE ) GROUP BY PRIMARY_DS_ID, PRIMARY_DS_NAME, CREATION_DATE, CREATE_BY, PRIMARY_DS_TYPE HAVING COUNT(*) <> 2 ORDER BY PRIMARY_DS_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'PrimaryDSTypes': """SELECT PRIMARY_DS_TYPE_ID, PRIMARY_DS_TYPE FROM( SELECT PDST.PRIMARY_DS_TYPE_ID, PDST.PRIMARY_DS_TYPE FROM {ownerDBS3}.PRIMARY_DS_TYPES PDST UNION ALL SELECT PDST.ID PRIMARY_DS_TYPE_ID, PDST.TYPE PRIMARY_DS_TYPE FROM {ownerDBS2}.PRIMARYDSTYPE PDST ) GROUP BY PRIMARY_DS_TYPE_ID, PRIMARY_DS_TYPE HAVING COUNT(*) <> 2 ORDER BY PRIMARY_DS_TYPE_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'ProcessedDatasets': """SELECT PROCESSED_DS_NAME FROM( SELECT DISTINCT PCD.PROCESSED_DS_NAME FROM {ownerDBS3}.PROCESSED_DATASETS PCD UNION ALL SELECT DISTINCT PCD2.NAME PROCESSED_DS_NAME FROM {ownerDBS2}.PROCESSEDDATASET PCD2 ) GROUP BY PROCESSED_DS_NAME HAVING COUNT(*) <> 2 ORDER BY PROCESSED_DS_NAME """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'ReleaseVersions': """ SELECT RELEASE_VERSION_ID, RELEASE_VERSION FROM ( SELECT RV.RELEASE_VERSION_ID, RV.RELEASE_VERSION FROM {ownerDBS3}.RELEASE_VERSIONS RV UNION ALL SELECT RV.ID RELEASE_VERSION_ID, RV.VERSION RELEASE_VERSION FROM {ownerDBS2}.APPVERSION RV ) GROUP BY RELEASE_VERSION_ID, RELEASE_VERSION HAVING COUNT(*) <> 2 ORDER BY RELEASE_VERSION_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), }
def __init__(self, logger, tqRef, dbIface = None):
    """
    Constructor. Param logger is a python logger (required).

    Param tqRef is either a reference to the TQComp object we want to
    interface with (preferred), or the WMCore.Configuration object that was
    used to configure it.

    Param dbIface is optional. If used, it must be a valid
    WMCore.Database.Transaction object pointing to the DB interface that the
    TQComp object is using. Otherwise, such interface will be
    retrieved/reconstructed from the tqRef.

    Example of how to create an API from a WMCore component:

        from TQComp.Apis.TQSubmitApi import TQSubmitApi
        from TQComp.Apis.TQApiData import Task
        myThread = threading.currentThread()
        tqApi = TQApi(myThread.logger, self.config, myThread.transaction)

    How to create an API from the python interpreter:

        >>> from TQComp.Apis.TQStateApi import TQStateApi
        >>> import logging
        >>> mylogger = logging.getLogger("tqclient")
        >>> confFile = "/pool/TaskQueue/cms_code/WMCore-conf.py"
        >>> from WMCore import Configuration
        >>> myconfig = Configuration.loadConfigurationFile(confFile)
        >>> api = TQApi(mylogger, myconfig, None)

    For many practical purposes, one can instead use the 'tqclient' command
    line interface.
    """
    self.logger = logger
    self.logger.debug("Creating TQApi with params: %s, %s, %s" %
                      (logger, type(tqRef), dbIface))

    self.transaction = None
    if dbIface:
        self.transaction = dbIface

    if isinstance(tqRef, TQComp):
        self.tq = tqRef
        self.conf = None
        self.dialect = self.tq.dialect
        if not self.transaction:
            self.transaction = self.tq.transaction
    elif isinstance(tqRef, Configuration):
        self.tq = None
        self.conf = tqRef
        self.dialect = self.conf.CoreDatabase.dialect
        if not self.transaction:
            options = {}
            coreSect = self.conf.CoreDatabase
            if hasattr(coreSect, "socket"):
                options['unix_socket'] = coreSect.socket
            if hasattr(coreSect, "connectUrl"):
                dbStr = coreSect.connectUrl
            else:
                dbStr = self.dialect + '://' + coreSect.user + ':' + \
                        coreSect.passwd + "@" + coreSect.hostname + '/' + \
                        coreSect.name
            self.dbFactory = DBFactory(self.logger, dbStr, options)
            self.dbi = self.dbFactory.connect()
            self.transaction = Transaction(self.dbi)
    else:
        msg = "tqRef should be instance of TQComp or WMCore.Configuration"
        raise ValueError(msg)

    # Make things available for Queries (or others relying in myThread)
    myThread = threading.currentThread()
    if not hasattr(myThread, 'transaction'):
        myThread.transaction = self.transaction
    if not hasattr(myThread, 'logger'):
        myThread.logger = self.logger
    if not hasattr(myThread, 'dbi'):
        myThread.dbi = self.dbi

    if self.dialect == 'mysql':
        self.dialect = 'MySQL'
    self.factory = WMFactory("default", "TQComp.Database." + self.dialect)
    self.queries = self.factory.loadObject("Queries")
def __init__(self, config): """ _init_ """ BaseWorkerThread.__init__(self) myThread = threading.currentThread() self.daoFactory = DAOFactory(package="T0.WMBS", logger=logging, dbinterface=myThread.dbi) self.tier0ConfigFile = config.Tier0Feeder.tier0ConfigFile self.specDirectory = config.Tier0Feeder.specDirectory self.dropboxuser = getattr(config.Tier0Feeder, "dropboxuser", None) self.dropboxpass = getattr(config.Tier0Feeder, "dropboxpass", None) self.dqmUploadProxy = getattr(config.Tier0Feeder, "dqmUploadProxy", None) self.serviceProxy = getattr(config.Tier0Feeder, "serviceProxy", None) self.localRequestCouchDB = RequestDBWriter( config.AnalyticsDataCollector.localT0RequestDBURL, couchapp=config.AnalyticsDataCollector.RequestCouchApp) self.injectedRuns = set() hltConfConnectUrl = config.HLTConfDatabase.connectUrl dbFactoryHltConf = DBFactory(logging, dburl=hltConfConnectUrl, options={}) self.dbInterfaceHltConf = dbFactoryHltConf.connect() daoFactoryHltConf = DAOFactory(package="T0.WMBS", logger=logging, dbinterface=self.dbInterfaceHltConf) self.getHLTConfigDAO = daoFactoryHltConf( classname="RunConfig.GetHLTConfig") storageManagerConnectUrl = config.StorageManagerDatabase.connectUrl dbFactoryStorageManager = DBFactory(logging, dburl=storageManagerConnectUrl, options={}) self.dbInterfaceStorageManager = dbFactoryStorageManager.connect() self.dbInterfaceSMNotify = None if hasattr(config, "SMNotifyDatabase"): smNotifyConnectUrl = config.SMNotifyDatabase.connectUrl dbFactorySMNotify = DBFactory(logging, dburl=smNotifyConnectUrl, options={}) self.dbInterfaceSMNotify = dbFactorySMNotify.connect() self.getExpressReadyRunsDAO = None if hasattr(config, "PopConLogDatabase"): popConLogConnectUrl = getattr(config.PopConLogDatabase, "connectUrl", None) if popConLogConnectUrl != None: dbFactoryPopConLog = DBFactory(logging, dburl=popConLogConnectUrl, options={}) dbInterfacePopConLog = dbFactoryPopConLog.connect() daoFactoryPopConLog = DAOFactory( package="T0.WMBS", logger=logging, dbinterface=dbInterfacePopConLog) self.getExpressReadyRunsDAO = daoFactoryPopConLog( classname="Tier0Feeder.GetExpressReadyRuns") self.haveT0DataSvc = False if hasattr(config, "T0DataSvcDatabase"): t0datasvcConnectUrl = getattr(config.T0DataSvcDatabase, "connectUrl", None) if t0datasvcConnectUrl != None: self.haveT0DataSvc = True dbFactoryT0DataSvc = DBFactory(logging, dburl=t0datasvcConnectUrl, options={}) dbInterfaceT0DataSvc = dbFactoryT0DataSvc.connect() self.daoFactoryT0DataSvc = DAOFactory( package="T0.WMBS", logger=logging, dbinterface=dbInterfaceT0DataSvc) # # Set deployment ID # SetDeploymentIdDAO = self.daoFactory( classname="Tier0Feeder.SetDeploymentID") GetDeploymentIdDAO = self.daoFactory( classname="Tier0Feeder.GetDeploymentID") try: self.deployID = GetDeploymentIdDAO.execute() if self.deployID == 0: self.deployID = int( datetime.datetime.now().strftime("%y%m%d%H%M%S")) SetDeploymentIdDAO.execute(self.deployID) except: logging.exception( "Something went wrong with setting deployment ID") raise return
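# Hypothetical sketch: the configuration sections and attributes consulted by the
# Tier0Feeder constructor above. URLs and paths are placeholders; only the
# section/attribute names are taken from the code above.
from WMCore.Configuration import Configuration

t0Config = Configuration()
t0Config.component_("Tier0Feeder")
t0Config.Tier0Feeder.tier0ConfigFile = "/data/tier0/ProdOfflineConfiguration.py"
t0Config.Tier0Feeder.specDirectory = "/data/tier0/admin/Specs"
# dropboxuser, dropboxpass, dqmUploadProxy and serviceProxy are optional
# (the constructor falls back to None via getattr).

t0Config.component_("AnalyticsDataCollector")
t0Config.AnalyticsDataCollector.localT0RequestDBURL = "http://localhost:5984/t0_request"
t0Config.AnalyticsDataCollector.RequestCouchApp = "T0Request"

t0Config.section_("HLTConfDatabase")
t0Config.HLTConfDatabase.connectUrl = "oracle://user:pass@hltconf_db"
t0Config.section_("StorageManagerDatabase")
t0Config.StorageManagerDatabase.connectUrl = "oracle://user:pass@sm_db"
# SMNotifyDatabase, PopConLogDatabase and T0DataSvcDatabase sections are optional;
# the corresponding DB interfaces and DAOs are only created when they exist.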
def __init__(self): url = "oracle://*****:*****@host:port/sid" self.logger = logging.getLogger("dbs test logger") self.dbi = DBFactory(self.logger, url).connect()
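# Hypothetical usage sketch: run a query through the DBInterface created above and
# format the result, following the processData/DBFormatter pattern used elsewhere
# in these snippets. The table name is a placeholder and the URL stays redacted.
import logging
from WMCore.Database.DBFactory import DBFactory
from WMCore.Database.DBFormatter import DBFormatter

logger = logging.getLogger("dbs test logger")
dbi = DBFactory(logger, "oracle://*****:*****@host:port/sid").connect()
formatter = DBFormatter(logger, dbi)

results = dbi.processData("SELECT COUNT(*) FROM some_table")  # placeholder table
print formatter.formatOne(results)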
bindVars["p_%s" % bindVarCounter] = col bindVarCounter += 1 localT0AST.processData(insertQuery, bindVars, None, None) if len(sys.argv) != 4: print "Usage:" print " ./promptSkimInjector LOCAL_T0AST_URL PROD_T0AST_READER_URL RUN_NUMBER" print "" sys.exit(0) localT0ASTUrl = sys.argv[1] remoteT0ASTUrl = sys.argv[2] runNum = int(sys.argv[3]) localDBFactory = DBFactory(logging, localT0ASTUrl) remoteDBFactory = DBFactory(logging, remoteT0ASTUrl) localDbi = localDBFactory.connect() remoteDbi = remoteDBFactory.connect() print "\nCreating WMBS Schema in local Oracle...", wmbsCreate = Create(logging, localDbi) wmbsCreate.execute() print "done." copyTableNames = ["processed_dataset", "primary_dataset", "data_tier", "dataset_path", "run_status", "block_status", "block_migrate_status", "run_stream_cmssw_assoc", "cmssw_version", "t1skim_config", "phedex_subscription", "storage_node", "run", "block_run_assoc"]
print logging.DEBUG logging.basicConfig( level=logging.DEBUG, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M') else: logging.basicConfig( level=logging.INFO, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M') logger = logging.getLogger('SiteDB Schema Upgrade') #Connect to the database conn = DBFactory(logger, opts.database).connect() logger.debug('connected to %s' % opts.database) oldsites = get_siteinfo(conn) logger.debug(oldsites) if create_siteinfo(conn): if create_sitelinks(conn): badsites = populate_siteinfo(oldsites, conn) logger.debug(badsites) badsites = check_badsites(badsites, conn) badsites = check_sites(oldsites, badsites, conn) print "trying to migrate %s bad sites" % len(badsites) badsites, goodsites = migrate_badsites(badsites, conn) if len(badsites): badsites = check_sites(goodsites, badsites, conn)
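# Purely illustrative sketch (the real get_siteinfo is not part of this excerpt):
# a helper of this shape could fetch the existing site rows through the conn
# handle created above so they can be re-inserted into the new schema.
# Table name is a placeholder.
from WMCore.Database.DBFormatter import DBFormatter

def get_siteinfo(conn):
    results = conn.processData("SELECT * FROM site")   # placeholder table
    return DBFormatter(logger, conn).format(results)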
def setUp(self): """setup all necessary parameters""" dburl = os.environ["DBS_TEST_DBURL_READER"] self.logger = logging.getLogger("dbs test logger") self.dbowner = os.environ["DBS_TEST_DBOWNER_READER"] self.dbi = DBFactory(self.logger, dburl).connect()
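# Hypothetical sketch: provide the environment the setUp above expects before
# running the tests. Values are placeholders for a real reader account.
import os

os.environ["DBS_TEST_DBURL_READER"] = "oracle://reader:password@dbs_test_db"
os.environ["DBS_TEST_DBOWNER_READER"] = "DBS_TEST_OWNER"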
class BossLiteDBWMCore(object):
    """
    High level API class for DB queries through WMCore.
    It allows loading/operating/updating the DB using free-format queries.
    """

    dbConfig = {'dialect': '???',
                'user': '******',
                'username': '******',
                'passwd': '???',
                'password': '******',
                'tnsName': '???',
                'host': '???',
                'port': '???',
                'sid': '???'}

    def __init__(self, database, dbConfig):
        """
        initialize the API instance
        """
        # get logger
        self.logger = logging.getLogger()

        # create an instance of database
        if isinstance(dbConfig, basestring):
            self.dbInstance = DBFactory(self.logger, dburl=dbConfig)
            self.dbConfig = dbConfig
        else:
            self.dbInstance = DBFactory(self.logger, options=dbConfig)
            self.dbConfig.update(dbConfig)

        # report error if not successful
        if self.dbInstance is None:
            self.logger.error("Failed to Initialize BossLiteDBWMCore")
            return

        # create a session and db access
        self.session = None

    ##########################################################################

    def connect(self):
        """
        recreate a session and db access
        """
        # create a session and db access
        if self.session is None:
            self.session = self.dbInstance.connect()

    ##########################################################################

    def close(self):
        """
        close session and db access
        """
        # Does "close" method exist for SQLAlchemy? Not present in DBFactory ...
        self.session.close()
        self.session = None

    ##########################################################################

    def reset(self):
        """
        reset session and db access
        """
        self.close()
        self.connect()

    ##########################################################################

    def commit(self):
        """
        commit
        """
        # empty method
        pass

    ##########################################################################

    def select(self, query):
        """
        execute a query.
        """
        # db connect
        self.connect()

        # results is a WMCore.Database.ResultSet.ResultSet
        results = self.session.processData(query)

        if (results.rowcount > 0):
            formatter = DBFormatter(self.logger, self.session)
            out = formatter.format(results)
        else:
            out = None

        return out

    ##########################################################################

    def selectOne(self, query):
        """
        execute a query with only one result expected
        """
        # db connect
        self.connect()

        # execute query
        results = self.session.processData(query)

        if (results.rowcount > 0):
            formatter = DBFormatter(self.logger, self.session)
            out = formatter.formatOne(results)
        else:
            out = None

        return out

    ##########################################################################

    def modify(self, query):
        """
        execute a query that does not return results, such as insert/update/delete
        """
        # db connect
        self.connect()

        # run the query; no results are returned
        self.session.processData(query)

    ##########################################################################

    def updateDB(self, obj):
        """
        update any object table in the DB
        works for tasks, jobs, runningJobs
        """
        # db connect
        self.connect()

        # update
        obj.update(self.session)

    ##########################################################################

    def installDB(self, schemaLocation=None):
        """
        install database
        """
        if schemaLocation is not None:
            schemaLocation = expandvars(schemaLocation)
            self.dbConfig.update({'host': schemaLocation})

        daofactory = DAOFactory(package="WMCore.Services",
                                logger=self.logger,
                                dbinterface=self.session)
        mydao = daofactory(classname="BossLite." + self.dbConfig['dialect'] + ".Create")
        status = mydao.execute()

        # check creation...
        return status
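# Hypothetical usage sketch for the class above; the connect URL and table name
# are placeholders.
bossDB = BossLiteDBWMCore(None, "mysql://bossuser:secret@localhost/bosslite")
bossDB.connect()

# free-format read: returns a list of rows, or None when nothing matched
rows = bossDB.select("SELECT id, name FROM bl_task")
one = bossDB.selectOne("SELECT COUNT(*) FROM bl_task")

# free-format write: no result expected
bossDB.modify("UPDATE bl_task SET name = 'renamed' WHERE id = 1")
bossDB.close()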
class DBUserStore(UserStore): def __init__(self, config): UserStore.__init__(self, config) logging.basicConfig( level=logging.DEBUG, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M') self.logger = logging.getLogger('OIDDBUserStore') self.conn = DBFactory(self.logger, config.source).connect() def load(self, user): """ Build a dict like: {'permissions' : {role: [sites/groups]}, 'fullname' : user.fullname, 'dn' : user.dn} """ grpsql = """select contact.forename || ' ' || contact.SURNAME as fullname, contact.dn, role.title, user_group.name from contact, role, group_responsibility, user_group where contact.username=:username and contact.id = group_responsibility.contact and group_responsibility.role = role.id and user_group.id = group_responsibility.user_group""" sitesql = """select contact.forename || ' ' || contact.SURNAME as fullname, contact.dn, role.title, siteinfo_v2.cms_name from contact, role, site_responsibility, siteinfo_v2 where contact.username=:username and contact.id = site_responsibility.contact and site_responsibility.role = role.id and siteinfo_v2.id = site_responsibility.site""" userdict = {} data = self.conn.processData([grpsql, sitesql], binds=[{ 'username': user }, { 'username': user }]) for d in data: for r in d.fetchall(): if 'fullname' not in userdict.keys(): userdict['fullname'] = r[0] if 'dn' not in userdict.keys(): userdict['dn'] = r[1] if 'permissions' not in userdict.keys(): userdict['permissions'] = {r[2]: [r[3]]} else: if r[2] in userdict['permissions'].keys(): userdict['permissions'][r[2]].append(r[3]) else: userdict['permissions'][r[2]] = [r[3]] print r return userdict def checkpass(self, user, password): sql = 'select passwd from user_passwd where username = :username' try: data = self.conn.processData(sql, binds={'username': user}) if len(data) == 1: encpassword = data[0].fetchone()[0] return encpassword == crypt(password, encpassword) except Exception, e: self.logger.info(str(e)) return False
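# Hypothetical usage sketch for the store above; the username, DN, role and site
# values are placeholders illustrating the dict shape built by load().
store = DBUserStore(config)          # config.source holds the SiteDB connect URL

perms = store.load("jdoe")
# Expected shape, assembled from the group and site responsibility queries:
# {'fullname': 'John Doe',
#  'dn': '/DC=ch/DC=cern/OU=Users/CN=jdoe',
#  'permissions': {'Site Admin': ['T2_XX_Example'],
#                  'Group Manager': ['examplegroup']}}

if store.checkpass("jdoe", "secret"):
    print "password accepted"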
def __init__(self, logger, connectUrl, ownerDBS3, ownerDBS2): object.__init__(self) dbFactory = DBFactory(logger, connectUrl, options={}) self.dbi = dbFactory.connect() self.dbFormatter = DBFormatter(logger, self.dbi) self.sqlPrimaryKey = {'AcquisitionEras': 'acquisition_era_name', 'ApplicationExecutables': 'app_exec_id', 'Block': 'block_id', 'BlockParents': 'this_block_id', 'Dataset': 'dataset_id', 'DatasetAccessTypes': 'dataset_access_type_id', 'DatasetOutputModConfigs': 'ds_output_mod_conf_id', 'DatasetParents': 'this_dataset_id', 'DatasetRuns': 'dataset_run_id', 'DataTier': 'data_tier_id', 'Files': 'file_id', 'FileDataTypes': 'file_type_id', 'FileLumis': 'file_lumi_id', 'FileOutputModConfigs': 'file_output_config_id', 'FileParents': 'this_file_id', 'OutputModule': 'output_mod_config_id', 'ParametersetHashes': 'parameter_set_hash_id', 'PhysicsGroups': 'physics_group_id', 'PrimaryDS': 'primary_ds_id', 'PrimaryDSTypes': 'primary_ds_type_id', 'ProcessedDatasets': 'processed_ds_name', 'ReleaseVersions': 'release_version_id'} self.sqlDict = {'AcquisitionEras': """SELECT ACQUISITION_ERA_NAME, START_DATE, END_DATE CREATION_DATE, CREATE_BY, DESCRIPTION FROM( SELECT DISTINCT AE.ACQUISITION_ERA_NAME, AE.START_DATE, AE.END_DATE, AE.CREATION_DATE, AE.CREATE_BY, AE.DESCRIPTION FROM {ownerDBS3}.ACQUISITION_ERAS AE UNION ALL SELECT DISTINCT PCD.AQUISITIONERA ACQUISITION_ERA_NAME, 0 START_DATE, NULL END_DATE, NULL CREATION_DATE, NULL CREATE_BY, NULL DESCRIPTION FROM {ownerDBS2}.PROCESSEDDATASET PCD WHERE AQUISITIONERA IS NOT NULL ) GROUP BY ACQUISITION_ERA_NAME, START_DATE, END_DATE, CREATION_DATE, CREATE_BY, DESCRIPTION HAVING COUNT(*) <> 2 ORDER BY ACQUISITION_ERA_NAME """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'ApplicationExecutables': """SELECT APP_EXEC_ID, APP_NAME FROM( SELECT AE.APP_EXEC_ID, AE.APP_NAME FROM {ownerDBS3}.APPLICATION_EXECUTABLES AE UNION ALL SELECT AE2.ID APP_EXEC_ID, AE2.EXECUTABLENAME APP_NAME FROM {ownerDBS2}.APPEXECUTABLE AE2 ) GROUP BY APP_EXEC_ID, APP_NAME HAVING COUNT(*) <> 2 ORDER BY APP_EXEC_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'Block': """SELECT BLOCK_ID, BLOCK_NAME, DATASET_ID, PATH, OPEN_FOR_WRITING, ORIGIN_SITE_NAME, BLOCK_SIZE, FILE_COUNT, CREATION_DATE, CREATE_BY, LAST_MODIFICATION_DATE, LAST_MODIFIED_BY FROM( SELECT BL.BLOCK_ID, BL.BLOCK_NAME, BL.DATASET_ID, DS.DATASET PATH, BL.OPEN_FOR_WRITING, BL.ORIGIN_SITE_NAME, BL.BLOCK_SIZE, BL.FILE_COUNT, BL.CREATION_DATE, BL.CREATE_BY, BL.LAST_MODIFICATION_DATE, BL.LAST_MODIFIED_BY FROM {ownerDBS3}.BLOCKS BL JOIN {ownerDBS3}.DATASETS DS ON BL.DATASET_ID=DS.DATASET_ID UNION ALL SELECT BL2.ID BLOCK_ID, BL2.NAME BLOCK_NAME, BL2.DATASET DATASET_ID, BL2.PATH, BL2.OPENFORWRITING OPEN_FOR_WRITING, 'UNKNOWN' ORIGIN_SITE_NAME, BL2.BLOCKSIZE BLOCK_SIZE, BL2.NUMBEROFFILES FILE_COUNT, BL2.CREATIONDATE CREATION_DATE, PS1.DISTINGUISHEDNAME CREATE_BY, BL2.LASTMODIFICATIONDATE LAST_MODIFICATION_DATE, PS2.DISTINGUISHEDNAME LAST_MODIFIED_BY FROM {ownerDBS2}.BLOCK BL2 JOIN {ownerDBS2}.PERSON PS1 ON BL2.CREATEDBY=PS1.ID JOIN {ownerDBS2}.PERSON PS2 ON BL2.LASTMODIFIEDBY=PS2.ID JOIN {ownerDBS2}.PROCESSEDDATASET DS ON DS.ID=BL2.DATASET JOIN {ownerDBS2}.PRIMARYDATASET PD on DS.PRIMARYDATASET=PD.ID JOIN {ownerDBS2}.DATATIER DT ON DS.DATATIER=DT.ID ) GROUP BY BLOCK_ID, BLOCK_NAME, DATASET_ID, PATH, OPEN_FOR_WRITING, ORIGIN_SITE_NAME, BLOCK_SIZE, FILE_COUNT, CREATION_DATE, CREATE_BY, LAST_MODIFICATION_DATE, LAST_MODIFIED_BY 
HAVING COUNT(*) <> 2 ORDER BY BLOCK_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'BlockParents': """SELECT THIS_BLOCK_ID, PARENT_BLOCK_ID FROM ( SELECT BP.THIS_BLOCK_ID, BP.PARENT_BLOCK_ID FROM {ownerDBS3}.BLOCK_PARENTS BP UNION ALL SELECT THISBLOCK this_block_id, ITSPARENT parent_block_id FROM {ownerDBS2}.BLOCKPARENT) GROUP BY THIS_BLOCK_ID,PARENT_BLOCK_ID HAVING COUNT(*) <> 2 ORDER BY THIS_BLOCK_ID, PARENT_BLOCK_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'DataTier': """SELECT DATA_TIER_ID, DATA_TIER_NAME, CREATION_DATE, CREATE_BY FROM( SELECT DT.DATA_TIER_ID, DT.DATA_TIER_NAME, DT.CREATION_DATE, DT.CREATE_BY FROM {ownerDBS3}.DATA_TIERS DT UNION ALL SELECT DT.ID DATA_TIER_ID, DT.NAME DATA_TIER_NAME, DT.CREATIONDATE CREATION_DATE, PS.DISTINGUISHEDNAME CREATE_BY FROM {ownerDBS2}.DATATIER DT JOIN {ownerDBS2}.PERSON PS ON PS.ID=DT.CREATEDBY ) GROUP BY DATA_TIER_ID, DATA_TIER_NAME, CREATION_DATE, CREATE_BY HAVING COUNT(*) <> 2 ORDER BY data_tier_id """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'Dataset': """SELECT DATASET_ID, DATASET, XTCROSSSECTION, CREATION_DATE, CREATE_BY, LAST_MODIFICATION_DATE, LAST_MODIFIED_BY, PRIMARY_DS_NAME, PRIMARY_DS_TYPE, PROCESSED_DS_NAME, DATA_TIER_NAME, DATASET_ACCESS_TYPE, ACQUISITION_ERA_NAME, PROCESSING_ERA_ID, PHYSICS_GROUP_NAME, PREP_ID FROM( SELECT D.DATASET_ID, D.DATASET, D.XTCROSSSECTION, D.CREATION_DATE, D.CREATE_BY, D.LAST_MODIFICATION_DATE, D.LAST_MODIFIED_BY, P.PRIMARY_DS_NAME, PDT.PRIMARY_DS_TYPE, PD.PROCESSED_DS_NAME, DT.DATA_TIER_NAME, DP.DATASET_ACCESS_TYPE, AE.ACQUISITION_ERA_NAME, D.PROCESSING_ERA_ID, PH.PHYSICS_GROUP_NAME, D.PREP_ID FROM {ownerDBS3}.DATASETS D JOIN {ownerDBS3}.PRIMARY_DATASETS P ON P.PRIMARY_DS_ID = D.PRIMARY_DS_ID JOIN {ownerDBS3}.PRIMARY_DS_TYPES PDT ON PDT.PRIMARY_DS_TYPE_ID = P.PRIMARY_DS_TYPE_ID JOIN {ownerDBS3}.PROCESSED_DATASETS PD ON PD.PROCESSED_DS_ID = D.PROCESSED_DS_ID JOIN {ownerDBS3}.DATA_TIERS DT ON DT.DATA_TIER_ID = D.DATA_TIER_ID JOIN {ownerDBS3}.DATASET_ACCESS_TYPES DP on DP.DATASET_ACCESS_TYPE_ID= D.DATASET_ACCESS_TYPE_ID LEFT OUTER JOIN {ownerDBS3}.ACQUISITION_ERAS AE ON AE.ACQUISITION_ERA_ID = D.ACQUISITION_ERA_ID LEFT OUTER JOIN {ownerDBS3}.PHYSICS_GROUPS PH ON PH.PHYSICS_GROUP_ID = D.PHYSICS_GROUP_ID UNION ALL SELECT DS.ID DATASET_ID, '/' || PD2.NAME || '/' || DS.NAME || '/' || DT2.NAME DATASET, DS.XTCROSSSECTION, DS.CREATIONDATE CREATION_DATE, PS1.DISTINGUISHEDNAME CREATE_BY, DS.LASTMODIFICATIONDATE LAST_MODIFICATION_DATE, PS2.DISTINGUISHEDNAME LAST_MODIFIED_BY, PD2.NAME PRIMARY_DS_NAME, PT.TYPE PRIMARY_DS_TYPE, DS.NAME PROCESSED_DS_NAME, DT2.NAME DATA_TIER_NAME, ST.STATUS DATASET_ACCESS_TYPE, DS.AQUISITIONERA ACQUISITION_ERA_NAME, NULL PROCESSING_ERA_ID, PG.PHYSICSGROUPNAME physics_group_name, NULL PREP_ID FROM {ownerDBS2}.PROCESSEDDATASET DS JOIN {ownerDBS2}.DATATIER DT2 ON DS.DATATIER=DT2.ID JOIN {ownerDBS2}.PRIMARYDATASET PD2 ON PD2.ID=DS.PRIMARYDATASET JOIN {ownerDBS2}.PHYSICSGROUP PG ON PG.ID=DS.PHYSICSGROUP JOIN {ownerDBS2}.PROCDSSTATUS ST ON ST.ID=DS.STATUS JOIN {ownerDBS2}.PERSON PS1 ON DS.CREATEDBY=PS1.ID JOIN {ownerDBS2}.PERSON PS2 ON DS.LASTMODIFIEDBY=PS2.ID JOIN {ownerDBS2}.PRIMARYDSTYPE PT ON PT.ID=PD2.TYPE ) GROUP BY DATASET_ID, DATASET, XTCROSSSECTION, CREATION_DATE, CREATE_BY, LAST_MODIFICATION_DATE, LAST_MODIFIED_BY, PRIMARY_DS_NAME, PRIMARY_DS_TYPE, PROCESSED_DS_NAME, DATA_TIER_NAME, DATASET_ACCESS_TYPE, 
ACQUISITION_ERA_NAME, PROCESSING_ERA_ID, PHYSICS_GROUP_NAME, PREP_ID HAVING COUNT(*) <> 2 ORDER BY DATASET_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## ## Some datatypes are not existing anymore in DBS3 'DatasetAccessTypes': """SELECT DATASET_ACCESS_TYPE_ID, DATASET_ACCESS_TYPE FROM( SELECT DAT.DATASET_ACCESS_TYPE_ID, DAT.DATASET_ACCESS_TYPE FROM {ownerDBS3}.DATASET_ACCESS_TYPES DAT UNION ALL SELECT PDS.ID DATASET_ACCESS_TYPE_ID, PDS.STATUS DATASET_ACCESS_TYPE FROM {ownerDBS2}.PROCDSSTATUS PDS WHERE PDS.ID!=3 AND PDS.ID!=4 AND PDS.ID!=21 AND PDS.ID!=61 ) GROUP BY DATASET_ACCESS_TYPE_ID, DATASET_ACCESS_TYPE HAVING COUNT(*) <> 2 ORDER BY DATASET_ACCESS_TYPE_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'DatasetOutputModConfigs': """SELECT DS_OUTPUT_MOD_CONF_ID, DATASET_ID, OUTPUT_MOD_CONFIG_ID FROM( SELECT DOMC.DS_OUTPUT_MOD_CONF_ID, DOMC.DATASET_ID, DOMC.OUTPUT_MOD_CONFIG_ID FROM {ownerDBS3}.DATASET_OUTPUT_MOD_CONFIGS DOMC UNION ALL SELECT PA.ID ds_output_mod_conf_id, PA.DATASET dataset_id, PA.ALGORITHM output_mod_config_id FROM {ownerDBS2}.PROCALGO PA ) GROUP BY DS_OUTPUT_MOD_CONF_ID, DATASET_ID, OUTPUT_MOD_CONFIG_ID HAVING COUNT(*) <> 2 ORDER BY DS_OUTPUT_MOD_CONF_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'DatasetParents': """SELECT THIS_DATASET_ID, PARENT_DATASET_ID FROM( SELECT DP.THIS_DATASET_ID, DP.PARENT_DATASET_ID FROM {ownerDBS3}.DATASET_PARENTS DP UNION ALL SELECT DP2.THISDATASET this_dataset_id, DP2.ITSPARENT parent_dataset_id FROM {ownerDBS2}.PROCDSPARENT DP2 ) GROUP BY THIS_DATASET_ID, PARENT_DATASET_ID HAVING COUNT(*) <> 2 ORDER BY this_dataset_id,parent_dataset_id """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'File': """ SELECT FILE_ID, LOGICAL_FILE_NAME, IS_FILE_VALID, DATASET_ID, DATASET, BLOCK_ID, BLOCK_NAME, FILE_TYPE_ID, FILE_TYPE, CHECK_SUM, EVENT_COUNT, FILE_SIZE, BRANCH_HASH_ID, ADLER32, MD5, AUTO_CROSS_SECTION, CREATION_DATE, CREATE_BY, LAST_MODIFICATION_DATE, LAST_MODIFIED_BY FROM ( SELECT F.FILE_ID, F.LOGICAL_FILE_NAME, F.IS_FILE_VALID, F.DATASET_ID, D.DATASET, F.BLOCK_ID, B.BLOCK_NAME, F.FILE_TYPE_ID, FT.FILE_TYPE, F.CHECK_SUM, F.EVENT_COUNT, F.FILE_SIZE, F.BRANCH_HASH_ID, F.ADLER32, F.MD5, F.AUTO_CROSS_SECTION, F.CREATION_DATE, F.CREATE_BY, F.LAST_MODIFICATION_DATE, F.LAST_MODIFIED_BY FROM {ownerDBS3}.FILES F JOIN {ownerDBS3}.FILE_DATA_TYPES FT ON FT.FILE_TYPE_ID = F.FILE_TYPE_ID JOIN {ownerDBS3}.DATASETS D ON D.DATASET_ID = F.DATASET_ID JOIN {ownerDBS3}.BLOCKS B ON B.BLOCK_ID = F.BLOCK_ID UNION ALL SELECT FS2.ID file_id, FS2.LOGICALFILENAME logical_file_name, CASE WHEN FST.STATUS='VALID' THEN 1 ELSE 0 END AS IS_FILE_VALID, FS2.DATASET dataset_id, '/' || PD2.NAME || '/' || DS2.NAME || '/' || DT2.NAME dataset, FS2.BLOCK block_id, BL2.NAME block_name, FS2.FILETYPE file_type_id, FT2.TYPE file_type, FS2.CHECKSUM check_sum, FS2.NUMBEROFEVENTS event_count, FS2.FILESIZE file_size, FS2.FILEBRANCH branch_hash_id, FS2.ADLER32, FS2.MD5, FS2.AUTOCROSSSECTION auto_cross_section, FS2.CREATIONDATE creation_date, PS12.DISTINGUISHEDNAME create_by, FS2.LASTMODIFICATIONDATE last_modification_date, PS22.DISTINGUISHEDNAME last_modified_by FROM {ownerDBS2}.FILES FS2 JOIN {ownerDBS2}.PROCESSEDDATASET DS2 ON DS2.ID=FS2.DATASET JOIN {ownerDBS2}.PRIMARYDATASET PD2 on DS2.PRIMARYDATASET=PD2.ID JOIN {ownerDBS2}.DATATIER DT2 ON DS2.DATATIER=DT2.ID JOIN 
{ownerDBS2}.PERSON PS12 ON FS2.CREATEDBY=PS12.ID JOIN {ownerDBS2}.PERSON PS22 ON FS2.LASTMODIFIEDBY=PS22.ID JOIN {ownerDBS2}.BLOCK BL2 ON FS2.BLOCK=BL2.ID JOIN {ownerDBS2}.FILETYPE FT2 ON FT2.ID=FS2.FILETYPE JOIN {ownerDBS2}.FILESTATUS FST ON FST.ID=FS2.FILESTATUS ) GROUP BY FILE_ID, LOGICAL_FILE_NAME, IS_FILE_VALID, DATASET_ID, DATASET, BLOCK_ID, BLOCK_NAME, FILE_TYPE_ID, FILE_TYPE, CHECK_SUM, EVENT_COUNT, FILE_SIZE, BRANCH_HASH_ID, ADLER32, MD5, AUTO_CROSS_SECTION, CREATION_DATE, CREATE_BY, LAST_MODIFICATION_DATE, LAST_MODIFIED_BY HAVING COUNT(*) <> 2 ORDER BY FILE_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'FileDataTypes': """SELECT FILE_TYPE_ID, FILE_TYPE FROM( SELECT FDT.FILE_TYPE_ID, FDT.FILE_TYPE FROM {ownerDBS3}.FILE_DATA_TYPES FDT UNION ALL SELECT FDT2.ID FILE_TYPE_ID, FDT2.TYPE FILE_TYPE FROM {ownerDBS2}.FILETYPE FDT2 ) GROUP BY FILE_TYPE_ID, FILE_TYPE HAVING COUNT(*) <> 2 ORDER BY FILE_TYPE_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'FileLumis': """SELECT RUN_NUM,LUMI_SECTION_NUM,FILE_ID FROM (SELECT FL.RUN_NUM,FL.LUMI_SECTION_NUM,FL.FILE_ID FROM {ownerDBS3}.FILE_LUMIS FL UNION ALL SELECT RU.RUNNUMBER RUN_NUM, LU.LUMISECTIONNUMBER LUMI_SECTION_NUM, FRL.FILEID FILE_ID FROM {ownerDBS2}.FILERUNLUMI FRL JOIN {ownerDBS2}.RUNS RU ON FRL.RUN=RU.ID JOIN {ownerDBS2}.LUMISECTION LU ON FRL.LUMI=LU.ID ) GROUP BY RUN_NUM,LUMI_SECTION_NUM,FILE_ID HAVING COUNT(*) <> 2 ORDER BY FILE_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'FileLumisMinMax': """SELECT MIN(FRL.FILEID) AS MIN_ID, MAX(FRL.FILEID) AS MAX_ID FROM {ownerDBS2}.FILERUNLUMI FRL """.format(ownerDBS2=ownerDBS2), ############################################## 'FileLumisSplited': """SELECT RUN_NUM,LUMI_SECTION_NUM,FILE_ID FROM (SELECT FL.RUN_NUM,FL.LUMI_SECTION_NUM,FL.FILE_ID FROM {ownerDBS3}.FILE_LUMIS FL UNION ALL SELECT RU.RUNNUMBER RUN_NUM, LU.LUMISECTIONNUMBER LUMI_SECTION_NUM, FRL.FILEID file_id FROM {ownerDBS2}.FILERUNLUMI FRL JOIN {ownerDBS2}.RUNS RU ON FRL.RUN=RU.ID JOIN {ownerDBS2}.LUMISECTION LU ON FRL.LUMI=LU.ID ) WHERE FILE_ID >= :min_id AND FILE_ID <= :max_id GROUP BY RUN_NUM,LUMI_SECTION_NUM,FILE_ID HAVING COUNT(*) <> 2 ORDER BY FILE_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'FileOutputModConfigs': """SELECT FILE_OUTPUT_CONFIG_ID,FILE_ID,OUTPUT_MOD_CONFIG_ID FROM (SELECT FOMC.FILE_OUTPUT_CONFIG_ID,FOMC.FILE_ID,FOMC.OUTPUT_MOD_CONFIG_ID FROM {ownerDBS3}.FILE_OUTPUT_MOD_CONFIGS FOMC UNION ALL SELECT FA.ID file_output_config_id, FA.FILEID file_id, FA.ALGORITHM output_mod_config_id FROM {ownerDBS2}.FILEALGO FA ) GROUP BY FILE_OUTPUT_CONFIG_ID,FILE_ID,OUTPUT_MOD_CONFIG_ID HAVING COUNT(*) <> 2 ORDER BY FILE_OUTPUT_CONFIG_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'FileParents': """SELECT THIS_FILE_ID,PARENT_FILE_ID FROM (SELECT FP.THIS_FILE_ID,FP.PARENT_FILE_ID FROM {ownerDBS3}.FILE_PARENTS FP UNION ALL SELECT FP2.THISFILE this_file_id, FP2.ITSPARENT parent_file_id FROM {ownerDBS2}.FILEPARENTAGE FP2) GROUP BY THIS_FILE_ID,PARENT_FILE_ID HAVING COUNT(*) <> 2 ORDER BY THIS_FILE_ID,PARENT_FILE_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'OutputModule': """SELECT OUTPUT_MOD_CONFIG_ID, APP_NAME, RELEASE_VERSION, PARAMETER_SET_HASH_ID, PSET_HASH, pset_name, 
OUTPUT_MODULE_LABEL, GLOBAL_TAG, SCENARIO, CREATION_DATE, CREATE_BY FROM( SELECT O.OUTPUT_MOD_CONFIG_ID, AE.APP_NAME, RV.RELEASE_VERSION, PSH.PARAMETER_SET_HASH_ID, PSH.PSET_HASH, PSH.PSET_NAME, O.OUTPUT_MODULE_LABEL, O.GLOBAL_TAG, O.SCENARIO, O.CREATION_DATE, O.CREATE_BY FROM {ownerDBS3}.OUTPUT_MODULE_CONFIGS O JOIN {ownerDBS3}.APPLICATION_EXECUTABLES AE ON O.APP_EXEC_ID=AE.APP_EXEC_ID JOIN {ownerDBS3}.RELEASE_VERSIONS RV ON O.RELEASE_VERSION_ID=RV.RELEASE_VERSION_ID JOIN {ownerDBS3}.PARAMETER_SET_HASHES PSH ON O.PARAMETER_SET_HASH_ID=PSH.PARAMETER_SET_HASH_ID UNION ALL SELECT DISTINCT AC.ID OUTPUT_MOD_CONFIG_ID, APPEX.EXECUTABLENAME APP_NAME, APPVER.VERSION RELEASE_VERSION, AC.PARAMETERSETID PARAMETER_SET_HASH_ID, QPS.HASH PSET_HASH, QPS.NAME PSET_NAME, TO_CHAR(AC.APPLICATIONFAMILY) OUTPUT_MODULE_LABEL, CASE WHEN (SELECT COUNT(DISTINCT PDS.GLOBALTAG) FROM {ownerDBS2}.PROCALGO PA INNER JOIN {ownerDBS2}.PROCESSEDDATASET PDS ON PA.DATASET = PDS.ID INNER JOIN {ownerDBS2}.ALGORITHMCONFIG AC2 on AC2.ID = PA.ALGORITHM WHERE PDS.GLOBALTAG IS NOT NULL ) = 1 THEN (SELECT DISTINCT PDS.GLOBALTAG FROM {ownerDBS2}.PROCALGO PA LEFT JOIN {ownerDBS2}.PROCESSEDDATASET PDS ON PA.DATASET = PDS.ID WHERE PDS.GLOBALTAG IS NOT NULL AND AC.ID = PA.ALGORITHM) ELSE 'UNKNOWN' END AS GLOBAL_TAG, NULL SCENARIO, AC.CREATIONDATE CREATION_DATE, PS.DISTINGUISHEDNAME CREATE_BY FROM {ownerDBS2}.ALGORITHMCONFIG AC JOIN {ownerDBS2}.APPEXECUTABLE APPEX ON APPEX.ID=AC.EXECUTABLENAME JOIN {ownerDBS2}.APPVERSION APPVER ON APPVER.ID=AC.APPLICATIONVERSION JOIN {ownerDBS2}.PERSON PS ON PS.ID=AC.CREATEDBY JOIN {ownerDBS2}.QUERYABLEPARAMETERSET QPS ON QPS.ID=AC.PARAMETERSETID ) GROUP BY OUTPUT_MOD_CONFIG_ID, APP_NAME, RELEASE_VERSION, PARAMETER_SET_HASH_ID, PSET_HASH, PSET_NAME, OUTPUT_MODULE_LABEL, GLOBAL_TAG, SCENARIO, CREATION_DATE, CREATE_BY HAVING COUNT(*) <> 2 ORDER BY OUTPUT_MOD_CONFIG_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'ParametersetHashes': """ SELECT PARAMETER_SET_HASH_ID, PSET_HASH, PSET_NAME FROM( SELECT PH.PARAMETER_SET_HASH_ID, PH.PSET_HASH, PH.PSET_NAME FROM {ownerDBS3}.PARAMETER_SET_HASHES PH UNION ALL SELECT QP.ID PARAMETER_SET_HASH_ID, QP.HASH PSET_HASH, QP.NAME PSET_NAME FROM {ownerDBS2}.QUERYABLEPARAMETERSET QP ) GROUP BY PARAMETER_SET_HASH_ID, PSET_HASH, PSET_NAME HAVING COUNT(*) <> 2 ORDER BY PARAMETER_SET_HASH_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'PhysicsGroups': """SELECT PHYSICS_GROUP_ID, PHYSICS_GROUP_NAME FROM( SELECT PG.PHYSICS_GROUP_ID, PG.PHYSICS_GROUP_NAME FROM {ownerDBS3}.PHYSICS_GROUPS PG UNION ALL SELECT PG2.ID PHYSICS_GROUP_ID, PG2.PHYSICSGROUPNAME PHYSICS_GROUP_NAME FROM {ownerDBS2}.PHYSICSGROUP PG2 ) GROUP BY PHYSICS_GROUP_ID, PHYSICS_GROUP_NAME HAVING COUNT(*) <> 2 ORDER BY PHYSICS_GROUP_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'PrimaryDS': """SELECT PRIMARY_DS_ID, PRIMARY_DS_NAME, CREATION_DATE, CREATE_BY, PRIMARY_DS_TYPE FROM( SELECT P.PRIMARY_DS_ID, P.PRIMARY_DS_NAME, P.CREATION_DATE, P.CREATE_BY, PT.PRIMARY_DS_TYPE FROM {ownerDBS3}.PRIMARY_DATASETS P JOIN {ownerDBS3}.PRIMARY_DS_TYPES PT ON PT.PRIMARY_DS_TYPE_ID = P.PRIMARY_DS_TYPE_ID UNION ALL SELECT PD.ID PRIMARY_DS_ID, PD.NAME PRIMARY_DS_NAME, PD.CREATIONDATE CREATION_DATE, PS.DISTINGUISHEDNAME CREATE_BY, PT2.TYPE PRIMARY_DS_TYPE FROM {ownerDBS2}.PRIMARYDATASET PD JOIN {ownerDBS2}.PERSON PS ON PS.ID=PD.CREATEDBY JOIN {ownerDBS2}.PRIMARYDSTYPE PT2 
ON PT2.ID=PD.TYPE ) GROUP BY PRIMARY_DS_ID, PRIMARY_DS_NAME, CREATION_DATE, CREATE_BY, PRIMARY_DS_TYPE HAVING COUNT(*) <> 2 ORDER BY PRIMARY_DS_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'PrimaryDSTypes': """SELECT PRIMARY_DS_TYPE_ID, PRIMARY_DS_TYPE FROM( SELECT PDST.PRIMARY_DS_TYPE_ID, PDST.PRIMARY_DS_TYPE FROM {ownerDBS3}.PRIMARY_DS_TYPES PDST UNION ALL SELECT PDST.ID PRIMARY_DS_TYPE_ID, PDST.TYPE PRIMARY_DS_TYPE FROM {ownerDBS2}.PRIMARYDSTYPE PDST ) GROUP BY PRIMARY_DS_TYPE_ID, PRIMARY_DS_TYPE HAVING COUNT(*) <> 2 ORDER BY PRIMARY_DS_TYPE_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'ProcessedDatasets': """SELECT PROCESSED_DS_NAME FROM( SELECT DISTINCT PCD.PROCESSED_DS_NAME FROM {ownerDBS3}.PROCESSED_DATASETS PCD UNION ALL SELECT DISTINCT PCD2.NAME PROCESSED_DS_NAME FROM {ownerDBS2}.PROCESSEDDATASET PCD2 ) GROUP BY PROCESSED_DS_NAME HAVING COUNT(*) <> 2 ORDER BY PROCESSED_DS_NAME """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), ############################################## 'ReleaseVersions': """ SELECT RELEASE_VERSION_ID, RELEASE_VERSION FROM ( SELECT RV.RELEASE_VERSION_ID, RV.RELEASE_VERSION FROM {ownerDBS3}.RELEASE_VERSIONS RV UNION ALL SELECT RV.ID RELEASE_VERSION_ID, RV.VERSION RELEASE_VERSION FROM {ownerDBS2}.APPVERSION RV ) GROUP BY RELEASE_VERSION_ID, RELEASE_VERSION HAVING COUNT(*) <> 2 ORDER BY RELEASE_VERSION_ID """.format(ownerDBS3=ownerDBS3, ownerDBS2=ownerDBS2), }
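# Hypothetical helper (the method name is an assumption, not part of the class
# above): run one of the DBS2-vs-DBS3 comparison queries from self.sqlDict and
# return the rows that do not appear exactly twice, i.e. the entries that differ
# between the two schemas, formatted via the dbFormatter created above.
def listDifferences(self, table, binds=None):
    if table not in self.sqlDict:
        raise ValueError("No comparison query defined for %s" % table)
    results = self.dbi.processData(self.sqlDict[table], binds=binds or {})
    return self.dbFormatter.format(results)

# e.g. listDifferences(validator, 'DataTier'), or for the splitted file/lumi check:
# listDifferences(validator, 'FileLumisSplited', {'min_id': 1, 'max_id': 500000})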