def execute(self, conn, logical_file_name, is_file_valid, lost, transaction=False):
    """
    Update the validity flag of a single file or of a list of files.

    :param conn: database connection handed down from the business layer
    :param logical_file_name: one LFN (string) or a list of LFNs
    :param is_file_valid: new value for the IS_FILE_VALID column
    :param lost: truthy to also zero the file size of the affected files
    :param transaction: passed through to the DB layer
    """
    if not conn:
        dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/File/UpdateStatus. Expects db connection from upper layer.")
    binds = {"myuser": dbsUtils().getCreateBy(),
             "mydate": dbsUtils().getTime(),
             "is_file_valid": is_file_valid}
    if isinstance(logical_file_name, list):
        # WITH-clause subquery factoring only works for SELECT statements, so the
        # generator text has to sit in front of the SELECT inside the WHERE clause.
        # http://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:8120272301765
        lfn_generator, lfn_binds = create_token_generator(logical_file_name)
        binds.update(lfn_binds)
        wheresql = """WHERE F.LOGICAL_FILE_NAME in ({lfn_generator} SELECT TOKEN FROM TOKEN_GENERATOR) """.format(lfn_generator=lfn_generator)
    else:
        binds["logical_file_name"] = logical_file_name
        wheresql = "where F.LOGICAL_FILE_NAME=:logical_file_name"
    # When the file is lost, reset its size as well.
    template = "{sql}, file_size=0 {wheresql}" if lost else "{sql} {wheresql}"
    statement = template.format(sql=self.sql, wheresql=wheresql)
    self.dbi.processData(statement, binds, conn, transaction)
def execute(self, conn, logical_file_name, is_file_valid, lost, dataset, transaction=False):
    """
    Update the validity flag for a file, a list of files, or all files of a
    dataset. When `lost` is truthy the file size is zeroed as well.

    Exactly one of logical_file_name / dataset must be supplied.
    """
    binds = dict(myuser=dbsUtils().getCreateBy(), mydate=dbsUtils().getTime(), is_file_valid=is_file_valid)
    if logical_file_name and isinstance(logical_file_name, list):
        lfn_generator, lfn_binds = create_token_generator(logical_file_name)
        ###with clause - subquery factory does only work with select statements, therefore lfn_generator
        ###has to be place in front of the SELECT statement in the WHERE clause
        ###http://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:8120272301765
        wheresql = """WHERE F.LOGICAL_FILE_NAME in ({lfn_generator} SELECT TOKEN FROM TOKEN_GENERATOR) """.format(lfn_generator=lfn_generator)
        binds.update(lfn_binds)
    elif logical_file_name:
        wheresql = "where F.LOGICAL_FILE_NAME=:logical_file_name"
        binds.update(logical_file_name=logical_file_name)
    elif dataset:
        wheresql = """ where F.dataset_id in ( select D.dataset_id from {owner}DATASETS D inner join {owner}FILES F2 on F2.dataset_id = D.dataset_id Where D.dataset=:dataset) """.format(owner=self.owner)
        binds.update(dataset=dataset)
    else:
        # BUGFIX: previously this fell through with `wheresql` unbound and the
        # method died with an obscure NameError; report the bad input instead.
        dbsExceptionHandler('dbsException-invalid-input',
                            "Oracle/File/UpdateStatus. logical_file_name or dataset is required.")
    if lost:
        sql = "{sql}, file_size=0 {wheresql}".format(sql=self.sql, wheresql=wheresql)
    else:
        sql = "{sql} {wheresql}".format(sql=self.sql, wheresql=wheresql)
    self.dbi.processData(sql, binds, conn, transaction)
def execute(self, conn, logical_file_name, block_name, block_id, transaction=False):
    """
    List child files selected by LFN(s), block name or block id.

    Exactly one selector is used, checked in that order; with no selector the
    DBS invalid-input exception handler is invoked.
    """
    binds = {}
    sql = ''
    if logical_file_name:
        if isinstance(logical_file_name, basestring):
            binds = {"logical_file_name": logical_file_name}
            sql = "{sql} {wheresql}".format(
                sql=self.sql,
                wheresql="WHERE F.LOGICAL_FILE_NAME = :logical_file_name")
        elif isinstance(logical_file_name, list):
            # Token generator: bulk IN-list without hitting bind-count limits.
            token_sql, binds = create_token_generator(logical_file_name)
            sql = "{lfn_generator} {sql} {wheresql}".format(
                lfn_generator=token_sql,
                sql=self.sql,
                wheresql="WHERE F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)")
    elif block_name:
        join_clause = "JOIN {owner}BLOCKS B on B.BLOCK_ID = F.BLOCK_ID".format(owner=self.owner)
        binds = {"block_name": block_name}
        sql = "{sql} {joins} {wheresql}".format(
            sql=self.sql, joins=join_clause,
            wheresql="WHERE B.BLOCK_NAME = :block_name")
    elif block_id:
        binds = {"block_id": block_id}
        sql = "{sql} {wheresql}".format(
            sql=self.sql, wheresql="WHERE F.BLOCK_ID = :block_id")
    else:
        dbsExceptionHandler('dbsException-invalid-input', "Logical_file_names is required for listChild dao.", self.logger.exception)
    cursors = self.dbi.processData(sql, binds, conn, transaction=transaction, returnCursor=True)
    rows = []
    for cur in cursors:
        rows.extend(self.formatCursor(cur, size=100))
    return rows
def execute(self, conn, logical_file_name='', block_id=0, block_name='', transaction=False):
    """
    List file parents selected by LFN(s), block id or block name.

    Returns {} when no selection criterion is supplied.
    """
    if not conn:
        dbsExceptionHandler("dbsException-db-conn-failed","Oracle/FileParent/List. Expects db connection from upper layer.")
    sql = ''
    binds = {}
    if logical_file_name:
        if isinstance(logical_file_name, basestring):
            binds = {"logical_file_name": logical_file_name}
            sql = "{sql} {wheresql}".format(
                sql=self.sql,
                wheresql="WHERE F.LOGICAL_FILE_NAME = :logical_file_name")
        elif isinstance(logical_file_name, list):
            # Token generator: bulk IN-list without hitting bind-count limits.
            token_sql, binds = create_token_generator(logical_file_name)
            sql = "{lfn_generator} {sql} {wheresql}".format(
                lfn_generator=token_sql,
                sql=self.sql,
                wheresql="WHERE F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)")
    elif block_id != 0:
        binds = {'block_id': block_id}
        sql = "{sql} {wheresql}".format(
            sql=self.sql, wheresql="WHERE F.BLOCK_ID = :block_id")
    elif block_name:
        join_clause = "JOIN {owner}BLOCKS B on B.BLOCK_ID = F.BLOCK_ID".format(owner=self.owner)
        binds = {'block_name': block_name}
        sql = "{sql} {joins} {wheresql}".format(
            sql=self.sql, joins=join_clause,
            wheresql="WHERE B.BLOCK_NAME= :block_name")
    else:
        return {}
    cursors = self.dbi.processData(sql, binds, conn, transaction=transaction, returnCursor=True)
    return self.formatCursor(cursors[0])
def execute(self, conn, run_num=-1, logical_file_name="", block_name="", dataset="", trans=False):
    """
    List run rows, optionally restricted to one LFN, block or dataset and/or
    to a set of runs / run ranges.

    Only one of logical_file_name / block_name / dataset is applied, in that
    precedence order; values containing the '%' wildcard are ignored.
    run_num may be a number, a string, or run-range strings understood by
    parseRunRange().
    """
    if not conn:
        dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/DatasetRun/List. Expects db connection from upper layer.")
    sql = self.sql
    binds = {}
    if logical_file_name and "%" not in logical_file_name:
        sql += """ inner join %sFILES FILES on FILES.FILE_ID = FL.FILE_ID WHERE FILES.LOGICAL_FILE_NAME = :logical_file_name""" % (self.owner)
        binds["logical_file_name"] = logical_file_name
    elif block_name and "%" not in block_name:
        sql += """ inner join %sFILES FILES on FILES.FILE_ID = FL.FILE_ID inner join %sBLOCKS BLOCKS on BLOCKS.BLOCK_ID = FILES.BLOCK_ID WHERE BLOCKS.BLOCK_NAME = :block_name """ % (self.owner, self.owner)
        binds["block_name"] = block_name
    elif dataset and "%" not in dataset:
        sql += """ inner join %sFILES FILES on FILES.FILE_ID = FL.FILE_ID inner join %sDATASETS DATASETS on DATASETS.DATASET_ID = FILES.DATASET_ID WHERE DATASETS.DATASET = :dataset """ % (self.owner, self.owner)
        binds["dataset"] = dataset
    if run_num != -1:
        # Glue word depends on whether a WHERE clause was already added above.
        andorwhere = ("WHERE", "AND")["WHERE" in sql]
        run_list = []
        wheresql_run_list = ''
        wheresql_run_range = ''
        run_range_ct = 0
        for r in parseRunRange(run_num):
            if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
                run_list.append(str(r))
            if isinstance(r, run_tuple):
                if r[0] == r[1]:
                    dbsExceptionHandler('dbsException-invalid-input', "DBS run_num range must be apart at least by 1.")
                # BUGFIX: the range clause and the :minrun/:maxrun binds used to
                # be overwritten on each iteration, so only the LAST range of a
                # multi-range request was applied. Accumulate them instead,
                # numbering the bind names.
                if run_range_ct > 0:
                    wheresql_run_range += " or "
                wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((run_range_ct, ) * 2)
                binds.update({"minrun%s" % run_range_ct: r[0]})
                binds.update({"maxrun%s" % run_range_ct: r[1]})
                run_range_ct += 1
        if run_list:
            wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            run_generator, run_binds = create_token_generator(run_list)
            # The token generator's WITH clause must precede the main statement.
            sql = "{run_generator}".format(run_generator=run_generator) + sql
            binds.update(run_binds)
        if wheresql_run_range and wheresql_run_list:
            sql += " %s (" % andorwhere + wheresql_run_range + " or " + wheresql_run_list + " )"
        elif wheresql_run_range:
            # Parenthesize: the accumulated ranges may contain "or".
            sql += " %s (" % andorwhere + wheresql_run_range + ")"
        elif wheresql_run_list:
            sql += " %s " % andorwhere + wheresql_run_list
    #self.logger.debug(sql)
    cursors = self.dbi.processData(sql, binds, conn, transaction=trans, returnCursor=True)
    result = []
    for cursor in cursors:
        result.extend(self.formatCursor(cursor))
    return result
def execute(self, conn, run_num=-1, logical_file_name="", block_name="", dataset="", trans=False):
    """
    List run rows, optionally restricted to one LFN, block or dataset and/or
    to a set of runs / run ranges.

    Only one of logical_file_name / block_name / dataset is applied, in that
    precedence order; values containing the '%' wildcard are ignored.
    run_num may be a number, a string, or run-range strings understood by
    parseRunRange().
    """
    if not conn:
        dbsExceptionHandler("dbsException-db-conn-failed","Oracle/DatasetRun/List. Expects db connection from upper layer.")
    sql = self.sql
    binds = {}
    if logical_file_name and "%" not in logical_file_name:
        sql += """ inner join %sFILES FILES on FILES.FILE_ID = FL.FILE_ID WHERE FILES.LOGICAL_FILE_NAME = :logical_file_name""" % (self.owner)
        binds["logical_file_name"] = logical_file_name
    elif block_name and "%" not in block_name:
        sql += """ inner join %sFILES FILES on FILES.FILE_ID = FL.FILE_ID inner join %sBLOCKS BLOCKS on BLOCKS.BLOCK_ID = FILES.BLOCK_ID WHERE BLOCKS.BLOCK_NAME = :block_name """ % (self.owner, self.owner)
        binds["block_name"] = block_name
    elif dataset and "%" not in dataset:
        sql += """ inner join %sFILES FILES on FILES.FILE_ID = FL.FILE_ID inner join %sDATASETS DATASETS on DATASETS.DATASET_ID = FILES.DATASET_ID WHERE DATASETS.DATASET = :dataset """ % (self.owner, self.owner)
        binds["dataset"] = dataset
    if run_num != -1:
        # Glue word depends on whether a WHERE clause was already added above.
        andorwhere = ("WHERE", "AND")["WHERE" in sql]
        run_list = []
        wheresql_run_list = ''
        wheresql_run_range = ''
        run_range_ct = 0
        for r in parseRunRange(run_num):
            if isinstance(r, str) or isinstance(r, int) or isinstance(r, long):
                run_list.append(str(r))
        if isinstance(r, run_tuple):
            pass
        for r in parseRunRange(run_num):
            if isinstance(r, run_tuple):
                if r[0] == r[1]:
                    dbsExceptionHandler('dbsException-invalid-input', "DBS run_num range must be apart at least by 1.")
                # BUGFIX: the range clause and the :minrun/:maxrun binds used to
                # be overwritten on each iteration, so only the LAST range of a
                # multi-range request was applied. Accumulate them instead,
                # numbering the bind names.
                if run_range_ct > 0:
                    wheresql_run_range += " or "
                wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((run_range_ct, ) * 2)
                binds.update({"minrun%s" % run_range_ct: r[0]})
                binds.update({"maxrun%s" % run_range_ct: r[1]})
                run_range_ct += 1
        if run_list:
            wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            run_generator, run_binds = create_token_generator(run_list)
            # The token generator's WITH clause must precede the main statement.
            sql = "{run_generator}".format(run_generator=run_generator) + sql
            binds.update(run_binds)
        if wheresql_run_range and wheresql_run_list:
            sql += " %s (" % andorwhere + wheresql_run_range + " or " + wheresql_run_list + " )"
        elif wheresql_run_range:
            # Parenthesize: the accumulated ranges may contain "or".
            sql += " %s (" % andorwhere + wheresql_run_range + ")"
        elif wheresql_run_list:
            sql += " %s " % andorwhere + wheresql_run_list
    #self.logger.debug(sql)
    cursors = self.dbi.processData(sql, binds, conn, transaction=trans, returnCursor=True)
    result = []
    for cursor in cursors:
        result.extend(self.formatCursor(cursor))
    return result
def execute(self, conn, daoinput, transaction=False):
    """
    Update the status of migration block(s).

    daoinput keys: migration_status, last_modification_date, and either
    migration_request_id or migration_block_id (scalar or list).

    Allowed transitions, enforced via the WHERE clause:
      - to 1 only from status 0 or 3;
      - to 2, 3 or 9 only from status 1.
    """
    if not conn:
        dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/MigrationBlock/Update. Expects db connection from upper layer." ,self.logger.exception)
    status = daoinput['migration_status']
    if status == 1:
        sql = self.sql + " (MIGRATION_STATUS = 0 or MIGRATION_STATUS = 3)"
    elif status in (2, 3, 9):
        sql = self.sql + " MIGRATION_STATUS = 1 "
    else:
        # BUGFIX: error message previously read "0r 9".
        dbsExceptionHandler("dbsException-conflict-data", "Oracle/MigrationBlock/Update. Expected migration status to be 1, 2, 3, or 9" ,self.logger.exception )
    if 'migration_request_id' in daoinput:
        # A leading space is added defensively: the status==1 branch ends with
        # ")" and used to produce "...= 3)and MIGRATION_REQUEST_ID ...".
        sql3 = sql + " and MIGRATION_REQUEST_ID =:migration_request_id"
        result = self.dbi.processData(sql3, daoinput, conn, transaction)
    elif 'migration_block_id' in daoinput:
        if not isinstance(daoinput['migration_block_id'], list):
            sql2 = sql + " and MIGRATION_BLOCK_ID =:migration_block_id"
            result = self.dbi.processData(sql2, daoinput, conn, transaction)
        else:
            # Bulk update: token generator builds the IN-list for the block ids.
            bk_id_generator, binds2 = create_token_generator(daoinput['migration_block_id'])
            newdaoinput = {"migration_status": daoinput["migration_status"],
                           "last_modification_date": daoinput["last_modification_date"]}
            newdaoinput.update(binds2)
            sql2 = sql + """ and MIGRATION_BLOCK_ID in ({bk_id_generator} SELECT TOKEN FROM TOKEN_GENERATOR) """.format(bk_id_generator=bk_id_generator)
            result = self.dbi.processData(sql2, newdaoinput, conn, transaction)
    else:
        dbsExceptionHandler("dbsException-conflict-data", "Oracle/MigrationBlock/Update. Required IDs not in the input", self.logger.exception)
def execute(self, conn, block_name='', child_lfn_list=[], transaction=False):
    """
    Stream {child_file_id: [(run, lumi), ...]} mappings for the children of a
    block, yielding one dict per child file.

    Rows are assumed to arrive ordered by child file id ('order by cfid'),
    with keys 'r' (run), 'l' (lumi) and 'cfid' (child file id).
    """
    # NOTE(review): mutable default argument child_lfn_list=[] — harmless here
    # because it is only read, but a None default would be safer.
    sql = ''
    binds = {}
    child_ds_name = ''  # NOTE(review): unused local.
    child_where = ''
    if not child_lfn_list:
        # Most common case: all children of the block.
        child_where = " where b.block_name = :block_name )"
        binds.update({"block_name": block_name})
        sql = self.child_sql + child_where + " order by cfid "
    else:
        # Less common: restrict to an explicit list of child LFNs.
        # NOTE(review): this branch binds only the token-generator binds; the
        # :child_block_name placeholder appears unbound — verify against callers.
        child_where = """ where b.block_name = :child_block_name and f.logical_file_name in (SELECT TOKEN FROM TOKEN_GENERATOR) )) """
        lfn_generator, bind = create_token_generator(child_lfn_list)
        binds.update(bind)
        sql = lfn_generator +\
            self.child_sql +\
            child_where + " order by cfid "
    # NOTE(review): leftover debug print — consider removing or switching to
    # self.logger.debug.
    print(sql)
    #r = self.dbi.processData(sql, binds, conn, transaction=transaction)
    #print(self.format(r))
    #return self.format(r)
    cursors = self.dbi.processData(sql, binds, conn, transaction=transaction, returnCursor=True)
    for i in cursors:
        data = self.formatCursor(i, size=1000)
        d = {}
        run_lumi = []
        fid = None
        # Group consecutive rows by child file id; emit a dict each time the
        # id changes, then once more for the trailing group.
        for i in data:
            r = i['r']
            l = i['l']
            f = i['cfid']
            if fid is None:
                # First row of the cursor: start the first group.
                fid = f
                run_lumi.append((r, l))
            elif f != fid and fid is not None:
                # File id changed: flush the finished group and start a new one.
                d[fid] = run_lumi
                yield d
                del d[fid]
                run_lumi = []
                fid = f
                run_lumi.append((r, l))
            else:
                run_lumi.append((r, l))
        # Flush the last group of this cursor.
        # NOTE(review): if a cursor yields no rows, fid stays None and a
        # {None: []} dict is emitted — confirm callers tolerate this.
        d[fid] = run_lumi
        yield d
        del run_lumi
        del d
def execute(self, conn, logical_file_name='', block_id=0, block_name='', transaction=False):
    """
    Yield file-parent rows selected by LFN(s), block id or block name.

    Yields nothing when no selection criterion is supplied.
    """
    if not conn:
        dbsExceptionHandler( "dbsException-db-conn-failed", "Oracle/FileParent/List. Expects db connection from upper layer." )
    sql = ''
    binds = {}
    if logical_file_name:
        if isinstance(logical_file_name, basestring):
            binds = {"logical_file_name": logical_file_name}
            sql = "{sql} {wheresql}".format(
                sql=self.sql,
                wheresql="WHERE F.LOGICAL_FILE_NAME = :logical_file_name")
        elif isinstance(logical_file_name, list):
            # Token generator: bulk IN-list without hitting bind-count limits.
            token_sql, binds = create_token_generator(logical_file_name)
            sql = "{lfn_generator} {sql} {wheresql}".format(
                lfn_generator=token_sql,
                sql=self.sql,
                wheresql="WHERE F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)")
    elif block_id != 0:
        binds = {'block_id': block_id}
        sql = "{sql} {wheresql}".format(
            sql=self.sql, wheresql="WHERE F.BLOCK_ID = :block_id")
    elif block_name:
        join_clause = "JOIN {owner}BLOCKS B on B.BLOCK_ID = F.BLOCK_ID".format(owner=self.owner)
        binds = {'block_name': block_name}
        sql = "{sql} {joins} {wheresql}".format(
            sql=self.sql, joins=join_clause,
            wheresql="WHERE B.BLOCK_NAME= :block_name")
    else:
        return
    cursors = self.dbi.processData(sql, binds, conn, transaction=transaction, returnCursor=True)
    for cursor in cursors:
        chunk = self.formatCursor(cursor)
        if isinstance(chunk, (list, GeneratorType)):
            for row in chunk:
                yield row
        elif chunk:
            yield chunk
def execute(self, conn, logical_file_name, block_name, block_id, transaction=False):
    """
    List child files selected by LFN(s), block name or block id.

    Exactly one selector is used, checked in that order; with no selector the
    DBS invalid-input exception handler is invoked.
    """
    binds = {}
    sql = ''
    if logical_file_name:
        if isinstance(logical_file_name, basestring):
            binds = {"logical_file_name": logical_file_name}
            wheresql = "WHERE F.LOGICAL_FILE_NAME = :logical_file_name"
            sql = "{sql} {wheresql}".format(sql=self.sql, wheresql=wheresql)
        elif isinstance(logical_file_name, list):
            # Token generator: bulk IN-list without hitting bind-count limits.
            wheresql = "WHERE F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)"
            token_sql, binds = create_token_generator(logical_file_name)
            sql = "{lfn_generator} {sql} {wheresql}".format(
                lfn_generator=token_sql, sql=self.sql, wheresql=wheresql)
    elif block_name:
        binds = {"block_name": block_name}
        sql = "{sql} {joins} {wheresql}".format(
            sql=self.sql,
            joins="JOIN {owner}BLOCKS B on B.BLOCK_ID = F.BLOCK_ID".format(owner=self.owner),
            wheresql="WHERE B.BLOCK_NAME = :block_name")
    elif block_id:
        binds = {"block_id": block_id}
        sql = "{sql} {wheresql}".format(
            sql=self.sql, wheresql="WHERE F.BLOCK_ID = :block_id")
    else:
        dbsExceptionHandler(
            'dbsException-invalid-input',
            "Logical_file_names is required for listChild dao.",
            self.logger.exception)
    cursors = self.dbi.processData(sql, binds, conn, transaction=transaction, returnCursor=True)
    rows = []
    for cur in cursors:
        rows.extend(self.formatCursor(cur, size=100))
    return rows
def execute(self, conn, daoinput, transaction=False):
    """
    Update the status of migration block(s).

    daoinput keys: migration_status, last_modification_date, and either
    migration_request_id or migration_block_id (scalar or list).

    Allowed transitions, enforced via the WHERE clause:
      - to 1 only from status 0 or 3;
      - to 2, 3 or 9 only from status 1.
    """
    if not conn:
        dbsExceptionHandler(
            "dbsException-failed-connect2host",
            "Oracle/MigrationBlock/Update. Expects db connection from upper layer.",
            self.logger.exception)
    status = daoinput['migration_status']
    if status == 1:
        sql = self.sql + " (MIGRATION_STATUS = 0 or MIGRATION_STATUS = 3)"
    elif status in (2, 3, 9):
        sql = self.sql + " MIGRATION_STATUS = 1 "
    else:
        # BUGFIX: error message previously read "0r 9".
        dbsExceptionHandler(
            "dbsException-conflict-data",
            "Oracle/MigrationBlock/Update. Expected migration status to be 1, 2, 3, or 9",
            self.logger.exception)
    if 'migration_request_id' in daoinput:
        # A leading space is added defensively: the status==1 branch ends with
        # ")" and used to produce "...= 3)and MIGRATION_REQUEST_ID ...".
        sql3 = sql + " and MIGRATION_REQUEST_ID =:migration_request_id"
        result = self.dbi.processData(sql3, daoinput, conn, transaction)
    elif 'migration_block_id' in daoinput:
        if not isinstance(daoinput['migration_block_id'], list):
            sql2 = sql + " and MIGRATION_BLOCK_ID =:migration_block_id"
            result = self.dbi.processData(sql2, daoinput, conn, transaction)
        else:
            # Bulk update: token generator builds the IN-list for the block ids.
            bk_id_generator, binds2 = create_token_generator(
                daoinput['migration_block_id'])
            newdaoinput = {
                "migration_status": daoinput["migration_status"],
                "last_modification_date": daoinput["last_modification_date"]
            }
            newdaoinput.update(binds2)
            sql2 = sql + """ and MIGRATION_BLOCK_ID in ({bk_id_generator} SELECT TOKEN FROM TOKEN_GENERATOR) """.format(bk_id_generator=bk_id_generator)
            result = self.dbi.processData(sql2, newdaoinput, conn, transaction)
    else:
        dbsExceptionHandler(
            "dbsException-conflict-data",
            "Oracle/MigrationBlock/Update. Required IDs not in the input",
            self.logger.exception)
def execute(self, conn, logical_file_name, is_file_valid, lost, dataset, transaction=False):
    """
    Update the validity flag for a file, a list of files, or all files of a
    dataset. When `lost` is truthy the file size is zeroed as well.

    Exactly one of logical_file_name / dataset must be supplied.
    """
    if not conn:
        dbsExceptionHandler(
            "dbsException-db-conn-failed",
            "Oracle/File/UpdateStatus. Expects db connection from upper layer."
        )
    binds = dict(myuser=dbsUtils().getCreateBy(),
                 mydate=dbsUtils().getTime(),
                 is_file_valid=is_file_valid)
    if logical_file_name and isinstance(logical_file_name, list):
        lfn_generator, lfn_binds = create_token_generator(logical_file_name)
        ###with clause - subquery factory does only work with select statements, therefore lfn_generator
        ###has to be place in front of the SELECT statement in the WHERE clause
        ###http://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:8120272301765
        wheresql = """WHERE F.LOGICAL_FILE_NAME in ({lfn_generator} SELECT TOKEN FROM TOKEN_GENERATOR) """.format(lfn_generator=lfn_generator)
        binds.update(lfn_binds)
    elif logical_file_name:
        wheresql = "where F.LOGICAL_FILE_NAME=:logical_file_name"
        binds.update(logical_file_name=logical_file_name)
    elif dataset:
        wheresql = """ where F.dataset_id in ( select D.dataset_id from {owner}DATASETS D inner join {owner}FILES F2 on F2.dataset_id = D.dataset_id Where D.dataset=:dataset) """.format(
            owner=self.owner)
        binds.update(dataset=dataset)
    else:
        # BUGFIX: previously this fell through with `wheresql` unbound and the
        # method died with an obscure NameError; report the bad input instead.
        dbsExceptionHandler(
            'dbsException-invalid-input',
            "Oracle/File/UpdateStatus. logical_file_name or dataset is required.")
    if lost:
        sql = "{sql}, file_size=0 {wheresql}".format(sql=self.sql, wheresql=wheresql)
    else:
        sql = "{sql} {wheresql}".format(sql=self.sql, wheresql=wheresql)
    self.dbi.processData(sql, binds, conn, transaction)
def execute(self, conn, logical_file_name='', block_id=0, block_name='', transaction=False):
    """
    Yield file-parent rows selected by LFN(s), block id or block name.

    Yields nothing when no selection criterion is supplied.
    """
    sql = ''
    binds = {}
    if logical_file_name:
        if isinstance(logical_file_name, basestring):
            binds = {"logical_file_name": logical_file_name}
            sql = "{sql} {wheresql}".format(
                sql=self.sql,
                wheresql="WHERE F.LOGICAL_FILE_NAME = :logical_file_name")
        elif isinstance(logical_file_name, list):
            # Token generator: bulk IN-list without hitting bind-count limits.
            token_sql, binds = create_token_generator(logical_file_name)
            sql = "{lfn_generator} {sql} {wheresql}".format(
                lfn_generator=token_sql,
                sql=self.sql,
                wheresql="WHERE F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)")
    elif block_id != 0:
        binds = {'block_id': block_id}
        sql = "{sql} {wheresql}".format(
            sql=self.sql, wheresql="WHERE F.BLOCK_ID = :block_id")
    elif block_name:
        join_clause = "JOIN {owner}BLOCKS B on B.BLOCK_ID = F.BLOCK_ID".format(owner=self.owner)
        binds = {'block_name': block_name}
        sql = "{sql} {joins} {wheresql}".format(
            sql=self.sql, joins=join_clause,
            wheresql="WHERE B.BLOCK_NAME= :block_name")
    else:
        return
    cursors = self.dbi.processData(sql, binds, conn, transaction=transaction, returnCursor=True)
    for cursor in cursors:
        chunk = self.formatCursor(cursor, size=100)
        if isinstance(chunk, (list, GeneratorType)):
            for row in chunk:
                yield row
        elif chunk:
            yield chunk
def execute(self, conn, logical_file_name, block_name, block_id, transaction=False):
    """
    List child files selected by LFN(s), block name or block id.

    Exactly one selector is used, checked in that order; with no selector the
    DBS invalid-input exception handler is invoked.
    """
    if not conn:
        dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/FileParent/ListChild. Expects db connection from upper layer.")
    binds = {}
    sql = ''
    if logical_file_name:
        if isinstance(logical_file_name, str):
            binds = {"logical_file_name": logical_file_name}
            sql = "{sql} {wheresql}".format(
                sql=self.sql,
                wheresql="WHERE F.LOGICAL_FILE_NAME = :logical_file_name")
        elif isinstance(logical_file_name, list):
            # Token generator: bulk IN-list without hitting bind-count limits.
            token_sql, binds = create_token_generator(logical_file_name)
            sql = "{lfn_generator} {sql} {wheresql}".format(
                lfn_generator=token_sql,
                sql=self.sql,
                wheresql="WHERE F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)")
    elif block_name:
        join_clause = "JOIN {owner}BLOCKS B on B.BLOCK_ID = F.BLOCK_ID".format(owner=self.owner)
        binds = {"block_name": block_name}
        sql = "{sql} {joins} {wheresql}".format(
            sql=self.sql, joins=join_clause,
            wheresql="WHERE B.BLOCK_NAME = :block_name")
    elif block_id:
        binds = {"block_id": block_id}
        sql = "{sql} {wheresql}".format(
            sql=self.sql, wheresql="WHERE F.BLOCK_ID = :block_id")
    else:
        dbsExceptionHandler('dbsException-invalid-input', "Logical_file_names is required for listChild dao.")
    cursors = self.dbi.processData(sql, binds, conn, transaction=transaction, returnCursor=True)
    return self.formatCursor(cursors[0])
def execute(self, conn, dataset="", block_name="", logical_file_name="", release_version="", pset_hash="", app_name="", output_module_label="", run_num=-1, origin_site_name="", lumi_list=[], validFileOnly=0, sumOverLumi=0, transaction=False):
    """
    Yield file rows matching the given filters.

    Builds the WHERE clause incrementally on top of self.sql_cond/self.sql_sel;
    list-valued inputs (LFNs, runs, lumis) go through create_token_generator,
    and at most ONE such list may be supplied per query. When run_num is given
    the selection is joined against FILE_LUMIS; with sumOverLumi=1 event counts
    are re-aggregated per (file, run).
    """
    if not conn:
        dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/File/List. Expects db connection from upper layer.", self.logger.exception)
    sql = self.sql_cond
    binds = {}
    sql_sel = self.sql_sel
    run_generator = ''
    lfn_generator = ''
    lumi_generator = ''
    sql_lumi = ''
    #import pdb
    #pdb.set_trace()
    if run_num != -1:
        # Per-run output: one row per (file, run), so select DISTINCT and join
        # the file-lumi table.
        sql_sel = sql_sel.replace("SELECT", "SELECT DISTINCT") + " , FL.RUN_NUM "
        sql += " JOIN %sFILE_LUMIS FL on FL.FILE_ID=F.FILE_ID " % (self.owner)
    if release_version or pset_hash or app_name or output_module_label:
        # Output-module-config chain is only joined in when one of its filter
        # fields is actually used.
        sql += """LEFT OUTER JOIN %sFILE_OUTPUT_MOD_CONFIGS FOMC ON FOMC.FILE_ID = F.FILE_ID LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = FOMC.OUTPUT_MOD_CONFIG_ID LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID """ % ((self.owner, ) * 5)
    #FIXME : the status check should only be done with normal/super user
    #sql += """WHERE F.IS_FILE_VALID = 1""" # for the time being lests list all files
    #WMAgent requires validaFileOnly. YG 1/30/2015
    if int(validFileOnly) == 0:
        sql += """ WHERE F.IS_FILE_VALID <> -1 """
    elif int(validFileOnly) == 1:
        sql += """ WHERE F.IS_FILE_VALID = 1 AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') """
    else:
        dbsExceptionHandler("dbsException-invalid-input", "invalid value for validFileOnly.", self.logger.exception)
    if block_name:
        if isinstance(block_name, list):
            dbsExceptionHandler( 'dbsException-invalid-input', 'Input block_name is a list instead of string.', self.logger.exception)
        sql += " AND B.BLOCK_NAME = :block_name"
        binds.update({"block_name": block_name})
    if logical_file_name:
        if type(logical_file_name) is not list:
            # Single LFN: allow SQL wildcard via LIKE when '%' is present.
            op = ("=", "like")["%" in logical_file_name]
            sql += " AND F.LOGICAL_FILE_NAME %s :logical_file_name" % op
            binds.update({"logical_file_name": logical_file_name})
        if type(logical_file_name) is list:
            ds_generator, binds2 = create_token_generator( logical_file_name)
            binds.update(binds2)
            sql += " AND F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)"
            lfn_generator = "{ds_generator}".format( ds_generator=ds_generator)
    if dataset:
        if isinstance(dataset, list):
            dbsExceptionHandler( 'dbsException-invalid-input', 'Input dataset is a list instead of string.', self.logger.exception)
        sql += " AND D.DATASET = :dataset"
        binds.update({"dataset": dataset})
    if release_version:
        op = ("=", "like")["%" in release_version]
        sql += " AND RV.RELEASE_VERSION %s :release_version" % op
        binds.update({"release_version": release_version})
    if pset_hash:
        op = ("=", "like")["%" in pset_hash]
        sql += " AND PSH.PSET_HASH %s :pset_hash" % op
        binds.update({"pset_hash": pset_hash})
    if app_name:
        op = ("=", "like")["%" in app_name]
        sql += " AND AEX.APP_NAME %s :app_name" % op
        binds.update({"app_name": app_name})
    if output_module_label:
        op = ("=", "like")["%" in output_module_label]
        sql += " AND OMC.OUTPUT_MODULE_LABEL %s :output_module_label" % op
        binds.update({"output_module_label": output_module_label})
    if (origin_site_name):
        op = ("=", "like")["%" in origin_site_name]
        sql += " AND B.ORIGIN_SITE_NAME %s :origin_site_name" % op
        binds.update({"origin_site_name": origin_site_name})
    if run_num != -1 and run_num:  # eliminate empty list run_num=[]
        run_list = []
        wheresql_run_list = ''
        wheresql_run_range = ''
        wheresql_run_range_ct = 0
        # Fast path: a single scalar run number.
        # NOTE(review): bare except — any failure of long() falls through to the
        # string/list parsing below; narrow to (TypeError, ValueError) if possible.
        try:
            run_num = long(run_num)
            sql += " and FL.RUN_NUM = :run_num "
            binds.update({"run_num": run_num})
        except:
            if isinstance(run_num, basestring):
                # A single string: must parse to a run range tuple.
                for r in parseRunRange(run_num):
                    if isinstance(r, run_tuple):
                        if r[0] == r[1]:
                            dbsExceptionHandler( 'dbsException-invalid-input', "DBS run range must be apart at least by 1.", self.logger.exception)
                        if not lumi_list:
                            if wheresql_run_range_ct > 0:
                                wheresql_run_range += " or "
                            wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ( (wheresql_run_range_ct, ) * 2)
                            binds.update({ "minrun%s" % wheresql_run_range_ct: int(r[0]) })
                            binds.update({ "maxrun%s" % wheresql_run_range_ct: int(r[1]) })
                            wheresql_run_range_ct += 1
                        else:
                            dbsExceptionHandler( 'dbsException-invalid-input', "When lumi_list is given, only one run is allowed.", self.logger.exception)
                    else:
                        dbsExceptionHandler( 'dbsException-invalid-input', "Invalid run_num. if run_num input as a string, it has to be converted into a int/long or in format of 'run_min-run_max'. ", self.logger.exception)
            elif type(run_num) is list and len(run_num) == 1:
                # One-element list: scalar or a single range string.
                try:
                    run_num = long(run_num[0])
                    sql += " and FL.RUN_NUM = :run_num "
                    binds.update({"run_num": run_num})
                except:
                    for r in parseRunRange(run_num):
                        if isinstance(r, run_tuple):
                            if r[0] == r[1]:
                                dbsExceptionHandler( 'dbsException-invalid-input', "DBS run range must be apart at least by 1.", self.logger.exception)
                            if not lumi_list:
                                if wheresql_run_range_ct > 0:
                                    wheresql_run_range += " or "
                                wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ( (wheresql_run_range_ct, ) * 2)
                                binds.update({ "minrun%s" % wheresql_run_range_ct: int(r[0]) })
                                binds.update({ "maxrun%s" % wheresql_run_range_ct: int(r[1]) })
                                wheresql_run_range_ct += 1
                            else:
                                dbsExceptionHandler( 'dbsException-invalid-input', "When lumi_list is given, only one run is allowed.", self.logger.exception)
                        else:
                            dbsExceptionHandler( 'dbsException-invalid-input', "run_num as a list must be a number or a range str, such as ['10'], [10] or ['1-10']", self.logger.exception)
            else:
                # General list: mix of scalars (collected into run_list for the
                # token generator) and range tuples (accumulated as BETWEENs).
                for r in parseRunRange(run_num):
                    if isinstance(r, basestring) or isinstance( r, int) or isinstance(r, long):
                        run_list.append(str(r))
                    if isinstance(r, run_tuple):
                        if r[0] == r[1]:
                            dbsExceptionHandler( 'dbsException-invalid-input', "DBS run range must be apart at least by 1.", self.logger.exception)
                        if not lumi_list:
                            if wheresql_run_range_ct > 0:
                                wheresql_run_range += " or "
                            wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ( (wheresql_run_range_ct, ) * 2)
                            binds.update({ "minrun%s" % wheresql_run_range_ct: int(r[0]) })
                            binds.update({ "maxrun%s" % wheresql_run_range_ct: int(r[1]) })
                            wheresql_run_range_ct += 1
                        else:
                            dbsExceptionHandler( 'dbsException-invalid-input', "When lumi_list is given, only one run is allowed.", self.logger.exception)
        #
        if run_list and not lumi_list:
            wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            run_generator, run_binds = create_token_generator(run_list)
            #sql = "{run_generator}".format(run_generator=run_generator) + sql
            binds.update(run_binds)
        if wheresql_run_range and wheresql_run_list:
            sql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
        elif wheresql_run_range and not wheresql_run_list:
            sql += " and " + wheresql_run_range
        elif not wheresql_run_range and wheresql_run_list:
            sql += " and " + wheresql_run_list
    # Make sure when we have a lumi_list, there is only ONE run -- YG 14/05/2013
    # NOTE(review): run_list is only defined inside the run_num branch above;
    # a lumi_list without run_num would raise NameError here — confirm callers
    # always pass run_num with lumi_list.
    if (lumi_list and len(lumi_list) != 0):
        if len(run_list) > 1:
            dbsExceptionHandler( 'dbsException-invalid-input', "When lumi_list is given, only one run is allowed.", self.logger.exception)
        sql += " AND FL.LUMI_SECTION_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
        sql_lumi = " FL.LUMI_SECTION_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
        #Do I need to convert lumi_list to be a str list? YG 10/03/13
        #Yes, you do. YG
        # NOTE(review): under Python 3, map() returns a lazy iterator — verify
        # create_token_generator accepts it (or wrap in list()).
        lumi_list = map(str, lumi_list)
        lumi_generator, lumi_binds = create_token_generator(lumi_list)
        #sql_sel = "{lumi_generator}".format(lumi_generator=lumi_generator) + sql_sel
        binds.update(lumi_binds)
        #binds["run_num"]=run_list[0]
    #
    if (run_generator and lfn_generator) or (lumi_generator and lfn_generator):
        dbsExceptionHandler( 'dbsException-invalid-input2', "cannot supply more than one list (lfn, run_num or lumi) at one query", self.logger.exception, "dao/File/list cannot have more than one list (lfn, run_num, lumi) as input pareamters" )
        # only one with and generators should be named differently for run and lfn.
        #sql = run_generator + lfn_generator + lumi_generator + sql_sel + sql
    else:
        if run_num != -1 and int(sumOverLumi) == 1:
            # Recompute per-(file, run) event counts from FILE_LUMIS; rows whose
            # lumis carry NULL event counts are forced back to NULL via `badi`.
            sql_sel = sql_sel.replace('F.EVENT_COUNT,', '')
            sql = \
                'with myfiles as ( ' + sql_sel + sql + """) select mf.* , (case when badi.file_id = mc.file_id and badi.run_num=mc.run_num and badi.bid is null then null else mc.event_count end) as event_count from myfiles mf, EVENT_COUNT_WITH_LUMI, ( select distinct fl.file_id, fl.run_num, null as bid from %sfile_lumis fl join myfiles my2 on my2.file_id=fl.file_id and my2.run_num=fl.run_num where fl.event_count is null )badi where mf.file_id= mc.file_id and mf.run_num=mc.run_num """%self.owner
            if not lumi_list:
                ent_ct = """ (select sum(fl.event_count) as event_count, fl.file_id, fl.run_num from %sfile_lumis fl join myfiles mf on mf.file_id=fl.file_id and mf.run_num=fl.run_num group by fl.file_id, fl.run_num) mc """ % self.owner
                sql = sql.replace('EVENT_COUNT_WITH_LUMI', ent_ct)
            else:
                # NOTE(review): "where sql_lumi" below is literal text inside the
                # SQL string — the sql_lumi variable built above is never
                # interpolated, which looks like a bug (should likely be
                # "where %s" % sql_lumi or a .replace). Confirm before shipping.
                ent_ct = lumi_generator + """ (select sum(fl.event_count) as event_count, fl.file_id, fl.run_num from %sfile_lumis fl join myfiles mf on mf.file_id=fl.file_id and mf.run_num=fl.run_num where sql_lumi group by fl.file_id, fl.run_num) mc """ % self.owner
                sql = sql.replace('EVENT_COUNT_WITH_LUMI', ent_ct)
        else:
            # Normal assembly: at most one generator string is non-empty here.
            sql = run_generator + lfn_generator + lumi_generator + sql_sel + sql
    self.logger.debug("SQL: " + sql)
    self.logger.debug("***********************")
    self.logger.debug(binds)
    # NOTE(review): if processData raises, `cursors` stays unbound and the loop
    # below raises NameError after the error is logged — confirm intent.
    try:
        self.logger.debug("******before cursor**********")
        cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
        self.logger.debug("******after cursor**********")
    except Exception as e:
        self.logger.error(str(e))
    for i in cursors:
        d = self.formatCursor(i, size=100)
        if isinstance(d, list) or isinstance(d, GeneratorType):
            for elem in d:
                yield elem
        elif d:
            yield d
def execute(self, conn, dataset="", block_name="", logical_file_name="",
            release_version="", pset_hash="", app_name="", output_module_label="",
            run_num=-1, origin_site_name="", lumi_list=[], validFileOnly=0,
            transaction=False):
    """
    List brief file records matching the given filters.

    The statement is assembled from four pieces: optional WITH-clause token
    generators (run/lfn/lumi lists), the base SELECT (``self.sql``), JOINs
    accumulated in ``joinsql`` and predicates accumulated in ``wheresql``;
    bind variables are collected in ``binds``.  Only one list-valued filter
    (lfn list, run list or lumi list) may be supplied per call — enforced
    near the end before the SQL is composed.

    NOTE(review): ``lumi_list=[]`` is a mutable default argument; it is only
    read here (``map`` builds a new list) so no state leaks, but it remains
    a risky idiom.
    """
    if not conn:
        dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/File/BriefList. Expects db connection from upper layer.")
    #import pdb
    #pdb.set_trace()
    binds = {}
    # WITH-clause fragments; at most one of run/lfn/lumi may end up non-empty.
    run_generator = ''
    lfn_generator = ''
    lumi_generator = ''
    generatedsql = ''
    basesql = self.sql
    joinsql = ''
    # Check if file is valid. YG 1/29/2015
    if int(validFileOnly) == 0:
        wheresql = " WHERE F.IS_FILE_VALID <> -1 "
    elif int(validFileOnly) == 1:
        wheresql = " WHERE F.IS_FILE_VALID = 1 "
    else:
        dbsExceptionHandler("dbsException-invalid-input", "invalid value for validFileOnly.")
    if logical_file_name:
        if type(logical_file_name) is not list: #for GET call
            # a '%' wildcard in the value switches '=' to 'like'
            op = ("=", "like")["%" in logical_file_name]
            wheresql += " AND F.LOGICAL_FILE_NAME %s :logical_file_name " % op
            binds.update({"logical_file_name": logical_file_name})
        elif type(logical_file_name) is list: #for POST call
            lfn_generator, binds2 = create_token_generator(logical_file_name)
            binds.update(binds2)
            wheresql += " AND F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)"
            #generatedsql = "{lfn_generator}".format(lfn_generator=lfn_generator)
        if not dataset:
            # no dataset filter: the VALID-only case must join DATASETS itself
            # to restrict on the dataset access type
            if int(validFileOnly) == 1:
                joinsql += """ JOIN %sDATASETS D ON D.DATASET_ID = F.DATASET_ID
                               JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID """ % ((self.owner, ) * 2)
                wheresql += " AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') "
            else:
                pass
    if block_name:
        joinsql += " JOIN %sBLOCKS B ON B.BLOCK_ID = F.BLOCK_ID " % (self.owner)
        wheresql += " AND B.BLOCK_NAME = :block_name "
        binds.update({"block_name": block_name})
        # NOTE(review): when logical_file_name was also supplied with
        # validFileOnly=1 and no dataset, the same D/DT joins were already
        # appended above — duplicate aliases; confirm callers never combine
        # those filters.
        if not dataset and int(validFileOnly) == 1:
            joinsql += """ JOIN %sDATASETS D ON D.DATASET_ID = F.DATASET_ID
                           JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID """ % ((self.owner, ) * 2)
            wheresql += " AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') "
        else:
            pass
    if dataset:
        joinsql += """ JOIN %sDATASETS D ON D.DATASET_ID = F.DATASET_ID """ % (self.owner)
        wheresql += " AND D.DATASET = :dataset "
        binds.update({"dataset": dataset})
        if int(validFileOnly) == 1:
            joinsql += " JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID " % (self.owner)
            wheresql += " AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') "
        else:
            pass
    # Output-module-configuration filters share the FOMC/OMC joins below.
    if release_version or pset_hash or app_name or output_module_label:
        joinsql += """ LEFT OUTER JOIN %sFILE_OUTPUT_MOD_CONFIGS FOMC ON FOMC.FILE_ID = F.FILE_ID
                       LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = FOMC.OUTPUT_MOD_CONFIG_ID """ % ((self.owner, ) * 2)
    if release_version:
        joinsql += " LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID" % (self.owner)
        op = ("=", "like")["%" in release_version]
        wheresql += " AND RV.RELEASE_VERSION %s :release_version" % op
        binds.update(release_version=release_version)
    if pset_hash:
        joinsql += " LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID" % (self.owner)
        op = ("=", "like")["%" in pset_hash]
        wheresql += " AND PSH.PSET_HASH %s :pset_hash" % op
        binds.update(pset_hash=pset_hash)
    if app_name:
        joinsql += " LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID" % (self.owner)
        op = ("=", "like")["%" in app_name]
        wheresql += " AND AEX.APP_NAME %s :app_name" % op
        binds.update(app_name=app_name)
    if output_module_label:
        op = ("=", "like")["%" in output_module_label]
        wheresql += " AND OMC.OUTPUT_MODULE_LABEL %s :output_module_label" % op
        binds.update(output_module_label=output_module_label)
    if (origin_site_name):
        if not block_name:
            # ORIGIN_SITE_NAME lives on BLOCKS; join it if not already joined
            joinsql += " JOIN %sBLOCKS B ON B.BLOCK_ID = F.BLOCK_ID" % (self.owner)
        op = ("=", "like")["%" in origin_site_name]
        wheresql += " AND B.ORIGIN_SITE_NAME %s :origin_site_name" % op
        binds.update({"origin_site_name": origin_site_name})
    if run_num != -1 and run_num:  # elimnate empty list run_num=[]
        # DISTINCT: the FILE_LUMIS join can fan one file out to many rows
        basesql = basesql.replace("SELECT", "SELECT DISTINCT")
        joinsql += " JOIN %sFILE_LUMIS FL on FL.FILE_ID=F.FILE_ID " % (self.owner)
        run_list = []
        wheresql_run_list = ''
        wheresql_run_range = ''
        wheresql_run_range_ct = 0
        # run_num accepted forms: single int/long, 'min-max' string,
        # one-element list, or a mixed list of numbers and range strings.
        try:
            run_num = long(run_num)
            wheresql += " and FL.RUN_NUM = :run_num "
            binds.update({"run_num": run_num})
        except:
            if isinstance(run_num, basestring):
                for r in parseRunRange(run_num):
                    if isinstance(r, run_tuple):
                        if r[0] == r[1]:
                            dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.")
                        if not lumi_list:
                            if wheresql_run_range_ct > 0:
                                wheresql_run_range += " or "
                            wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((wheresql_run_range_ct, ) * 2)
                            binds.update({"minrun%s" % wheresql_run_range_ct: r[0]})
                            binds.update({"maxrun%s" % wheresql_run_range_ct: r[1]})
                            wheresql_run_range_ct += 1
                        else:
                            dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.")
                    else:
                        dbsExceptionHandler('dbsException-invalid-input', "Invalid run_num. if run_num input as a string, it has to be converted into a int/long or in format of 'run_min-run_max'. ")
            elif type(run_num) is list and len(run_num) == 1:
                try:
                    run_num = long(run_num[0])
                    wheresql += " and FL.RUN_NUM = :run_num "
                    binds.update({"run_num": run_num})
                except:
                    for r in parseRunRange(run_num):
                        if isinstance(r, run_tuple):
                            if r[0] == r[1]:
                                dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.")
                            if not lumi_list:
                                if wheresql_run_range_ct > 0:
                                    wheresql_run_range += " or "
                                wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((wheresql_run_range_ct, ) * 2)
                                binds.update({"minrun%s" % wheresql_run_range_ct: r[0]})
                                binds.update({"maxrun%s" % wheresql_run_range_ct: r[1]})
                                wheresql_run_range_ct += 1
                            else:
                                dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.")
                        else:
                            dbsExceptionHandler('dbsException-invalid-input', "run_num as a list must be a number or a range str, such as ['10'], [10] or ['1-10']")
            else:
                # general list: collect plain numbers, expand range tuples
                for r in parseRunRange(run_num):
                    if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
                        run_list.append(str(r))
                    if isinstance(r, run_tuple):
                        if r[0] == r[1]:
                            dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.")
                        if not lumi_list:
                            if wheresql_run_range_ct > 0:
                                wheresql_run_range += " or "
                            wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((wheresql_run_range_ct, ) * 2)
                            binds.update({"minrun%s" % wheresql_run_range_ct: r[0]})
                            binds.update({"maxrun%s" % wheresql_run_range_ct: r[1]})
                            wheresql_run_range_ct += 1
                        else:
                            dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.")
        if run_list and not lumi_list:
            wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            run_generator, run_binds = create_token_generator(run_list)
            binds.update(run_binds)
        if wheresql_run_range and wheresql_run_list:
            wheresql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
        elif wheresql_run_range and not wheresql_run_list:
            wheresql += " and ( " + wheresql_run_range + " ) "
        elif not wheresql_run_range and wheresql_run_list:
            wheresql += " and " + wheresql_run_list
        # Make sure when we have a lumi_list, there is only ONE run -- YG 14/05/2013
        if (lumi_list and len(lumi_list) != 0):
            if len(run_list) > 1:
                dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.")
            wheresql += " AND FL.LUMI_SECTION_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            #Do I need to convert lumi_list to be a str list? YG 10/03/13
            #Yes, you do. YG
            lumi_list = map(str, lumi_list)
            lumi_generator, lumi_binds = create_token_generator(lumi_list)
            binds.update(lumi_binds)
            #binds["run_num"]=run_list[0]
    #
    # Reject combinations of list filters: each token generator defines the
    # same TOKEN_GENERATOR subquery name, so only one may appear in the SQL.
    if (run_generator and lfn_generator) or (lumi_generator and lfn_generator):
        self.logger.error("run_generator : %s" % run_generator)
        self.logger.error("lfn_generator: %s" % lfn_generator)
        self.logger.error("lumi_generator: %s " % lumi_generator)
        dbsExceptionHandler('dbsException-invalid-input2', message="cannot supply more than one list (lfn, run_num or lumi) at one query. ", serverError="dao/File/list cannot have more than one list (lfn, run_num or lumi) as input pareamters")
        # only one with and generators should be named differently for run and lfn.
        #sql = run_generator + lfn_generator + sql_sel + sql
    else:
        sql = run_generator + lfn_generator + lumi_generator + basesql + self.fromsql + joinsql + wheresql
    self.logger.debug("sql=%s" % sql)
    self.logger.debug("binds=%s" % binds)
    cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
    result = []
    for i in range(len(cursors)):
        result.extend(self.formatCursor(cursors[i]))
    return result
def execute(self, conn, block_name="", dataset="", run_num=-1, transaction=False):
    """
    Return aggregate file statistics (num_file, num_event, file_size,
    num_block, num_lumi) for one block or one dataset, optionally
    restricted to runs.

    Exactly one of ``block_name`` / ``dataset`` is used (block wins);
    with neither, an empty list is returned without touching the DB.
    ``run_num`` may be a single value, a 'min-max' range string or a list —
    expanded by ``parseRunRange`` into ``whererun``.

    NOTE(review): a single :minrun/:maxrun bind pair is used, so if
    parseRunRange yields more than one range tuple the earlier binds are
    overwritten — confirm callers only pass one range.
    NOTE(review): ``isinstance(r, str)`` misses unicode run numbers; the
    sibling DAOs test ``basestring`` here.
    """
    if not conn:
        dbsExceptionHandler("dbsException-db-conn-failed","Oracle/File/SummaryList. Expects db connection from upper layer.")
    binds = {}
    whererun = ''
    run_list = []
    wheresql_run_list = ''
    wheresql_run_range = ''
    sql = ''
    if run_num != -1:
        #
        for r in parseRunRange(run_num):
            if isinstance(r, str) or isinstance(r, (long,int)):
                #if not wheresql_run_list:
                #    wheresql_run_list = " fl.RUN_NUM = :run_list "
                run_list.append(str(r))
            if isinstance(r, run_tuple):
                if r[0] == r[1]:
                    dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.")
                wheresql_run_range = " fl.RUN_NUM between :minrun and :maxrun "
                binds.update({"minrun":r[0]})
                binds.update({"maxrun":r[1]})
        #
        if run_list:
            wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            run_generator, run_binds = create_token_generator(run_list)
            # the WITH-clause generator must precede the SELECT; it is
            # prepended to sql here and the SELECT is appended below
            sql = "{run_generator}".format(run_generator=run_generator)
            binds.update(run_binds)
        if wheresql_run_list and wheresql_run_range:
            whererun = wheresql_run_range + " or " + wheresql_run_list
        elif wheresql_run_list:
            whererun = wheresql_run_list
        elif wheresql_run_range:
            whererun = wheresql_run_range
    if block_name:
        if run_num != -1:
            #
            sql = sql +\
            """
            select
            (select count(f.file_id) from {owner}files f
             join {owner}blocks b on b.BLOCK_ID = f.block_id
             where b.BLOCK_NAME=:block_name
             and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun} )
            ) as num_file,
            nvl((select sum(f.event_count) event_count from {owner}files f
             join {owner}blocks b on b.BLOCK_ID = f.block_id
             where b.BLOCK_NAME=:block_name
             and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
            ),0) as num_event,
            (select nvl(sum(f.file_size),0) file_size from {owner}files f
             join {owner}blocks b on b.BLOCK_ID = f.block_id
             where b.BLOCK_NAME=:block_name
             and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
            ) as file_size,
            (select count(distinct b.block_id) from {owner}blocks b
             join {owner}files f on f.block_id=b.block_id
             where b.block_name=:block_name
             and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
            )as num_block,
            (select count(*) from
             (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
              join {owner}file_lumis fl on fl.file_id=f.file_id
              join {owner}blocks b on b.BLOCK_ID = f.block_id
              where b.BLOCK_NAME=:block_name and {whererun} )
            ) as num_lumi
            from dual
            """.format(owner=self.owner, whererun=whererun)
            binds.update({"block_name":block_name})
        else:
            sql = """
            select
            (select count(f.file_id) from %sfiles f
             join %sblocks b on b.BLOCK_ID = f.block_id
             where b.BLOCK_NAME=:block_name
            ) as num_file,
            nvl((select sum(f.event_count) event_count from %sfiles f
             join %sblocks b on b.BLOCK_ID = f.block_id
             where b.BLOCK_NAME=:block_name
            ),0) as num_event,
            (select nvl(sum(f.file_size),0) file_size from %sfiles f
             join %sblocks b on b.BLOCK_ID = f.block_id
             where b.BLOCK_NAME=:block_name
            ) as file_size,
            (select count(block_id) from %sblocks where block_name=:block_name
            ) as num_block,
            (select count(*) from
             (select distinct l.lumi_section_num, l.run_num from %sfiles f
              join %sfile_lumis l on l.file_id=f.file_id
              join %sblocks b on b.BLOCK_ID = f.block_id
              where b.BLOCK_NAME=:block_name)
            ) as num_lumi
            from dual
            """ %((self.owner,)*10)
            binds.update({"block_name":block_name})
    elif dataset:
        if run_num != -1:
            sql = sql + \
            """
            select
            (select count(f.file_id) from {owner}files f
             join {owner}datasets d on d.DATASET_ID = f.dataset_id
             where d.dataset=:dataset
             and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
            ) as num_file,
            nvl((select sum(f.event_count) event_count from {owner}files f
             join {owner}datasets d on d.DATASET_ID = f.dataset_id
             where d.dataset=:dataset
             and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
            ),0) as num_event,
            (select nvl(sum(f.file_size),0) file_size from {owner}files f
             join {owner}datasets d on d.DATASET_ID = f.dataset_id
             where d.dataset=:dataset
             and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
            ) as file_size,
            (select count(distinct b.block_id) from {owner}blocks b
             join {owner}datasets d on d.dataset_id = b.dataset_id
             join {owner}files f on f.block_id = b.block_id
             where d.dataset=:dataset
             and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
            ) as num_block,
            (select count(*) from
             (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
              join {owner}file_lumis fl on fl.file_id=f.file_id
              join {owner}datasets d on d.DATASET_ID = f.dataset_id
              where d.dataset=:dataset and {whererun} )
            ) as num_lumi
            from dual
            """.format(owner=self.owner, whererun=whererun)
            binds.update({"dataset":dataset})
        else:
            sql = """ \
            select
            (select count(f.file_id) from {owner}files f
             join {owner}datasets d on d.DATASET_ID = f.dataset_id
             where d.dataset=:dataset
            ) as num_file,
            nvl((select sum(f.event_count) event_count from {owner}files f
             join {owner}datasets d on d.DATASET_ID = f.dataset_id
             where d.dataset=:dataset
            ),0) as num_event,
            (select nvl(sum(f.file_size),0) file_size from {owner}files f
             join {owner}datasets d on d.DATASET_ID = f.dataset_id
             where d.dataset=:dataset
            ) as file_size,
            (select count(b.block_id) from {owner}blocks b
             join {owner}datasets d on d.dataset_id = b.dataset_id
             where d.dataset=:dataset
            ) as num_block,
            (select count(*) from
             (select distinct l.lumi_section_num, l.run_num from {owner}files f
              join {owner}file_lumis l on l.file_id=f.file_id
              join {owner}datasets d on d.DATASET_ID = f.dataset_id
              where d.dataset=:dataset)
            ) as num_lumi
            from dual
            """.format(owner=self.owner)
            binds.update({"dataset":dataset})
    else:
        # neither block_name nor dataset: nothing to summarize
        return []
    cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
    result=[]
    for i in range(len(cursors)):
        result.extend(self.formatCursor(cursors[i]))
    return result
def execute(self, conn, logical_file_name='', block_name='', run_num=-1, validFileOnly=0, migration=False):
    """
    Lists lumi section numbers with in a file, a list of files or a block.

    Generator.  In migration mode the rows are yielded flat (one row per
    lumi) so they can be loaded into another DB; otherwise rows are grouped
    per (logical_file_name, run_num) with the lumi numbers (and, when
    present, per-lumi event counts) collected into lists.
    """
    sql = ""
    wheresql = ""
    lfn_generator = ""
    run_generator = ""
    if logical_file_name and not isinstance(logical_file_name, list):
        binds = {'logical_file_name': logical_file_name}
        if int(validFileOnly) == 0:
            if migration:
                #migration always call with single file and include all files no matter valid or not.
                sql = self.sql + """
                FROM {owner}FILE_LUMIS FL
                JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
                WHERE F.LOGICAL_FILE_NAME = :logical_file_name
                """.format(owner=self.owner)
            else:
                sql = self.sql + """
                , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME
                FROM {owner}FILE_LUMIS FL
                JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
                WHERE F.LOGICAL_FILE_NAME = :logical_file_name
                """.format(owner=self.owner)
        else:
            # valid-only: also require the dataset to be VALID/PRODUCTION
            sql = self.sql + """
            , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME
            FROM {owner}FILE_LUMIS FL
            JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
            JOIN {owner}DATASETS D ON D.DATASET_ID = F.DATASET_ID
            JOIN {owner}DATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID
            WHERE F.IS_FILE_VALID = 1 AND F.LOGICAL_FILE_NAME = :logical_file_name
            AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION')
            """.format(owner=self.owner)
    elif logical_file_name and isinstance(logical_file_name, list):
        sql = self.sql + """
        , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME
        FROM {owner}FILE_LUMIS FL
        JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
        """.format(owner=self.owner)
        # lfn list goes through a WITH-clause token generator
        lfn_generator, binds = create_token_generator(logical_file_name)
        if int(validFileOnly) == 0:
            wheresql = "WHERE F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)"
        else:
            sql = sql + """
            JOIN {owner}DATASETS D ON D.DATASET_ID = F.DATASET_ID
            JOIN {owner}DATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID
            """.format(owner=self.owner)
            wheresql = """ WHERE F.IS_FILE_VALID = 1
            AND F.LOGICAL_FILE_NAME in (SELECT TOKEN
            FROM TOKEN_GENERATOR)
            AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') """
        sql = "{lfn_generator} {sql} {wheresql}".format(lfn_generator=lfn_generator, sql=sql, wheresql=wheresql)
    elif block_name:
        binds = {'block_name': block_name}
        if int(validFileOnly) == 0:
            sql = self.sql + """
            , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME
            FROM {owner}FILE_LUMIS FL
            JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
            JOIN {owner}BLOCKS B ON B.BLOCK_ID = F.BLOCK_ID
            WHERE B.BLOCK_NAME = :block_name""".format(owner=self.owner)
        else:
            sql = self.sql + """
            , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME
            FROM {owner}FILE_LUMIS FL
            JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
            JOIN {owner}DATASETS D ON D.DATASET_ID = F.DATASET_ID
            JOIN {owner}DATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID
            JOIN {owner}BLOCKS B ON B.BLOCK_ID = F.BLOCK_ID
            WHERE F.IS_FILE_VALID = 1 AND B.BLOCK_NAME = :block_name
            AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION')
            """.format(owner=self.owner)
    else:
        dbsExceptionHandler('dbsException-invalid-input2', "FileLumi/List: Either logocal_file_name or block_name must be provided.", self.logger.exception, "FileLumi/List: Either logocal_file_name or block_name must be provided.")
    #
    if run_num != -1:
        run_list = []
        wheresql_run_list=''
        wheresql_run_range=''
        for r in parseRunRange(run_num):
            if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long) or isinstance(r, str):
                run_list.append(str(r))
            if isinstance(r, run_tuple):
                if r[0] == r[1]:
                    dbsExceptionHandler('dbsException-invalid-input2', "DBS run range must be apart at least by 1.", self.logger.exception, "DBS run range must be apart at least by 1.")
                # NOTE(review): a single :minrun/:maxrun pair — multiple
                # range tuples would overwrite earlier binds; confirm.
                wheresql_run_range = " FL.RUN_NUM between :minrun and :maxrun "
                binds.update({"minrun":r[0]})
                binds.update({"maxrun":r[1]})
        #
        if run_list:
            if len(run_list) == 1:
                # single run: plain bind, no token generator needed
                wheresql_run_list = " fl.RUN_NUM = :single_run "
                binds.update({"single_run": long(run_list[0])})
            else:
                wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                # the WITH clause must precede the SELECT statement
                sql = "{run_generator}".format(run_generator=run_generator) + sql
                binds.update(run_binds)
        if wheresql_run_range and wheresql_run_list:
            sql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
        elif wheresql_run_range and not wheresql_run_list:
            sql += " and " + wheresql_run_range
        elif not wheresql_run_range and wheresql_run_list:
            sql += " and " + wheresql_run_list
    self.logger.debug(sql)
    self.logger.debug(binds)
    # both lists at once would define TOKEN_GENERATOR twice
    if run_generator and lfn_generator:
        dbsExceptionHandler('dbsException-invalid-input2', "listFileLumiArray support single list of lfn or run_num. ", self.logger.exception, "listFileLumiArray support single list of lfn or run_num. ")
    cursors = self.dbi.processData(sql, binds, conn, transaction=False, returnCursor=True)
    result=[]
    file_run_lumi={}
    event_ct=False
    for i in cursors:
        result.extend(self.formatCursor(i, size=100))
    #for migration, we need flat format to load the data into another DB.
    #self.logger.error(result)
    if migration:
        #YG 09/2015.
        for item in result:
            yield item
    else:
        # event_count presence decided from the first row; assumes all rows
        # agree (they come from the same FILE_LUMIS columns)
        if result and result[0]['event_count']:
            event_ct = True
        for i in result:
            r = i['run_num']
            f = i['logical_file_name']
            if event_ct:
                file_run_lumi.setdefault((f, r), []).append([i['lumi_section_num'], i['event_count']])
            else:
                file_run_lumi.setdefault((f, r), []).append(i['lumi_section_num'])
        for k, v in file_run_lumi.iteritems():
            if event_ct:
                lumi=[]
                event=[]
                for le in v:
                    lumi.append(le[0])
                    event.append(le[1])
                yield {'logical_file_name':k[0], 'run_num':k[1], 'lumi_section_num':lumi, 'event_count':event}
            else:
                yield {'logical_file_name':k[0], 'run_num':k[1], 'lumi_section_num':v}
    del file_run_lumi
    del result
def execute(self, conn, block_name="", dataset="", detail=0, transaction=False):
    """
    Return per-block (detail=1) or aggregated (detail=0) size/file/event
    summaries, selected either by dataset name or by a block-name list.

    When selecting by block names, the names are fed through a WITH-clause
    token generator to stay under Oracle's 1000-expression IN-list limit.
    """
    binds = {}
    generatedsql=''
    if dataset:
        where_clause = "WHERE DS.dataset=:dataset"
        if detail:
            # one summary row per block of the dataset
            sql = """
            with t1 as(
            SELECT BS.BLOCK_NAME as BLOCK_NAME, NVL(SUM(FS.EVENT_COUNT),0) as NUM_EVENT
            FROM {owner}FILES FS
            {block_join}
            {dataset_join}
            {where_clause}
            group by BS.BLOCK_NAME
            )
            select b.block_name as block_name, b.file_count as num_file, b.block_size as file_size,
            t1.num_event as num_event, b.open_for_writing as open_for_writing
            from {owner}blocks b, t1
            where t1.block_name = b.block_name
            """.format(owner=self.owner, where_clause=where_clause, dataset_join=self.dataset_join, block_join=self.block_join)
        else:
            # single aggregated row
            # NOTE(review): "FROM {owner} BLOCKS BS" has a space after the
            # owner prefix (valid in Oracle around the schema dot) — confirm
            # intentional.
            sql = """SELECT (
            SELECT NVL(SUM(BS.BLOCK_SIZE), 0) FROM {owner}BLOCKS BS
            {dataset_join}
            {where_clause}
            ) AS FILE_SIZE,
            (
            SELECT NVL(SUM(BS.FILE_COUNT),0) FROM {owner} BLOCKS BS
            {dataset_join}
            {where_clause}
            ) AS NUM_FILE,
            (
            SELECT NVL(SUM(FS.EVENT_COUNT),0) FROM {owner}FILES FS
            {block_join}
            {dataset_join}
            {where_clause}
            ) AS NUM_EVENT
            FROM DUAL""".format(owner=self.owner, where_clause=where_clause, dataset_join=self.dataset_join, block_join=self.block_join)
        binds.update(dataset=dataset)
    else:
        # Oracle IN only supports a maximum of 1,000 values
        # (ORA-01795: maximum number of expressions in a list is 1000)
        if isinstance(block_name, basestring):
            block_name=[block_name]
        block_clause = "BS.BLOCK_NAME IN (SELECT TOKEN FROM TOKEN_GENERATOR) "
        generatedsql, run_binds = create_token_generator(block_name)
        binds.update(run_binds)
        where_clause = "WHERE {block_clause}".format(block_clause=block_clause)
        if detail:
            sql = generatedsql + \
            """ select b.block_name as block_name, b.file_count as num_file, b.block_size as file_size,
            t1.num_event as num_event, b.open_for_writing as open_for_writing
            from {owner}blocks b,
            (select bs.block_name as block_name, NVL(sum(fs.event_count),0) as num_event from {owner}files fs
            {block_join}
            {where_clause}
            group by bs.block_name )t1
            where t1.block_name = b.block_name
            """.format(owner=self.owner, block_join=self.block_join, where_clause=where_clause)
        else:
            sql = generatedsql + \
            """SELECT (
            SELECT NVL(SUM(BS.BLOCK_SIZE),0) FROM {owner}BLOCKS BS
            {where_clause}
            ) AS FILE_SIZE,
            (
            SELECT NVL(SUM(BS.FILE_COUNT),0) FROM {owner} BLOCKS BS
            {where_clause}
            ) AS NUM_FILE,
            (
            SELECT NVL(SUM(FS.EVENT_COUNT),0) FROM {owner}FILES FS
            {block_join}
            {where_clause}
            ) AS NUM_EVENT
            FROM DUAL""".format(owner=self.owner, where_clause=where_clause, block_join=self.block_join)
    cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
    result = []
    #self.logger.debug(sql)
    #self.logger.debug(binds)
    for cursor in cursors:
        result.extend(self.formatCursor(cursor, size=100))
    return result
def execute(self, conn, dataset="", is_dataset_valid=1, parent_dataset="", release_version="", pset_hash="", app_name="", output_module_label="", global_tag="", processing_version=0, acquisition_era="", run_num=-1, physics_group_name="", logical_file_name="", primary_ds_name="", primary_ds_type="", processed_ds_name="", data_tier_name="", dataset_access_type="", prep_id="", create_by='', last_modified_by='', min_cdate=0, max_cdate=0, min_ldate=0, max_ldate=0, cdate=0, ldate=0, dataset_id=-1, transaction=False):
    """
    List brief dataset records (generator).

    Three mutually exclusive entry modes:
      * dataset as a list          -> POST-style token-generator lookup;
      * dataset_id not a plain int -> POST-style id/list/range lookup;
      * otherwise                  -> GET-style filtering on every other
                                      keyword argument.
    The statement is composed at the end as
    generatedsql + selectsql + self.basesql + joinsql + wheresql.

    NOTE(review): the guard
    ``(dataset_access_type != "%" or dataset_access_type != '*')`` is a
    tautology (always True for any value) — presumably ``and`` was meant so
    that a bare wildcard skips the access-type filter; behavior is kept
    as-is here.
    """
    if not conn:
        dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/Dataset/BriefList. Expects db connection from upper layer.")
    selectsql = 'SELECT '
    joinsql = ''
    generatedsql = ''
    binds = {}
    wheresql = 'WHERE D.IS_DATASET_VALID=:is_dataset_valid '
    if dataset and type(dataset) is list:  # for the POST method
        #wheresql += " AND D.DATASET=:dataset "
        ds_generator, binds2 = create_token_generator(dataset)
        binds.update(binds2)
        wheresql += " AND D.DATASET in (SELECT TOKEN FROM TOKEN_GENERATOR)"
        generatedsql = "{ds_generator}".format(ds_generator=ds_generator)
        if dataset_access_type and (dataset_access_type != "%" or dataset_access_type != '*'):
            joinsql += " JOIN %sDATASET_ACCESS_TYPES DP on DP.DATASET_ACCESS_TYPE_ID= D.DATASET_ACCESS_TYPE_ID " % (self.owner)
            op = ("=", "like")["%" in dataset_access_type or "*" in dataset_access_type]
            wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
            binds['dataset_access_type'] = dataset_access_type
            binds['is_dataset_valid'] = is_dataset_valid
        else:
            binds['is_dataset_valid'] = is_dataset_valid
    elif dataset_id is not None and type(dataset_id) is not int:  # for the POST method
        #we treat the datset_id is the same way as run_num. It can be id1-id2, id or [id1,2,3 ...]
        dataset_id_list = []
        wheresql_dataset_id_list = ''
        wheresql_dataset_id_range = ''
        for id in parseRunRange(dataset_id):
            if isinstance(id, basestring) or isinstance(id, int) or isinstance(id, long):
                dataset_id_list.append(str(id))
            if isinstance(id, run_tuple):
                if id[0] == id[1]:
                    dbsExceptionHandler('dbsException-invalid-input', "DBS dataset_id range must be apart at least by 1.")
                # single :minid/:maxid bind pair — later range tuples
                # overwrite earlier ones
                wheresql_dataset_id_range = " D.DATASET_ID between :minid and :maxid "
                binds.update({"minid": id[0]})
                binds.update({"maxid": id[1]})
        if dataset_id_list:
            ds_generator, binds2 = create_token_generator(dataset_id_list)
            binds.update(binds2)
            wheresql_dataset_id_list = " D.DATASET_ID in (SELECT TOKEN FROM TOKEN_GENERATOR)"
            generatedsql = "{ds_generator}".format(ds_generator=ds_generator)
        if dataset_access_type and (dataset_access_type != "%" or dataset_access_type != '*'):
            joinsql += " JOIN %sDATASET_ACCESS_TYPES DP on DP.DATASET_ACCESS_TYPE_ID= D.DATASET_ACCESS_TYPE_ID " % (self.owner)
            op = ("=", "like")["%" in dataset_access_type or "*" in dataset_access_type]
            wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
            binds['dataset_access_type'] = dataset_access_type
            binds['is_dataset_valid'] = is_dataset_valid
        else:
            binds['is_dataset_valid'] = is_dataset_valid
        if wheresql_dataset_id_list and wheresql_dataset_id_range:
            wheresql += " and (" + wheresql_dataset_id_list + " or " + wheresql_dataset_id_range + " )"
        elif wheresql_dataset_id_list and not wheresql_dataset_id_range:
            wheresql += " and " + wheresql_dataset_id_list
        elif not wheresql_dataset_id_list and wheresql_dataset_id_range:
            wheresql += " and " + wheresql_dataset_id_range
    else:  #for the GET method
        binds.update(is_dataset_valid=is_dataset_valid)
        # creation-date window: exact, closed range, or open-ended
        if cdate != 0:
            wheresql += "AND D.CREATION_DATE = :cdate "
            binds.update(cdate=cdate)
        elif min_cdate != 0 and max_cdate != 0:
            wheresql += "AND D.CREATION_DATE BETWEEN :min_cdate and :max_cdate "
            binds.update(min_cdate=min_cdate)
            binds.update(max_cdate=max_cdate)
        elif min_cdate != 0 and max_cdate == 0:
            wheresql += "AND D.CREATION_DATE > :min_cdate "
            binds.update(min_cdate=min_cdate)
        elif min_cdate == 0 and max_cdate != 0:
            wheresql += "AND D.CREATION_DATE < :max_cdate "
            binds.update(max_cdate=max_cdate)
        else:
            pass
        # last-modification-date window, same scheme as above
        if ldate != 0:
            wheresql += "AND D.LAST_MODIFICATION_DATE = :ldate "
            binds.update(ldate=ldate)
        elif min_ldate != 0 and max_ldate != 0:
            wheresql += "AND D.LAST_MODIFICATION_DATE BETWEEN :min_ldate and :max_ldate "
            binds.update(min_ldate=min_ldate)
            binds.update(max_ldate=max_ldate)
        elif min_ldate != 0 and max_ldate == 0:
            wheresql += "AND D.LAST_MODIFICATION_DATE > :min_ldate "
            binds.update(min_ldate=min_ldate)
        elif min_ldate == 0 and max_ldate != 0:
            wheresql += "AND D.LAST_MODIFICATION_DATE < :max_ldate "
            binds.update(max_ldate=max_ldate)
        else:
            pass
        if create_by:
            wheresql += " AND D.CREATE_BY = :create_by "
            binds.update(create_by=create_by)
        if last_modified_by:
            wheresql += " AND D.LAST_MODIFIED_BY = :last_modified_by "
            binds.update(last_modified_by=last_modified_by)
        if prep_id:
            wheresql += "AND D.prep_id = :prep_id "
            binds.update(prep_id=prep_id)
        if dataset and isinstance(dataset, basestring) and dataset != "%":
            op = ("=", "like")["%" in dataset]
            wheresql += " AND D.DATASET %s :dataset " % op
            binds.update(dataset=dataset)
        if dataset_id != -1:
            wheresql += " AND D.DATASET_ID = :dataset_id "
            binds.update(dataset_id=dataset_id)
        if primary_ds_name and primary_ds_name != "%":
            joinsql += " JOIN %sPRIMARY_DATASETS P ON P.PRIMARY_DS_ID = D.PRIMARY_DS_ID " % (self.owner)
            op = ("=", "like")["%" in primary_ds_name]
            wheresql += " AND P.PRIMARY_DS_NAME %s :primary_ds_name " % op
            binds.update(primary_ds_name=primary_ds_name)
        if primary_ds_type and primary_ds_type != "%":
            if not primary_ds_name:
                # P not joined yet; PDT hangs off P
                joinsql += " JOIN %sPRIMARY_DATASETS P ON P.PRIMARY_DS_ID = D.PRIMARY_DS_ID " % (self.owner)
            joinsql += " JOIN %sPRIMARY_DS_TYPES PDT ON PDT.PRIMARY_DS_TYPE_ID = P.PRIMARY_DS_TYPE_ID " % (self.owner)
            op = ("=", "like")["%" in primary_ds_type]
            wheresql += " AND PDT.PRIMARY_DS_TYPE %s :primary_ds_type " % op
            binds.update(primary_ds_type=primary_ds_type)
        if processed_ds_name and processed_ds_name != "%":
            joinsql += " JOIN %sPROCESSED_DATASETS PR ON PR.PROCESSED_DS_ID = D.PROCESSED_DS_ID " % (self.owner)
            op = ("=", "like")["%" in processed_ds_name]
            wheresql += " AND PR.PROCESSED_DS_NAME %s :processed_ds_name " % op
            binds.update(processed_ds_name=processed_ds_name)
        if data_tier_name and data_tier_name != "%":
            joinsql += " JOIN %sDATA_TIERS DT ON DT.DATA_TIER_ID = D.DATA_TIER_ID " % (self.owner)
            op = ("=", "like")["%" in data_tier_name]
            wheresql += " AND DT.DATA_TIER_NAME %s :data_tier_name " % op
            binds.update(data_tier_name=data_tier_name)
        if dataset_access_type and (dataset_access_type != "%" or dataset_access_type != '*'):
            joinsql += " JOIN %sDATASET_ACCESS_TYPES DP on DP.DATASET_ACCESS_TYPE_ID= D.DATASET_ACCESS_TYPE_ID " % (self.owner)
            op = ("=", "like")["%" in dataset_access_type or "*" in dataset_access_type]
            wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
            binds.update(dataset_access_type=dataset_access_type)
        if physics_group_name and physics_group_name != "%":
            joinsql += " LEFT OUTER JOIN %sPHYSICS_GROUPS PH ON PH.PHYSICS_GROUP_ID = D.PHYSICS_GROUP_ID " % (self.owner)
            op = ("=", "like")["%" in physics_group_name]
            wheresql += " AND PH.PHYSICS_GROUP_NAME %s :physics_group_name " % op
            binds.update(physics_group_name=physics_group_name)
        if parent_dataset:
            joinsql += """ LEFT OUTER JOIN %sDATASET_PARENTS DSP ON DSP.THIS_DATASET_ID = D.DATASET_ID
                           LEFT OUTER JOIN %sDATASETS PDS ON PDS.DATASET_ID = DSP.PARENT_DATASET_ID """ % ((self.owner, ) * 2)
            wheresql += " AND PDS.DATASET = :parent_dataset "
            binds.update(parent_dataset=parent_dataset)
        # output-module-configuration filters share the DOMC/OMC joins
        if release_version or pset_hash or app_name or output_module_label or global_tag:
            joinsql += """ LEFT OUTER JOIN %sDATASET_OUTPUT_MOD_CONFIGS DOMC ON DOMC.DATASET_ID = D.DATASET_ID
                           LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = DOMC.OUTPUT_MOD_CONFIG_ID """ % ((self.owner, ) * 2)
        if release_version:
            joinsql += " LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID " % (self.owner)
            op = ("=", "like")["%" in release_version]
            wheresql += " AND RV.RELEASE_VERSION %s :release_version " % op
            binds.update(release_version=release_version)
        if pset_hash:
            joinsql += " LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID " % (self.owner)
            op = ("=", "like")["%" in pset_hash]
            wheresql += " AND PSH.PSET_HASH %s :pset_hash " % op
            binds.update(pset_hash=pset_hash)
        if app_name:
            joinsql += " LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID " % (self.owner)
            op = ("=", "like")["%" in app_name]
            wheresql += " AND AEX.APP_NAME %s :app_name " % op
            binds.update(app_name=app_name)
        if output_module_label:
            op = ("=", "like")["%" in output_module_label]
            wheresql += " AND OMC.OUTPUT_MODULE_LABEL %s :output_module_label " % op
            binds.update(output_module_label=output_module_label)
        if global_tag:
            op = ("=", "like")["%" in global_tag]
            wheresql += " AND OMC.GLOBAL_TAG %s :global_tag " % op
            binds.update(global_tag=global_tag)
        if processing_version != 0:
            joinsql += " LEFT OUTER JOIN %sPROCESSING_ERAS PE ON PE.PROCESSING_ERA_ID = D.PROCESSING_ERA_ID " % (self.owner)
            #op = ("=", "like")["%" in processing_version]
            op = "="
            wheresql += " AND PE.PROCESSING_VERSION %s :pversion " % op
            binds.update(pversion=processing_version)
        if acquisition_era:
            joinsql += " LEFT OUTER JOIN %sACQUISITION_ERAS AE ON AE.ACQUISITION_ERA_ID = D.ACQUISITION_ERA_ID " % (self.owner)
            op = ("=", "like")["%" in acquisition_era]
            wheresql += " AND AE.ACQUISITION_ERA_NAME %s :aera " % op
            binds.update(aera=acquisition_era)
        if logical_file_name and logical_file_name != "%":
            # FILES join fans out; DISTINCT keeps one row per dataset
            selectsql += "DISTINCT "
            joinsql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % self.owner
            wheresql += " AND FL.LOGICAL_FILE_NAME = :logical_file_name "
            binds.update(logical_file_name=logical_file_name)
        #
        if run_num != -1:
            if not logical_file_name:
                # FL not joined yet; FLLU below needs it
                selectsql += "DISTINCT "
                joinsql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % (self.owner)
            joinsql += " JOIN %sFILE_LUMIS FLLU on FLLU.FILE_ID=FL.FILE_ID " % (self.owner)
            run_list = []
            wheresql_run_list = ''
            wheresql_run_range = ''
            for r in parseRunRange(run_num):
                if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
                    run_list.append(str(r))
                if isinstance(r, run_tuple):
                    if r[0] == r[1]:
                        dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.")
                    # single :minrun/:maxrun pair — later ranges overwrite
                    wheresql_run_range = " FLLU.RUN_NUM between :minrun and :maxrun "
                    binds.update({"minrun": r[0]})
                    binds.update({"maxrun": r[1]})
            #
            if run_list:
                wheresql_run_list = " FLLU.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                generatedsql = "{run_generator}".format(run_generator=run_generator)
                binds.update(run_binds)
            if wheresql_run_range and wheresql_run_list:
                wheresql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
            elif wheresql_run_range and not wheresql_run_list:
                wheresql += " and " + wheresql_run_range
            elif not wheresql_run_range and wheresql_run_list:
                wheresql += " and " + wheresql_run_list
    sql = "".join((generatedsql, selectsql, self.basesql, joinsql, wheresql))
    #self.logger.error( sql)
    #self.logger.error( binds)
    cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
    for i in cursors:
        d = self.formatCursor(i)
        if isinstance(d, list) or isinstance(d, GeneratorType):
            for elem in d:
                yield elem
        elif d:
            yield d
def execute(self, conn, dataset="", is_dataset_valid=1, parent_dataset="",
            release_version="", pset_hash="", app_name="", output_module_label="",
            global_tag="", processing_version=0, acquisition_era="", run_num=-1,
            physics_group_name="", logical_file_name="", primary_ds_name="",
            primary_ds_type="", processed_ds_name="", data_tier_name="", dataset_access_type="", prep_id="",
            create_by='', last_modified_by='', min_cdate=0, max_cdate=0, min_ldate=0, max_ldate=0, cdate=0,
            ldate=0, dataset_id=-1, transaction=False):
    """
    List datasets matching the given filters (Oracle/Dataset/List DAO).

    Builds a SELECT over self.basesql by appending JOIN and WHERE fragments
    for each non-default keyword argument, binds the values as Oracle bind
    variables, and yields one formatted row dict at a time (this method is a
    generator).

    Three top-level input shapes are handled:
      * ``dataset`` given as a list  -> POST call; matched via a
        TOKEN_GENERATOR WITH-clause (see create_token_generator).
      * ``dataset_id`` not an int    -> POST call; treated like run_num, i.e.
        it may be a single id, an "id1-id2" range, or a list of ids.
      * otherwise                    -> GET call; every other filter argument
        is applied individually.

    :param conn: db connection from the upper layer (required).
    :param run_num: -1 means "no run filter"; otherwise value(s) accepted by
        parseRunRange (single run, range string, or list).
    :raises: via dbsExceptionHandler on missing connection, degenerate
        ranges, or an unsupported parameter combination.
    """
    if not conn:
        dbsExceptionHandler(
            "dbsException-failed-connect2host",
            "%s Oracle/Dataset/List. Expects db connection from upper layer.",
            self.logger.exception)
    sql = ""
    generatedsql = ''          # WITH-clause (TOKEN_GENERATOR) text, prepended to the SELECT
    basesql = self.basesql
    binds = {}
    wheresql = "WHERE D.IS_DATASET_VALID = :is_dataset_valid "
    if dataset and type(dataset) is list:  # for the POST method
        # Match many dataset names via the TOKEN_GENERATOR subquery factory.
        ds_generator, binds2 = create_token_generator(dataset)
        binds.update(binds2)
        wheresql += " AND D.DATASET in (SELECT TOKEN FROM TOKEN_GENERATOR)"
        generatedsql = "{ds_generator}".format(ds_generator=ds_generator)
        if dataset_access_type and dataset_access_type != "%":
            op = ("=", "like")["%" in dataset_access_type]
            wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
            binds['dataset_access_type'] = dataset_access_type
            binds['is_dataset_valid'] = is_dataset_valid
        else:
            binds['is_dataset_valid'] = is_dataset_valid
        sql = generatedsql + 'SELECT' + basesql + wheresql
    elif type(dataset_id) is not int:
        # for the POST method
        # we treat the datset_id is the same way as run_num. It can be
        # id1-id2, id or [id1,2,3 ...]
        dataset_id_list = []
        wheresql_dataset_id_list = ''
        wheresql_dataset_id_range = ''
        for id in parseRunRange(dataset_id):  # NOTE: `id` shadows the builtin (kept as-is)
            if isinstance(id, basestring) or isinstance(
                    id, int) or isinstance(id, long):
                dataset_id_list.append(str(id))
            if isinstance(id, run_tuple):
                if id[0] == id[1]:
                    dbsExceptionHandler(
                        'dbsException-invalid-input',
                        "DBS dataset_id range must be apart at least by 1.",
                        self.logger.exception)
                wheresql_dataset_id_range = " D.DATASET_ID between :minid and :maxid "
                binds.update({"minid": id[0]})
                binds.update({"maxid": id[1]})
        if dataset_id_list:
            ds_generator, binds2 = create_token_generator(dataset_id_list)
            binds.update(binds2)
            wheresql_dataset_id_list = " D.DATASET_ID in (SELECT TOKEN FROM TOKEN_GENERATOR)"
            generatedsql = "{ds_generator}".format(ds_generator=ds_generator)
        if dataset_access_type and dataset_access_type != "%":
            op = ("=", "like")["%" in dataset_access_type]
            wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
            binds['dataset_access_type'] = dataset_access_type
            binds['is_dataset_valid'] = is_dataset_valid
        else:
            binds['is_dataset_valid'] = is_dataset_valid
        # Combine list and/or range predicates; both may be present at once.
        if wheresql_dataset_id_list and wheresql_dataset_id_range:
            sql = generatedsql + 'SELECT' + basesql + wheresql + " and ( " \
                + wheresql_dataset_id_list + " or " + wheresql_dataset_id_range + " )"
        elif wheresql_dataset_id_list and not wheresql_dataset_id_range:
            sql = generatedsql + 'SELECT' + basesql + wheresql + " and " + wheresql_dataset_id_list
        elif not wheresql_dataset_id_list and wheresql_dataset_id_range:
            sql = generatedsql + 'SELECT' + basesql + wheresql + " and " + wheresql_dataset_id_range
    else:
        # for the GET method: apply each scalar filter in turn.
        binds.update(is_dataset_valid=is_dataset_valid)
        # creation-date filters: exact, closed range, or open-ended range
        if cdate != 0:
            wheresql += "AND D.CREATION_DATE = :cdate "
            binds.update(cdate=cdate)
        elif min_cdate != 0 and max_cdate != 0:
            wheresql += "AND D.CREATION_DATE BETWEEN :min_cdate and :max_cdate "
            binds.update(min_cdate=min_cdate)
            binds.update(max_cdate=max_cdate)
        elif min_cdate != 0 and max_cdate == 0:
            wheresql += "AND D.CREATION_DATE > :min_cdate "
            binds.update(min_cdate=min_cdate)
        elif min_cdate == 0 and max_cdate != 0:
            wheresql += "AND D.CREATION_DATE < :max_cdate "
            binds.update(max_cdate=max_cdate)
        else:
            pass
        # last-modification-date filters, same pattern as creation date
        if ldate != 0:
            wheresql += "AND D.LAST_MODIFICATION_DATE = :ldate "
            binds.update(ldate=ldate)
        elif min_ldate != 0 and max_ldate != 0:
            wheresql += "AND D.LAST_MODIFICATION_DATE BETWEEN :min_ldate and :max_ldate "
            binds.update(min_ldate=min_ldate)
            binds.update(max_ldate=max_ldate)
        elif min_ldate != 0 and max_ldate == 0:
            wheresql += "AND D.LAST_MODIFICATION_DATE > :min_ldate "
            binds.update(min_ldate=min_ldate)
        elif min_ldate == 0 and max_ldate != 0:
            wheresql += "AND D.LAST_MODIFICATION_DATE < :max_ldate "
            binds.update(max_ldate=max_ldate)
        else:
            pass
        if create_by:
            wheresql += " AND D.CREATE_BY = :create_by "
            binds.update(create_by=create_by)
        if last_modified_by:
            wheresql += " AND D.LAST_MODIFIED_BY = :last_modified_by "
            binds.update(last_modified_by=last_modified_by)
        if prep_id:
            wheresql += "AND D.PREP_ID = :prep_id "
            binds.update(prep_id=prep_id)
        # "%"-containing patterns use LIKE; a bare "%" means "no filter"
        if dataset and dataset != "%":
            op = ("=", "like")["%" in dataset]
            wheresql += " AND D.DATASET %s :dataset " % op
            binds.update(dataset=dataset)
        if dataset_id != -1:
            wheresql += " AND D.DATASET_ID = :dataset_id "
            binds.update(dataset_id=dataset_id)
        if primary_ds_name and primary_ds_name != "%":
            op = ("=", "like")["%" in primary_ds_name]
            wheresql += " AND P.PRIMARY_DS_NAME %s :primary_ds_name " % op
            binds.update(primary_ds_name=primary_ds_name)
        if processed_ds_name and processed_ds_name != "%":
            op = ("=", "like")["%" in processed_ds_name]
            wheresql += " AND PD.PROCESSED_DS_NAME %s :processed_ds_name " % op
            binds.update(processed_ds_name=processed_ds_name)
        if data_tier_name and data_tier_name != "%":
            op = ("=", "like")["%" in data_tier_name]
            wheresql += " AND DT.DATA_TIER_NAME %s :data_tier_name " % op
            binds.update(data_tier_name=data_tier_name)
        if dataset_access_type and dataset_access_type != "%":
            op = ("=", "like")["%" in dataset_access_type]
            wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
            binds.update(dataset_access_type=dataset_access_type)
        if primary_ds_type and primary_ds_type != "%":
            op = ("=", "like")["%" in primary_ds_type]
            wheresql += " AND PDT.PRIMARY_DS_TYPE %s :primary_ds_type " % op
            binds.update(primary_ds_type=primary_ds_type)
        if physics_group_name and physics_group_name != "%":
            op = ("=", "like")["%" in physics_group_name]
            wheresql += " AND PH.PHYSICS_GROUP_NAME %s :physics_group_name " % op
            binds.update(physics_group_name=physics_group_name)
        if parent_dataset:
            # parent lookup needs an extra projected column plus two joins
            basesql = "PDS.DATASET PARENT_DATASET, " + basesql
            basesql += """
                LEFT OUTER JOIN %sDATASET_PARENTS DSP ON DSP.THIS_DATASET_ID = D.DATASET_ID
                LEFT OUTER JOIN %sDATASETS PDS ON PDS.DATASET_ID = DSP.PARENT_DATASET_ID
                """ % ((self.owner, ) * 2)
            wheresql += " AND PDS.DATASET = :parent_dataset "
            binds.update(parent_dataset=parent_dataset)
        if release_version or pset_hash or app_name or output_module_label:
            # any output-module-config filter pulls in the OMC join chain
            basesql = """
                OMC.OUTPUT_MODULE_LABEL, OMC.GLOBAL_TAG, RV.RELEASE_VERSION,
                PSH.PSET_HASH, AEX.APP_NAME, """ + basesql
            basesql += """
                LEFT OUTER JOIN %sDATASET_OUTPUT_MOD_CONFIGS DOMC ON DOMC.DATASET_ID = D.DATASET_ID
                LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = DOMC.OUTPUT_MOD_CONFIG_ID
                LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID
                LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID
                LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID
                """ % ((self.owner, ) * 5)
        if release_version:
            op = ("=", "like")["%" in release_version]
            wheresql += " AND RV.RELEASE_VERSION %s :release_version " % op
            binds.update(release_version=release_version)
        if pset_hash:
            op = ("=", "like")["%" in pset_hash]
            wheresql += " AND PSH.PSET_HASH %s :pset_hash " % op
            binds.update(pset_hash=pset_hash)
        if app_name:
            op = ("=", "like")["%" in app_name]
            wheresql += " AND AEX.APP_NAME %s :app_name " % op
            binds.update(app_name=app_name)
        if output_module_label:
            op = ("=", "like")["%" in output_module_label]
            wheresql += " AND OMC.OUTPUT_MODULE_LABEL %s :output_module_label " % op
            binds.update(output_module_label=output_module_label)
        if global_tag:
            op = ("=", "like")["%" in global_tag]
            wheresql += " AND OMC.GLOBAL_TAG %s :global_tag " % op
            binds.update(global_tag=global_tag)
        if processing_version != 0:
            #op = ("=", "like")["%" in processing_version]
            op = "="
            # NOTE(review): relies on PE being joined in self.basesql — TODO confirm
            wheresql += " AND PE.PROCESSING_VERSION %s :pversion " % op
            binds.update(pversion=processing_version)
        if acquisition_era:
            op = ("=", "like")["%" in acquisition_era]
            # NOTE(review): relies on AE being joined in self.basesql — TODO confirm
            wheresql += " AND AE.ACQUISITION_ERA_NAME %s :aera " % op
            binds.update(aera=acquisition_era)
        # This should resolve to original cases that were in the business logic
        if (not logical_file_name or logical_file_name == "%") and (run_num == -1):
            # no file/run filter: just execute the query as built
            sql = "SELECT " + basesql + wheresql
        elif (run_num == -1) and logical_file_name and logical_file_name != "%":
            # filter by a single logical file name (DISTINCT because of the FILES join)
            sql = "SELECT DISTINCT " + basesql
            sql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % self.owner
            wheresql += " AND FL.LOGICAL_FILE_NAME = :logical_file_name "
            binds.update(logical_file_name=logical_file_name)
            sql += wheresql
        elif (run_num != -1):
            # filter by run number(s), optionally also by file name
            sql += "SELECT DISTINCT " + basesql
            if logical_file_name:
                sql += "JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % self.owner
                wheresql += " AND FL.LOGICAL_FILE_NAME = :logical_file_name "
                binds.update(logical_file_name=logical_file_name)
            else:
                sql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % (
                    self.owner)
            #
            sql += " JOIN %sFILE_LUMIS FLLU on FLLU.FILE_ID=FL.FILE_ID " % (
                self.owner)
            run_list = []
            wheresql_run_list = ''
            wheresql_run_range = ''
            for r in parseRunRange(run_num):
                if isinstance(r, basestring) or isinstance(
                        r, int) or isinstance(r, long):
                    run_list.append(str(r))
                if isinstance(r, run_tuple):
                    if r[0] == r[1]:
                        dbsExceptionHandler(
                            'dbsException-invalid-input',
                            "DBS run range must be apart at least by 1.",
                            self.logger.exception)
                    wheresql_run_range = " FLLU.RUN_NUM between :minrun and :maxrun "
                    binds.update({"minrun": r[0]})
                    binds.update({"maxrun": r[1]})
            #
            if run_list:
                wheresql_run_list = " FLLU.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                # WITH clause must precede the SELECT, so prepend it
                sql = "{run_generator}".format(
                    run_generator=run_generator) + sql
                binds.update(run_binds)
            if wheresql_run_range and wheresql_run_list:
                wheresql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
            elif wheresql_run_range and not wheresql_run_list:
                wheresql += " and " + wheresql_run_range
            elif not wheresql_run_range and wheresql_run_list:
                wheresql += " and " + wheresql_run_list
            sql += wheresql
        else:
            dbsExceptionHandler(
                "dbsException-invalid-input",
                "Oracle/Dataset/List. Proper parameters are not\
 provided for listDatasets call.",
                self.logger.exception)
    #self.logger.error( sql)
    #self.logger.error("binds=%s" %binds)
    cursors = self.dbi.processData(sql,
                                   binds,
                                   conn,
                                   transaction,
                                   returnCursor=True)
    # Stream results back one row at a time (generator protocol).
    for i in cursors:
        d = self.formatCursor(i, size=100)
        if isinstance(d, list) or isinstance(d, GeneratorType):
            for elem in d:
                yield elem
        elif d:
            yield d
def execute(self, conn, block_name="", dataset="", run_num=-1, validFileOnly=0, transaction=False):
    """
    Summary counters for a block or a dataset (Oracle/File/SummaryList DAO).

    Returns a one-row list of dicts with num_file, num_event, file_size,
    num_block and num_lumi, computed by scalar subqueries against dual.
    Exactly one of ``block_name`` or ``dataset`` must be given; otherwise an
    empty list is returned. ``run_num`` (default -1 = no filter) restricts
    the counters to files containing the given run(s)/range(s).
    ``validFileOnly=1`` additionally restricts to valid files in
    VALID/PRODUCTION datasets.

    :raises: via dbsExceptionHandler on missing connection or a degenerate
        run range.
    """
    if not conn:
        dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/File/SummaryList. Expects db connection from upper layer.")
    binds = {}
    whererun = ''
    run_list = []
    wheresql_run_list = ''
    wheresql_run_range = ''
    wheresql_isFileValid = ''
    join_valid_ds1 = ''   # joins used when the query starts from FILES f
    join_valid_ds2 = ''   # joins used when DATASETS d is already joined
    sql = ''
    #
    if int(validFileOnly) == 1:
        wheresql_isFileValid = """ and f.is_file_valid = 1 and DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') """
        join_valid_ds1 = """ JOIN %sDATASETS D ON D.DATASET_ID = F.DATASET_ID
            JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID """ % ((self.owner,)*2)
        join_valid_ds2 = """ JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID """ % ((self.owner,)*1)
    #
    if run_num != -1:
        # Collect individual runs and/or a run range from the input.
        for r in parseRunRange(run_num):
            if isinstance(r, basestring) or isinstance(r, (long, int)):
                #if not wheresql_run_list:
                #    wheresql_run_list = " fl.RUN_NUM = :run_list "
                run_list.append(str(r))
            if isinstance(r, run_tuple):
                if r[0] == r[1]:
                    dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.")
                wheresql_run_range = " fl.RUN_NUM between :minrun and :maxrun "
                binds.update({"minrun": r[0]})
                binds.update({"maxrun": r[1]})
        #
        if run_list:
            wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            run_generator, run_binds = create_token_generator(run_list)
            # WITH clause (TOKEN_GENERATOR) must precede the SELECT below.
            sql = "{run_generator}".format(run_generator=run_generator)
            binds.update(run_binds)
        if wheresql_run_list and wheresql_run_range:
            whererun = wheresql_run_range + " or " + wheresql_run_list
        elif wheresql_run_list:
            whererun = wheresql_run_list
        elif wheresql_run_range:
            whererun = wheresql_run_range
    if block_name:
        if run_num != -1:
            # block + run filter: every counter subquery restricts FILE_IDs
            # to those with matching FILE_LUMIS rows.
            sql = sql + \
                """ select
                (select count(f.file_id) from {owner}files f
                 join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                 where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                 and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun} )
                ) as num_file,
                nvl((select sum(f.event_count) event_count from {owner}files f
                 join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                 where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                 and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                ),0) as num_event,
                (select nvl(sum(f.file_size),0) file_size from {owner}files f
                 join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                 where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                 and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                ) as file_size,
                (select count(distinct b.block_id) from {owner}blocks b
                 join {owner}files f on f.block_id=b.block_id {join_valid_ds1}
                 where b.block_name=:block_name {wheresql_isFileValid}
                 and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                ) as num_block,
                (select count(*) from
                 (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                  join {owner}file_lumis fl on fl.file_id=f.file_id
                  join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                  where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and {whererun} )
                ) as num_lumi
                from dual
                """.format(owner=self.owner, whererun=whererun,
                           wheresql_isFileValid=wheresql_isFileValid,
                           join_valid_ds1=join_valid_ds1)
            binds.update({"block_name": block_name})
        else:
            # block only, no run filter
            sql = """ select
                (select count(f.file_id) from {owner}files f
                 join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                 where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                ) as num_file,
                nvl((select sum(f.event_count) event_count from {owner}files f
                 join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                 where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                ),0) as num_event,
                (select nvl(sum(f.file_size),0) file_size from {owner}files f
                 join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                 where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                ) as file_size,
                (select count(block_id) from {owner}blocks where block_name=:block_name
                ) as num_block,
                (select count(*) from
                 (select distinct l.lumi_section_num, l.run_num from {owner}files f
                  join {owner}file_lumis l on l.file_id=f.file_id
                  join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                  where b.BLOCK_NAME=:block_name {wheresql_isFileValid})
                ) as num_lumi
                from dual
                """.format(owner=self.owner,
                           wheresql_isFileValid=wheresql_isFileValid,
                           join_valid_ds1=join_valid_ds1)
            binds.update({"block_name": block_name})
    elif dataset:
        if run_num != -1:
            # dataset + run filter
            sql = sql + \
                """ select
                (select count(f.file_id) from {owner}files f
                 join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                 where d.dataset=:dataset {wheresql_isFileValid}
                 and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                ) as num_file,
                nvl((select sum(f.event_count) event_count from {owner}files f
                 join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                 where d.dataset=:dataset {wheresql_isFileValid}
                 and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                ),0) as num_event,
                (select nvl(sum(f.file_size),0) file_size from {owner}files f
                 join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                 where d.dataset=:dataset {wheresql_isFileValid}
                 and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                ) as file_size,
                (select count(distinct b.block_id) from {owner}blocks b
                 join {owner}datasets d on d.dataset_id = b.dataset_id {join_valid_ds2}
                 join {owner}files f on f.block_id = b.block_id
                 where d.dataset=:dataset {wheresql_isFileValid}
                 and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                ) as num_block,
                (select count(*) from
                 (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                  join {owner}file_lumis fl on fl.file_id=f.file_id
                  join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                  where d.dataset=:dataset {wheresql_isFileValid} and {whererun} )
                ) as num_lumi
                from dual
                """.format(owner=self.owner, whererun=whererun,
                           wheresql_isFileValid=wheresql_isFileValid,
                           join_valid_ds2=join_valid_ds2)
            binds.update({"dataset": dataset})
        else:
            # dataset only, no run filter
            sql = """ \
                select
                (select count(f.file_id) from {owner}files f
                 join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                 where d.dataset=:dataset {wheresql_isFileValid}
                ) as num_file,
                nvl((select sum(f.event_count) event_count from {owner}files f
                 join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                 where d.dataset=:dataset {wheresql_isFileValid}
                ),0) as num_event,
                (select nvl(sum(f.file_size),0) file_size from {owner}files f
                 join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                 where d.dataset=:dataset {wheresql_isFileValid}
                ) as file_size,
                (select count(b.block_id) from {owner}blocks b
                 join {owner}datasets d on d.dataset_id = b.dataset_id {join_valid_ds2}
                 where d.dataset=:dataset
                ) as num_block,
                (select count(*) from
                 (select distinct l.lumi_section_num, l.run_num from {owner}files f
                  join {owner}file_lumis l on l.file_id=f.file_id
                  join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                  where d.dataset=:dataset {wheresql_isFileValid})
                ) as num_lumi
                from dual
                """.format(owner=self.owner,
                           wheresql_isFileValid=wheresql_isFileValid,
                           join_valid_ds2=join_valid_ds2)
            binds.update({"dataset": dataset})
    else:
        # neither block_name nor dataset given: nothing to summarize
        return []
    cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
    result = []
    for i in range(len(cursors)):
        result.extend(self.formatCursor(cursors[i]))
    return result
def execute(self, conn, dataset="", block_name="", logical_file_name="",
            release_version="", pset_hash="", app_name="", output_module_label="",
            run_num=-1, origin_site_name="", lumi_list=[], transaction=False):
    """
    List files matching the given filters (Oracle/File/List DAO).

    Assembles ``self.sql_sel`` (projection) + ``self.sql_cond``
    (FROM/JOIN/WHERE skeleton) with extra joins and predicates per filter,
    then returns the formatted rows as a list of dicts.

    Notes
    -----
    * ``run_num`` != -1 adds a FILE_LUMIS join, makes the SELECT DISTINCT and
      also projects FL.RUN_NUM.
    * When ``lumi_list`` is given, exactly ONE run is allowed (enforced
      below); lumi sections are matched via a TOKEN_GENERATOR WITH clause.
    * WARNING: ``lumi_list=[]`` is a mutable default argument; the code only
      reads it, so behavior is unaffected, but it is kept as-is here.
    """
    if not conn:
        dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/File/List. Expects db connection from upper layer.")
    sql = self.sql_cond
    binds = {}
    sql_sel = self.sql_sel
    run_generator = ''
    if run_num != -1:
        # run filter duplicates rows per lumi -> DISTINCT, and expose the run
        sql_sel = sql_sel.replace("SELECT", "SELECT DISTINCT") + " , FL.RUN_NUM "
        sql += " JOIN %sFILE_LUMIS FL on FL.FILE_ID=F.FILE_ID " % (self.owner)
    if release_version or pset_hash or app_name or output_module_label:
        # any output-module-config filter needs the OMC join chain
        sql += """LEFT OUTER JOIN %sFILE_OUTPUT_MOD_CONFIGS FOMC ON FOMC.FILE_ID = F.FILE_ID
            LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = FOMC.OUTPUT_MOD_CONFIG_ID
            LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID
            LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID
            LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID
            """ % ((self.owner,)*5)
    #FIXME : the status check should only be done with normal/super user
    #sql += """WHERE F.IS_FILE_VALID = 1"""
    # for the time being lests list all files
    sql += """WHERE F.IS_FILE_VALID <> -1 """
    if block_name:
        sql += " AND B.BLOCK_NAME = :block_name"
        binds.update({"block_name": block_name})
    if logical_file_name:
        op = ("=", "like")["%" in logical_file_name]
        sql += " AND F.LOGICAL_FILE_NAME %s :logical_file_name" % op
        binds.update({"logical_file_name": logical_file_name})
    if dataset:
        sql += " AND D.DATASET = :dataset"
        binds.update({"dataset": dataset})
    if release_version:
        op = ("=", "like")["%" in release_version]
        sql += " AND RV.RELEASE_VERSION %s :release_version" % op
        binds.update({"release_version": release_version})
    if pset_hash:
        op = ("=", "like")["%" in pset_hash]
        sql += " AND PSH.PSET_HASH %s :pset_hash" % op
        binds.update({"pset_hash": pset_hash})
    if app_name:
        op = ("=", "like")["%" in app_name]
        sql += " AND AEX.APP_NAME %s :app_name" % op
        binds.update({"app_name": app_name})
    if output_module_label:
        op = ("=", "like")["%" in output_module_label]
        sql += " AND OMC.OUTPUT_MODULE_LABEL %s :output_module_label" % op
        binds.update({"output_module_label": output_module_label})
    if (origin_site_name):
        op = ("=", "like")["%" in origin_site_name]
        sql += " AND B.ORIGIN_SITE_NAME %s :origin_site_name" % op
        binds.update({"origin_site_name": origin_site_name})
    if run_num != -1:
        run_list = []
        wheresql_run_list = ''
        wheresql_run_range = ''
        for r in parseRunRange(run_num):
            if isinstance(r, str) or isinstance(r, int) or isinstance(r, long):
                run_list.append(str(r))
            if isinstance(r, run_tuple):
                if r[0] == r[1]:
                    dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.")
                if not lumi_list:
                    wheresql_run_range = " FL.RUN_NUM between :minrun and :maxrun "
                    binds.update({"minrun": r[0]})
                    binds.update({"maxrun": r[1]})
                else:
                    # a run RANGE combined with a lumi list is ambiguous
                    dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.")
        #
        if run_list and not lumi_list:
            wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            run_generator, run_binds = create_token_generator(run_list)
            #sql = "{run_generator}".format(run_generator=run_generator) + sql
            binds.update(run_binds)
        if wheresql_run_range and wheresql_run_list:
            sql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
        elif wheresql_run_range and not wheresql_run_list:
            sql += " and " + wheresql_run_range
        elif not wheresql_run_range and wheresql_run_list:
            sql += " and " + wheresql_run_list
        # Make sure when we have a lumi_list, there is only ONE run -- YG 14/05/2013
        if (lumi_list and len(lumi_list) != 0):
            if len(run_list) != 1:
                dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.")
            sql += " AND fl.RUN_NUM = :run_num AND FL.LUMI_SECTION_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            #Do I need to convert lumi_list to be a str list? YG 10/03/13
            #Yes, you do. YG
            lumi_list = map(str, lumi_list)
            lumi_generator, lumi_binds = create_token_generator(lumi_list)
            # the lumi TOKEN_GENERATOR WITH clause goes before the projection
            sql_sel = "{lumi_generator}".format(lumi_generator=lumi_generator) + sql_sel
            binds.update(lumi_binds)
            binds["run_num"] = run_list[0]
    # final assembly: optional run WITH clause + projection + conditions
    sql = run_generator + sql_sel + sql
    cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
    result = []
    for i in range(len(cursors)):
        result.extend(self.formatCursor(cursors[i]))
    return result
def execute(self, conn, logical_file_name='', block_name='', run_num=-1, validFileOnly=0, migration=False):
    """
    Lists lumi section numbers with in a file, a list of files or a block.

    Generator (Oracle/FileLumi/List DAO). Either ``logical_file_name`` (a
    single LFN or a list of LFNs) or ``block_name`` must be given.
    ``validFileOnly=1`` restricts to valid files in VALID/PRODUCTION
    datasets. ``migration=True`` (only used with a single LFN) yields flat
    per-lumi rows suitable for loading into another DB; otherwise rows are
    grouped per (logical_file_name, run_num) with the lumi numbers collected
    into a list.

    Supplying BOTH a list of LFNs and a run list is rejected (only one
    TOKEN_GENERATOR WITH clause can be prepended).
    """
    sql = ""
    wheresql = ""
    lfn_generator = ""
    run_generator = ""
    if not conn:
        dbsExceptionHandler(
            "dbsException-db-conn-failed",
            "Oracle/FileLumi/List. Expects db connection from upper layer."
        )
    if logical_file_name and not isinstance(logical_file_name, list):
        # single LFN
        binds = {'logical_file_name': logical_file_name}
        if int(validFileOnly) == 0:
            if migration:
                #migration always call with single file and include all files no matter valid or not.
                sql = self.sql + """
                    FROM {owner}FILE_LUMIS FL
                    JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
                    WHERE F.LOGICAL_FILE_NAME = :logical_file_name
                    """.format(owner=self.owner)
            else:
                sql = self.sql + """
                    , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME
                    FROM {owner}FILE_LUMIS FL
                    JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
                    WHERE F.LOGICAL_FILE_NAME = :logical_file_name
                    """.format(owner=self.owner)
        else:
            sql = self.sql + """
                , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME
                FROM {owner}FILE_LUMIS FL
                JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
                JOIN {owner}DATASETS D ON D.DATASET_ID = F.DATASET_ID
                JOIN {owner}DATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID
                WHERE F.IS_FILE_VALID = 1 AND F.LOGICAL_FILE_NAME = :logical_file_name
                AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION')
                """.format(owner=self.owner)
    elif logical_file_name and isinstance(logical_file_name, list):
        # list of LFNs: match through a TOKEN_GENERATOR WITH clause
        sql = self.sql + """
            , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME
            FROM {owner}FILE_LUMIS FL
            JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
            """.format(owner=self.owner)
        lfn_generator, binds = create_token_generator(logical_file_name)
        if int(validFileOnly) == 0:
            wheresql = "WHERE F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)"
        else:
            sql = sql + """
                JOIN {owner}DATASETS D ON D.DATASET_ID = F.DATASET_ID
                JOIN {owner}DATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID
                """.format(owner=self.owner)
            wheresql = """
                WHERE F.IS_FILE_VALID = 1
                AND F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)
                AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION')
                """
        sql = "{lfn_generator} {sql} {wheresql}".format(
            lfn_generator=lfn_generator, sql=sql, wheresql=wheresql)
    elif block_name:
        binds = {'block_name': block_name}
        if int(validFileOnly) == 0:
            sql = self.sql + """
                , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME
                FROM {owner}FILE_LUMIS FL
                JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
                JOIN {owner}BLOCKS B ON B.BLOCK_ID = F.BLOCK_ID
                WHERE B.BLOCK_NAME = :block_name""".format(owner=self.owner)
        else:
            sql = self.sql + """
                , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME
                FROM {owner}FILE_LUMIS FL
                JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
                JOIN {owner}DATASETS D ON D.DATASET_ID = F.DATASET_ID
                JOIN {owner}DATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID
                JOIN {owner}BLOCKS B ON B.BLOCK_ID = F.BLOCK_ID
                WHERE F.IS_FILE_VALID = 1 AND B.BLOCK_NAME = :block_name
                AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION')
                """.format(owner=self.owner)
    else:
        # NOTE: typo "logocal_file_name" is the live error message — kept as-is
        dbsExceptionHandler(
            'dbsException-invalid-input2',
            "FileLumi/List: Either logocal_file_name or block_name must be provided.",
            self.logger.exception,
            "FileLumi/List: Either logocal_file_name or block_name must be provided."
        )
    #
    if run_num != -1:
        run_list = []
        wheresql_run_list = ''
        wheresql_run_range = ''
        for r in parseRunRange(run_num):
            if isinstance(r, basestring) or isinstance(
                    r, int) or isinstance(r, long) or isinstance(r, str):
                run_list.append(str(r))
            if isinstance(r, run_tuple):
                if r[0] == r[1]:
                    dbsExceptionHandler(
                        'dbsException-invalid-input2',
                        "DBS run range must be apart at least by 1.",
                        self.logger.exception,
                        "DBS run range must be apart at least by 1.")
                wheresql_run_range = " FL.RUN_NUM between :minrun and :maxrun "
                binds.update({"minrun": r[0]})
                binds.update({"maxrun": r[1]})
        #
        if run_list:
            if len(run_list) == 1:
                # single run: plain bind, avoids the WITH clause entirely
                wheresql_run_list = " fl.RUN_NUM = :single_run "
                binds.update({"single_run": long(run_list[0])})
            else:
                wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                # WITH clause must precede the SELECT
                sql = "{run_generator}".format(
                    run_generator=run_generator) + sql
                binds.update(run_binds)
        if wheresql_run_range and wheresql_run_list:
            sql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
        elif wheresql_run_range and not wheresql_run_list:
            sql += " and " + wheresql_run_range
        elif not wheresql_run_range and wheresql_run_list:
            sql += " and " + wheresql_run_list
    self.logger.debug(sql)
    self.logger.debug(binds)
    # only one TOKEN_GENERATOR WITH clause can be prepended to the statement
    if run_generator and lfn_generator:
        dbsExceptionHandler(
            'dbsException-invalid-input2',
            "listFileLumiArray support single list of lfn or run_num. ",
            self.logger.exception,
            "listFileLumiArray support single list of lfn or run_num. ")
    cursors = self.dbi.processData(sql,
                                   binds,
                                   conn,
                                   transaction=False,
                                   returnCursor=True)
    result = []
    file_run_lumi = {}
    for i in cursors:
        result.extend(self.formatCursor(i))
    #for migration, we need flat format to load the data into another DB.
    if migration:  #YG 09/2015.
        for item in result:
            yield item
    else:
        # group lumis per (lfn, run) and emit one dict per pair
        for i in result:
            r = i['run_num']
            f = i['logical_file_name']
            file_run_lumi.setdefault((f, r), []).append(i['lumi_section_num'])
        for k, v in file_run_lumi.iteritems():
            yield {
                'logical_file_name': k[0],
                'run_num': k[1],
                'lumi_section_num': v
            }
        # free the potentially large intermediates before the generator closes
        del file_run_lumi
        del result
def execute(self, conn, dataset="", block_name="", logical_file_name="", release_version="",
            pset_hash="", app_name="", output_module_label="", run_num=-1, origin_site_name="",
            lumi_list=[], validFileOnly=0, transaction=False):
    """
    Build and run the Oracle/File/BriefList query.

    Assembles ``self.sql`` + ``self.fromsql`` + JOINs + WHERE from the given
    filters and returns the formatted rows as a list of dicts.

    :param conn: db connection from the upper layer (required).
    :param dataset: dataset name filter (joins DATASETS).
    :param block_name: block name filter (joins BLOCKS).
    :param logical_file_name: single lfn / pattern (GET) or list of lfns (POST).
    :param run_num: -1 (no filter), a single run, a 'min-max' range string,
                    or a list of runs / range strings.
    :param lumi_list: lumi section numbers; only allowed together with a single run.
    :param validFileOnly: 0 -> any non-invalid file; 1 -> valid files in
                          VALID/PRODUCTION datasets only.
    :returns: list of dicts (one per file row).
    :raises: via dbsExceptionHandler on bad input or missing connection.

    NOTE(review): at most ONE of the three list inputs (lfn list, run list,
    lumi list) may be used per call because each relies on the same
    TOKEN_GENERATOR with-clause; violations raise dbsException-invalid-input2.
    """
    if not conn:
        dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/File/BriefList. Expects db connection from upper layer.")
    #import pdb
    #pdb.set_trace()
    binds = {}
    # Each *_generator holds a TOKEN_GENERATOR with-clause when the matching
    # input arrived as a list; empty string otherwise.
    run_generator = ''
    lfn_generator = ''
    lumi_generator = ''
    generatedsql = ''
    basesql = self.sql
    joinsql = ''
    # Check if file is valid. YG 1/29/2015
    if int(validFileOnly) == 0:
        wheresql = " WHERE F.IS_FILE_VALID <> -1 "
    elif int(validFileOnly) == 1:
        wheresql = " WHERE F.IS_FILE_VALID = 1 "
    else:
        dbsExceptionHandler("dbsException-invalid-input", "invalid value for validFileOnly.")
    if logical_file_name:
        if type(logical_file_name) is not list:  # for GET call
            # '%' in the lfn switches '=' to a LIKE match.
            op = ("=", "like")["%" in logical_file_name]
            wheresql += " AND F.LOGICAL_FILE_NAME %s :logical_file_name " % op
            binds.update({"logical_file_name": logical_file_name})
        elif type(logical_file_name) is list:  # for POST call
            lfn_generator, binds2 = create_token_generator(logical_file_name)
            binds.update(binds2)
            wheresql += " AND F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)"
            #generatedsql = "{lfn_generator}".format(lfn_generator=lfn_generator)
        if not dataset:
            # No dataset filter: validFileOnly==1 still needs the dataset
            # joins to check DATASET_ACCESS_TYPE.
            if int(validFileOnly) == 1:
                joinsql += """ JOIN %sDATASETS D ON D.DATASET_ID = F.DATASET_ID
                               JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID """ % ((self.owner,)*2)
                wheresql += " AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') "
            else:
                pass
    if block_name:
        joinsql += " JOIN %sBLOCKS B ON B.BLOCK_ID = F.BLOCK_ID " % (self.owner)
        wheresql += " AND B.BLOCK_NAME = :block_name "
        binds.update({"block_name": block_name})
        if not dataset and int(validFileOnly) == 1:
            joinsql += """ JOIN %sDATASETS D ON D.DATASET_ID = F.DATASET_ID
                           JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID """ % ((self.owner,)*2)
            wheresql += " AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') "
        else:
            pass
    if dataset:
        joinsql += """ JOIN %sDATASETS D ON D.DATASET_ID = F.DATASET_ID """ % (self.owner)
        wheresql += " AND D.DATASET = :dataset "
        binds.update({"dataset": dataset})
        if int(validFileOnly) == 1:
            joinsql += " JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID " % (self.owner)
            wheresql += " AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') "
        else:
            pass
    # Output-module-config filters share the FOMC/OMC joins.
    if release_version or pset_hash or app_name or output_module_label:
        joinsql += """ LEFT OUTER JOIN %sFILE_OUTPUT_MOD_CONFIGS FOMC ON FOMC.FILE_ID = F.FILE_ID
                       LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = FOMC.OUTPUT_MOD_CONFIG_ID """ % ((self.owner,)*2)
    if release_version:
        joinsql += " LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID" % (self.owner)
        op = ("=", "like")["%" in release_version]
        wheresql += " AND RV.RELEASE_VERSION %s :release_version" % op
        binds.update(release_version=release_version)
    if pset_hash:
        joinsql += " LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID" % (self.owner)
        op = ("=", "like")["%" in pset_hash]
        wheresql += " AND PSH.PSET_HASH %s :pset_hash" % op
        binds.update(pset_hash = pset_hash)
    if app_name:
        joinsql += " LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID" % (self.owner)
        op = ("=", "like")["%" in app_name]
        wheresql += " AND AEX.APP_NAME %s :app_name" % op
        binds.update(app_name = app_name)
    if output_module_label:
        op = ("=", "like")["%" in output_module_label]
        wheresql += " AND OMC.OUTPUT_MODULE_LABEL %s :output_module_label" % op
        binds.update(output_module_label=output_module_label)
    if (origin_site_name):
        # BLOCKS may already be joined via block_name; avoid a duplicate join.
        if not block_name:
            joinsql += " JOIN %sBLOCKS B ON B.BLOCK_ID = F.BLOCK_ID" % (self.owner)
        op = ("=", "like")["%" in origin_site_name]
        wheresql += " AND B.ORIGIN_SITE_NAME %s :origin_site_name" % op
        binds.update({"origin_site_name": origin_site_name})
    if run_num != -1 and run_num:  # elimnate empty list run_num=[]
        # Joining FILE_LUMIS can duplicate file rows -> force DISTINCT.
        basesql = basesql.replace("SELECT", "SELECT DISTINCT")
        joinsql += " JOIN %sFILE_LUMIS FL on FL.FILE_ID=F.FILE_ID " % (self.owner)
        run_list = []
        wheresql_run_list = ''
        wheresql_run_range = ''
        wheresql_run_range_ct = 0  # counts range clauses to make unique bind names
        try:
            # Fast path: a single numeric run.
            # NOTE(review): bare except used as type dispatch (long() fails on
            # lists/range strings) -- it also hides unrelated errors.
            run_num = long(run_num)
            wheresql += " and FL.RUN_NUM = :run_num "
            binds.update({"run_num": run_num})
        except:
            if isinstance(run_num, basestring):
                # A 'min-max' range string.
                for r in parseRunRange(run_num):
                    if isinstance(r, run_tuple):
                        if r[0] == r[1]:
                            dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.")
                        if not lumi_list:
                            if wheresql_run_range_ct > 0:
                                wheresql_run_range += " or "
                            wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((wheresql_run_range_ct,)*2)
                            binds.update({"minrun%s" % wheresql_run_range_ct: r[0]})
                            binds.update({"maxrun%s" % wheresql_run_range_ct: r[1]})
                            wheresql_run_range_ct += 1
                        else:
                            dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.")
                    else:
                        dbsExceptionHandler('dbsException-invalid-input', "Invalid run_num. if run_num input as a string, it has to be converted into a int/long or in format of 'run_min-run_max'. ")
            elif type(run_num) is list and len(run_num) == 1:
                # One-element list: either a single run or a single range string.
                try:
                    run_num = long(run_num[0])
                    wheresql += " and FL.RUN_NUM = :run_num "
                    binds.update({"run_num": run_num})
                except:
                    for r in parseRunRange(run_num):
                        if isinstance(r, run_tuple):
                            if r[0] == r[1]:
                                dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.")
                            if not lumi_list:
                                if wheresql_run_range_ct > 0:
                                    wheresql_run_range += " or "
                                wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((wheresql_run_range_ct,)*2)
                                binds.update({"minrun%s" % wheresql_run_range_ct: r[0]})
                                binds.update({"maxrun%s" % wheresql_run_range_ct: r[1]})
                                wheresql_run_range_ct += 1
                            else:
                                dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.")
                        else:
                            dbsExceptionHandler('dbsException-invalid-input', "run_num as a list must be a number or a range str, such as ['10'], [10] or ['1-10']")
            else:
                # General list: mix of single runs (collected into run_list)
                # and range strings (expanded into BETWEEN clauses).
                for r in parseRunRange(run_num):
                    if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
                        run_list.append(str(r))
                    if isinstance(r, run_tuple):
                        if r[0] == r[1]:
                            dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.")
                        if not lumi_list:
                            if wheresql_run_range_ct > 0:
                                wheresql_run_range += " or "
                            wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((wheresql_run_range_ct,)*2)
                            binds.update({"minrun%s" % wheresql_run_range_ct: r[0]})
                            binds.update({"maxrun%s" % wheresql_run_range_ct: r[1]})
                            wheresql_run_range_ct += 1
                        else:
                            dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.")
        if run_list and not lumi_list:
            wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            run_generator, run_binds = create_token_generator(run_list)
            binds.update(run_binds)
        # Combine range and list predicates into the WHERE clause.
        if wheresql_run_range and wheresql_run_list:
            wheresql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
        elif wheresql_run_range and not wheresql_run_list:
            wheresql += " and ( " + wheresql_run_range + " ) "
        elif not wheresql_run_range and wheresql_run_list:
            wheresql += " and " + wheresql_run_list
    # Make sure when we have a lumi_list, there is only ONE run -- YG 14/05/2013
    # NOTE(review): run_list and the FL alias only exist when the run_num
    # branch above ran; a lumi_list without run_num would fail here -- confirm
    # upper layers always pair lumi_list with run_num.
    if (lumi_list and len(lumi_list) != 0):
        if len(run_list) > 1:
            dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.")
        wheresql += " AND FL.LUMI_SECTION_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
        #Do I need to convert lumi_list to be a str list? YG 10/03/13
        #Yes, you do. YG
        lumi_list = map(str, lumi_list)
        lumi_generator, lumi_binds = create_token_generator(lumi_list)
        binds.update(lumi_binds)
        #binds["run_num"]=run_list[0]
    #
    # Only one TOKEN_GENERATOR with-clause can appear in the final statement.
    if (run_generator and lfn_generator) or (lumi_generator and lfn_generator):
        self.logger.error("run_generator : %s" % run_generator)
        self.logger.error("lfn_generator: %s" % lfn_generator)
        self.logger.error("lumi_generator: %s " % lumi_generator)
        dbsExceptionHandler('dbsException-invalid-input2', message="cannot supply more than one list (lfn, run_num or lumi) at one query. ", serverError="dao/File/list cannot have more than one list (lfn, run_num or lumi) as input pareamters")
        # only one with and generators should be named differently for run and lfn.
        #sql = run_generator + lfn_generator + sql_sel + sql
    else:
        # At most one generator is non-empty; prefixing all three is safe.
        sql = run_generator + lfn_generator + lumi_generator + basesql + self.fromsql + joinsql + wheresql
    self.logger.debug("sql=%s" % sql)
    self.logger.debug("binds=%s" % binds)
    cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
    result = []
    for i in range(len(cursors)):
        result.extend(self.formatCursor(cursors[i]))
    return result
def execute(self, conn, block_name="", dataset="", run_num=-1, validFileOnly=0, sumOverLumi=0, transaction=False):
    """
    Yield one summary row (num_file, num_event, file_size, num_block,
    num_lumi) for a block or a dataset, built as correlated scalar
    subqueries against ``select ... from dual``.

    :param conn: db connection from the upper layer.
    :param block_name: summarize this block (takes precedence over dataset).
    :param dataset: summarize this dataset when block_name is empty.
    :param run_num: -1 (all runs) or runs/ranges understood by parseRunRange;
                    restricts via FILE_LUMIS.
    :param validFileOnly: 1 -> only valid files in VALID/PRODUCTION datasets.
    :param sumOverLumi: with run_num, 1 -> num_event summed per-lumi from
                        FILE_LUMIS (NULL if any per-lumi event_count is NULL);
                        0 -> num_event summed from FILES.
    :returns: generator of result dicts; returns nothing if neither
              block_name nor dataset is given.
    """
    binds = {}
    whererun = ''          # final run predicate used inside the subqueries
    run_list = []
    wheresql_run_list = ''
    wheresql_run_range = ''
    wheresql_isFileValid = ''
    join_valid_ds1 = ''    # FILES->DATASETS->ACCESS_TYPES joins (block queries)
    join_valid_ds2 = ''    # ACCESS_TYPES join only (dataset queries already join DATASETS)
    join_bk_fl = ''        # BLOCKS->FILES join for the num_block subquery
    sql = ''
    #
    if int(validFileOnly) == 1:
        wheresql_isFileValid = """ and f.is_file_valid = 1 and DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') """
        join_valid_ds1 = """ JOIN %sDATASETS D ON D.DATASET_ID = F.DATASET_ID
                             JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID """ % ((self.owner, ) * 2)
        join_valid_ds2 = """ JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID """ % ((self.owner, ) * 1)
        join_bk_fl = """ join %sfiles f on f.block_id = b.block_id """ % ((self.owner, ) * 1)
    #
    if run_num != -1:
        #
        for r in parseRunRange(run_num):
            if isinstance(r, basestring) or isinstance(r, (long, int)):
                #if not wheresql_run_list:
                #    wheresql_run_list = " fl.RUN_NUM = :run_list "
                run_list.append(str(r))
            if isinstance(r, run_tuple):
                if r[0] == r[1]:
                    dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.", self.logger.exception)
                wheresql_run_range = " fl.RUN_NUM between :minrun and :maxrun "
                binds.update({"minrun": int(r[0])})
                binds.update({"maxrun": int(r[1])})
        #
        if run_list:
            wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            run_generator, run_binds = create_token_generator(run_list)
            # The TOKEN_GENERATOR with-clause must prefix the final statement.
            sql = "{run_generator}".format(run_generator=run_generator)
            binds.update(run_binds)
        if wheresql_run_list and wheresql_run_range:
            whererun = wheresql_run_range + " or " + wheresql_run_list
        elif wheresql_run_list:
            whererun = wheresql_run_list
        elif wheresql_run_range:
            whererun = wheresql_run_range
    self.logger.debug('sumOverLumi=%s' % sumOverLumi)
    if block_name:
        if run_num != -1:
            if int(sumOverLumi) == 0:
                # Run-restricted block summary; num_event from FILES.
                sql = sql +\
                    """
                    select
                    (select count(f.file_id) from {owner}files f
                      join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                      where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                      and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_file,
                    nvl((select sum(f.event_count) event_count from {owner}files f
                      join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                      where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                      and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ),0) as num_event,
                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                      join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                      where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                      and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as file_size,
                    (select count(distinct b.block_id) from {owner}blocks b
                      join {owner}files f on f.block_id=b.block_id {join_valid_ds1}
                      where b.block_name=:block_name {wheresql_isFileValid}
                      and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_block,
                    (select count(*) from
                      (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                        join {owner}file_lumis fl on fl.file_id=f.file_id
                        join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                        where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and {whererun})
                    ) as num_lumi
                    from dual
                    """.format(owner=self.owner, whererun=whererun,
                               wheresql_isFileValid=wheresql_isFileValid,
                               join_valid_ds1=join_valid_ds1)
                binds.update({"block_name": block_name})
            elif int(sumOverLumi) == 1:
                # Run-restricted block summary; num_event summed from
                # FILE_LUMIS, NULL-guarded by the "not exists" subquery.
                sql = sql +\
                    """
                    select
                    (select count(f.file_id) from {owner}files f
                      join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                      where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                      and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_file,
                    ( with myFiles as
                        (select file_id from {owner}files f
                          join {owner}blocks b on b.block_id = f.block_id {join_valid_ds1}
                          where b.block_name=:block_name {wheresql_isFileValid}
                        )
                      select sum(fl.event_count) event_count from {owner}file_lumis fl
                      join myFiles on myFiles.file_id = fl.file_id
                      where {whererun}
                      and not exists (select fl2.file_id from {owner}file_lumis fl2
                                      join myFiles on myFiles.file_id = fl2.file_id
                                      where fl2.event_count is null)
                    ) as num_event,
                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                      join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                      where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                      and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as file_size,
                    (select count(distinct b.block_id) from {owner}blocks b
                      join {owner}files f on f.block_id=b.block_id {join_valid_ds1}
                      where b.block_name=:block_name {wheresql_isFileValid}
                      and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_block,
                    (select count(*) from
                      (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                        join {owner}file_lumis fl on fl.file_id=f.file_id
                        join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                        where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and {whererun})
                    ) as num_lumi
                    from dual
                    """.format(owner=self.owner, whererun=whererun,
                               wheresql_isFileValid=wheresql_isFileValid,
                               join_valid_ds1=join_valid_ds1)
                binds.update({"block_name": block_name})
        else:
            # Whole-block summary (no run restriction).
            sql = """
                select
                (select count(f.file_id) from {owner}files f
                  join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                  where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                ) as num_file,
                nvl((select sum(f.event_count) event_count from {owner}files f
                  join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                  where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                ),0) as num_event,
                (select nvl(sum(f.file_size),0) file_size from {owner}files f
                  join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                  where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                ) as file_size,
                (select nvl(count(distinct b.block_id),0) from {owner}blocks b
                  {join_bk_fl} {join_valid_ds1}
                  where b.block_name=:block_name {wheresql_isFileValid}
                ) as num_block,
                (select count(*) from
                  (select distinct l.lumi_section_num, l.run_num from {owner}files f
                    join {owner}file_lumis l on l.file_id=f.file_id
                    join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                    where b.BLOCK_NAME=:block_name {wheresql_isFileValid})
                ) as num_lumi
                from dual
                """.format(owner=self.owner, wheresql_isFileValid=wheresql_isFileValid,
                           join_valid_ds1=join_valid_ds1, join_bk_fl=join_bk_fl)
            binds.update({"block_name": block_name})
    elif dataset:
        if run_num != -1:
            if int(sumOverLumi) == 0:
                # Run-restricted dataset summary; num_event from FILES.
                sql = sql + \
                    """
                    select
                    (select count(f.file_id) from {owner}files f
                      join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                      where d.dataset=:dataset {wheresql_isFileValid}
                      and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_file,
                    nvl((select sum(f.event_count) event_count from {owner}files f
                      join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                      where d.dataset=:dataset {wheresql_isFileValid}
                      and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ),0) as num_event,
                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                      join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                      where d.dataset=:dataset {wheresql_isFileValid}
                      and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as file_size,
                    (select count(distinct b.block_id) from {owner}blocks b
                      join {owner}datasets d on d.dataset_id = b.dataset_id {join_valid_ds2}
                      join {owner}files f on f.block_id = b.block_id
                      where d.dataset=:dataset {wheresql_isFileValid}
                      and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_block,
                    (select count(*) from
                      (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                        join {owner}file_lumis fl on fl.file_id=f.file_id
                        join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                        where d.dataset=:dataset {wheresql_isFileValid} and {whererun})
                    ) as num_lumi
                    from dual
                    """.format(owner=self.owner, whererun=whererun,
                               wheresql_isFileValid=wheresql_isFileValid,
                               join_valid_ds2=join_valid_ds2)
                binds.update({"dataset": dataset})
            elif int(sumOverLumi) == 1:
                # Run-restricted dataset summary; num_event from FILE_LUMIS
                # with the same NULL guard as the block variant.
                sql = sql + \
                    """
                    select
                    (select count(f.file_id) from {owner}files f
                      join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                      where d.dataset=:dataset {wheresql_isFileValid}
                      and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_file,
                    ( with myFiles as
                        (select file_id from {owner}files f
                          join {owner}datasets d on d.dataset_id = f.dataset_id {join_valid_ds2}
                          where d.dataset=:dataset {wheresql_isFileValid}
                        )
                      select sum(fl.event_count) event_count from {owner}file_lumis fl
                      join myFiles on myFiles.file_id = fl.file_id
                      where {whererun}
                      and not exists (select fl2.file_id from {owner}file_lumis fl2
                                      join myFiles on myFiles.file_id = fl2.file_id
                                      where fl2.event_count is null)
                    ) as num_event,
                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                      join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                      where d.dataset=:dataset {wheresql_isFileValid}
                      and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as file_size,
                    (select count(distinct b.block_id) from {owner}blocks b
                      join {owner}datasets d on d.dataset_id = b.dataset_id {join_valid_ds2}
                      join {owner}files f on f.block_id = b.block_id
                      where d.dataset=:dataset {wheresql_isFileValid}
                      and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_block,
                    (select count(*) from
                      (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                        join {owner}file_lumis fl on fl.file_id=f.file_id
                        join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                        where d.dataset=:dataset {wheresql_isFileValid} and {whererun})
                    ) as num_lumi
                    from dual
                    """.format(owner=self.owner, whererun=whererun,
                               wheresql_isFileValid=wheresql_isFileValid,
                               join_valid_ds2=join_valid_ds2)
                binds.update({"dataset": dataset})
        else:
            # Whole-dataset summary (no run restriction).
            sql = """ \
                select
                (select count(f.file_id) from {owner}files f
                  join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                  where d.dataset=:dataset {wheresql_isFileValid}
                ) as num_file,
                nvl((select sum(f.event_count) event_count from {owner}files f
                  join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                  where d.dataset=:dataset {wheresql_isFileValid}
                ),0) as num_event,
                (select nvl(sum(f.file_size),0) file_size from {owner}files f
                  join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                  where d.dataset=:dataset {wheresql_isFileValid}
                ) as file_size,
                (select count(distinct b.block_id) from {owner}blocks b
                  join {owner}datasets d on d.dataset_id = b.dataset_id {join_valid_ds2}
                  {join_bk_fl}
                  where d.dataset=:dataset {wheresql_isFileValid}
                ) as num_block,
                (select count(*) from
                  (select distinct l.lumi_section_num, l.run_num from {owner}file_lumis l
                    join {owner}files f on f.file_id=l.file_id
                    join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                    where d.dataset=:dataset {wheresql_isFileValid})
                ) as num_lumi
                from dual
                """.format(owner=self.owner, wheresql_isFileValid=wheresql_isFileValid,
                           join_valid_ds2=join_valid_ds2, join_bk_fl=join_bk_fl)
            binds.update({"dataset": dataset})
    else:
        # Neither block_name nor dataset: nothing to summarize.
        return
    #self.logger.error(sql)
    #self.logger.error(binds)
    cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
    for i in cursors:
        d = self.formatCursor(i, size=100)
        if isinstance(d, list) or isinstance(d, GeneratorType):
            for elem in d:
                yield elem
        elif d:
            yield d
def execute(self, conn, dataset="", is_dataset_valid=1, parent_dataset="", release_version="",
            pset_hash="", app_name="", output_module_label="", processing_version=0,
            acquisition_era="", run_num=-1, physics_group_name="", logical_file_name="",
            primary_ds_name="", primary_ds_type="", processed_ds_name="", data_tier_name="",
            dataset_access_type="", prep_id="", create_by='', last_modified_by='',
            min_cdate=0, max_cdate=0, min_ldate=0, max_ldate=0, cdate=0, ldate=0,
            transaction=False):
    """
    Build and run the Oracle/Dataset/BriefList query.

    Two input shapes: a list ``dataset`` (POST) short-circuits to a
    TOKEN_GENERATOR IN-list plus optional access-type filter; otherwise (GET)
    the remaining keyword filters are ANDed into the WHERE clause.
    A '%' in a string filter switches '=' to LIKE.

    :param conn: db connection from the upper layer (required).
    :param run_num: -1 (no filter) or runs/ranges understood by parseRunRange.
    :returns: list of dicts, one per dataset row.
    :raises: via dbsExceptionHandler on missing connection / bad run range.
    """
    if not conn:
        dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/Dataset/BriefList. Expects db connection from upper layer.")
    selectsql = 'SELECT '
    joinsql = ''
    generatedsql = ''
    binds = {}
    wheresql = 'WHERE D.IS_DATASET_VALID=:is_dataset_valid '
    if dataset and type(dataset) is list:
        # for the POST method
        #wheresql += " AND D.DATASET=:dataset "
        # NOTE: rebinding binds to the generator's dict is intentional; the
        # remaining keys are added right below.
        ds_generator, binds = create_token_generator(dataset)
        wheresql += " AND D.DATASET in (SELECT TOKEN FROM TOKEN_GENERATOR)"
        generatedsql = "{ds_generator}".format(ds_generator=ds_generator)
        # NOTE(review): the parenthesized condition below is always True
        # (x != "%" OR x != "*" cannot both be False) -- effectively just
        # "if dataset_access_type"; probably meant "and". Harmless because
        # '%'/'*' then use LIKE, which matches everything.
        if dataset_access_type and (dataset_access_type != "%" or dataset_access_type != '*'):
            joinsql += " JOIN %sDATASET_ACCESS_TYPES DP on DP.DATASET_ACCESS_TYPE_ID= D.DATASET_ACCESS_TYPE_ID " % (self.owner)
            op = ("=", "like")["%" in dataset_access_type or "*" in dataset_access_type]
            wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
            binds['dataset_access_type'] = dataset_access_type
            binds['is_dataset_valid'] = is_dataset_valid
        else:
            binds['is_dataset_valid'] = is_dataset_valid
    else:
        #for the GET method
        binds.update(is_dataset_valid=is_dataset_valid)
        # Creation-date filters: exact date wins, then range / open-ended.
        if cdate != 0:
            wheresql += "AND D.CREATION_DATE = :cdate "
            binds.update(cdate = cdate)
        elif min_cdate != 0 and max_cdate != 0:
            wheresql += "AND D.CREATION_DATE BETWEEN :min_cdate and :max_cdate "
            binds.update(min_cdate = min_cdate)
            binds.update(max_cdate = max_cdate)
        elif min_cdate != 0 and max_cdate == 0:
            wheresql += "AND D.CREATION_DATE > :min_cdate "
            binds.update(min_cdate = min_cdate)
        elif min_cdate == 0 and max_cdate != 0:
            wheresql += "AND D.CREATION_DATE < :max_cdate "
            binds.update(max_cdate = max_cdate)
        else:
            pass
        # Last-modification-date filters, same pattern as creation date.
        if ldate != 0:
            wheresql += "AND D.LAST_MODIFICATION_DATE = :ldate "
            binds.update(ldate = ldate)
        elif min_ldate != 0 and max_ldate != 0:
            wheresql += "AND D.LAST_MODIFICATION_DATE BETWEEN :min_ldate and :max_ldate "
            binds.update(min_ldate = min_ldate)
            binds.update(max_ldate = max_ldate)
        elif min_ldate != 0 and max_ldate == 0:
            wheresql += "AND D.LAST_MODIFICATION_DATE > :min_ldate "
            binds.update(min_ldate = min_ldate)
        elif min_ldate == 0 and max_ldate != 0:
            wheresql += "AND D.LAST_MODIFICATION_DATE < :max_ldate "
            binds.update(max_ldate = max_ldate)
        else:
            pass
        if create_by:
            wheresql += " AND D.CREATE_BY = :create_by "
            binds.update(create_by = create_by)
        if last_modified_by:
            wheresql += " AND D.LAST_MODIFIED_BY = :last_modified_by "
            binds.update(last_modified_by = last_modified_by)
        if prep_id:
            wheresql += "AND D.prep_id = :prep_id "
            binds.update(prep_id = prep_id)
        if dataset and type(dataset) is str and dataset != "%":
            op = ("=", "like")["%" in dataset]
            wheresql += " AND D.DATASET %s :dataset " % op
            binds.update(dataset = dataset)
        if primary_ds_name and primary_ds_name != "%":
            joinsql += " JOIN %sPRIMARY_DATASETS P ON P.PRIMARY_DS_ID = D.PRIMARY_DS_ID " % (self.owner)
            op = ("=", "like")["%" in primary_ds_name]
            wheresql += " AND P.PRIMARY_DS_NAME %s :primary_ds_name " % op
            binds.update(primary_ds_name = primary_ds_name)
        if primary_ds_type and primary_ds_type != "%":
            # PRIMARY_DATASETS may already be joined via primary_ds_name.
            if not primary_ds_name:
                joinsql += " JOIN %sPRIMARY_DATASETS P ON P.PRIMARY_DS_ID = D.PRIMARY_DS_ID " % (self.owner)
            joinsql += " JOIN %sPRIMARY_DS_TYPES PDT ON PDT.PRIMARY_DS_TYPE_ID = P.PRIMARY_DS_TYPE_ID " % (self.owner)
            op = ("=", "like")["%" in primary_ds_type]
            wheresql += " AND PDT.PRIMARY_DS_TYPE %s :primary_ds_type " % op
            binds.update(primary_ds_type=primary_ds_type)
        if processed_ds_name and processed_ds_name != "%":
            joinsql += " JOIN %sPROCESSED_DATASETS PR ON PR.PROCESSED_DS_ID = D.PROCESSED_DS_ID " % (self.owner)
            op = ("=", "like")["%" in processed_ds_name]
            wheresql += " AND PR.PROCESSED_DS_NAME %s :processed_ds_name " % op
            binds.update(processed_ds_name = processed_ds_name)
        if data_tier_name and data_tier_name != "%":
            joinsql += " JOIN %sDATA_TIERS DT ON DT.DATA_TIER_ID = D.DATA_TIER_ID " % (self.owner)
            op = ("=", "like")["%" in data_tier_name]
            wheresql += " AND DT.DATA_TIER_NAME %s :data_tier_name " % op
            binds.update(data_tier_name=data_tier_name)
        # NOTE(review): same always-True parenthesized condition as in the
        # POST branch above.
        if dataset_access_type and (dataset_access_type != "%" or dataset_access_type != '*'):
            joinsql += " JOIN %sDATASET_ACCESS_TYPES DP on DP.DATASET_ACCESS_TYPE_ID= D.DATASET_ACCESS_TYPE_ID " % (self.owner)
            op = ("=", "like")["%" in dataset_access_type or "*" in dataset_access_type]
            wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
            binds.update(dataset_access_type=dataset_access_type)
        if physics_group_name and physics_group_name != "%":
            joinsql += " LEFT OUTER JOIN %sPHYSICS_GROUPS PH ON PH.PHYSICS_GROUP_ID = D.PHYSICS_GROUP_ID " % (self.owner)
            op = ("=", "like")["%" in physics_group_name]
            wheresql += " AND PH.PHYSICS_GROUP_NAME %s :physics_group_name " % op
            binds.update(physics_group_name=physics_group_name)
        if parent_dataset:
            joinsql += """ LEFT OUTER JOIN %sDATASET_PARENTS DSP ON DSP.THIS_DATASET_ID = D.DATASET_ID
                           LEFT OUTER JOIN %sDATASETS PDS ON PDS.DATASET_ID = DSP.PARENT_DATASET_ID """ % ((self.owner,)*2)
            wheresql += " AND PDS.DATASET = :parent_dataset "
            binds.update(parent_dataset = parent_dataset)
        # Output-module-config filters share the DOMC/OMC joins.
        if release_version or pset_hash or app_name or output_module_label:
            joinsql += """ LEFT OUTER JOIN %sDATASET_OUTPUT_MOD_CONFIGS DOMC ON DOMC.DATASET_ID = D.DATASET_ID
                           LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = DOMC.OUTPUT_MOD_CONFIG_ID """ % ((self.owner,)*2)
        if release_version:
            joinsql += " LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID " % (self.owner)
            op = ("=", "like")["%" in release_version]
            wheresql += " AND RV.RELEASE_VERSION %s :release_version " % op
            binds.update(release_version=release_version)
        if pset_hash:
            joinsql += " LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID " % (self.owner)
            op = ("=", "like")["%" in pset_hash]
            wheresql += " AND PSH.PSET_HASH %s :pset_hash " % op
            binds.update(pset_hash = pset_hash)
        if app_name:
            joinsql += " LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID " % (self.owner)
            op = ("=", "like")["%" in app_name]
            wheresql += " AND AEX.APP_NAME %s :app_name " % op
            binds.update(app_name = app_name)
        if output_module_label:
            op = ("=", "like")["%" in output_module_label]
            wheresql += " AND OMC.OUTPUT_MODULE_LABEL %s :output_module_label " % op
            binds.update(output_module_label=output_module_label)
        if processing_version != 0:
            joinsql += " LEFT OUTER JOIN %sPROCESSING_ERAS PE ON PE.PROCESSING_ERA_ID = D.PROCESSING_ERA_ID " % (self.owner)
            #op = ("=", "like")["%" in processing_version]
            op = "="  # processing_version is numeric; no wildcard support
            wheresql += " AND PE.PROCESSING_VERSION %s :pversion " % op
            binds.update(pversion=processing_version)
        if acquisition_era:
            joinsql += " LEFT OUTER JOIN %sACQUISITION_ERAS AE ON AE.ACQUISITION_ERA_ID = D.ACQUISITION_ERA_ID " % (self.owner)
            op = ("=", "like")["%" in acquisition_era]
            wheresql += " AND AE.ACQUISITION_ERA_NAME %s :aera " % op
            binds.update(aera=acquisition_era)
        if logical_file_name and logical_file_name != "%":
            # FILES join can duplicate dataset rows -> force DISTINCT.
            selectsql += "DISTINCT "
            joinsql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % self.owner
            wheresql += " AND FL.LOGICAL_FILE_NAME = :logical_file_name "
            binds.update(logical_file_name = logical_file_name)
        #
        if run_num != -1:
            # Reuse the FILES join added by the lfn filter when present.
            if not logical_file_name:
                selectsql += "DISTINCT "
                joinsql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % (self.owner)
            joinsql += " JOIN %sFILE_LUMIS FLLU on FLLU.FILE_ID=FL.FILE_ID " % (self.owner)
            run_list = []
            wheresql_run_list = ''
            wheresql_run_range = ''
            for r in parseRunRange(run_num):
                if isinstance(r, str) or isinstance(r, int) or isinstance(r, long):
                    run_list.append(str(r))
                if isinstance(r, run_tuple):
                    if r[0] == r[1]:
                        dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.")
                    wheresql_run_range = " FLLU.RUN_NUM between :minrun and :maxrun "
                    binds.update({"minrun": r[0]})
                    binds.update({"maxrun": r[1]})
            #
            if run_list:
                wheresql_run_list = " FLLU.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                generatedsql = "{run_generator}".format(run_generator=run_generator)
                binds.update(run_binds)
            if wheresql_run_range and wheresql_run_list:
                wheresql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
            elif wheresql_run_range and not wheresql_run_list:
                wheresql += " and " + wheresql_run_range
            elif not wheresql_run_range and wheresql_run_list:
                wheresql += " and " + wheresql_run_list
    # generatedsql (TOKEN_GENERATOR with-clause) must come first.
    sql = "".join((generatedsql, selectsql, self.basesql, joinsql, wheresql))
    #self.logger.debug( sql)
    #self.logger.debug( binds)
    cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
    result = []
    for i in cursors:
        d = self.formatCursor(i)
        if d:
            result += d
    return result
def execute(self, conn, dataset="", block_name="", data_tier_name="", origin_site_name="",
            logical_file_name="", run_num=-1, min_cdate=0, max_cdate=0, min_ldate=0,
            max_ldate=0, cdate=0, ldate=0, open_for_writing=-1, transaction=False):
    """
    Build and run the Oracle/Block list query.

    dataset: /a/b/c
    block: /a/b/c#d

    Combines ``self.sql`` + ``self.fromsql`` with JOINs/WHERE built from the
    given filters; a '%' in a string filter switches '=' to LIKE. Yields the
    formatted rows (generator).

    :param conn: db connection from the upper layer.
    :param run_num: -1 (no filter) or runs/ranges understood by parseRunRange;
                    adds FILES/FILE_LUMIS joins and FLM.RUN_NUM to the select.
    :param open_for_writing: 0 or 1 to filter on block writability; any other
                             value disables the filter.

    Fixes vs. previous revision:
      * :open_for_writing is now bound (was added to the SQL without a bind,
        causing ORA-01008 "not all variables bound").
      * the last-modification-date "max only" branch tested min_cdate/max_cdate
        instead of min_ldate/max_ldate.
      * andorwhere is recomputed before the data-tier clause, so combining
        dataset + data_tier_name without an earlier filter no longer emits
        two WHERE keywords.
    """
    binds = {}
    basesql = self.sql
    joinsql = ""
    wheresql = ""
    generatedsql = ""
    if logical_file_name and logical_file_name != "%":
        joinsql += " JOIN %sFILES FL ON FL.BLOCK_ID = B.BLOCK_ID " % (self.owner)
        op = ("=", "like")["%" in logical_file_name]
        wheresql += " WHERE LOGICAL_FILE_NAME %s :logical_file_name " % op
        binds.update(logical_file_name=logical_file_name)
    if block_name and block_name != "%":
        andorwhere = ("WHERE", "AND")[bool(wheresql)]
        op = ("=", "like")["%" in block_name]
        wheresql += " %s B.BLOCK_NAME %s :block_name " % ((andorwhere, op))
        binds.update(block_name=block_name)
    if data_tier_name or (dataset and dataset != "%"):
        joinsql += "JOIN %sDATASETS DS ON DS.DATASET_ID = B.DATASET_ID " % (self.owner)
        if dataset:
            andorwhere = ("WHERE", "AND")[bool(wheresql)]
            op = ("=", "like")["%" in dataset]
            wheresql += " %s DS.DATASET %s :dataset " % ((andorwhere, op))
            binds.update(dataset=dataset)
        if data_tier_name:
            # Recompute: the dataset clause above may have just opened the
            # WHERE, in which case this clause must use AND.
            andorwhere = ("WHERE", "AND")[bool(wheresql)]
            joinsql += "JOIN {owner}DATA_TIERS DT ON DS.DATA_TIER_ID=DT.DATA_TIER_ID ".format(owner=self.owner)
            wheresql += " %s DT.DATA_TIER_NAME=:data_tier_name " % (andorwhere)
            binds.update(data_tier_name=data_tier_name)
    if origin_site_name and origin_site_name != "%":
        op = ("=", "like")["%" in origin_site_name]
        wheresql += " AND B.ORIGIN_SITE_NAME %s :origin_site_name " % op
        binds.update(origin_site_name=origin_site_name)
    if open_for_writing == 0 or open_for_writing == 1:
        # Column name spelled OPEN_FOR_WRITTING as in the existing SQL --
        # TODO(review): confirm against the schema before changing it.
        wheresql += " AND B.OPEN_FOR_WRITTING = :open_for_writing "
        # FIX: bind was missing, every call with open_for_writing 0/1 raised
        # ORA-01008.
        binds.update(open_for_writing=open_for_writing)
    # Creation-date filters: exact date wins, then range / open-ended.
    # NOTE(review): these clauses start with AND unconditionally, so callers
    # must supply at least one filter above (preserved behavior).
    if cdate != 0:
        wheresql += "AND B.CREATION_DATE = :cdate "
        binds.update(cdate=cdate)
    elif min_cdate != 0 and max_cdate != 0:
        wheresql += "AND B.CREATION_DATE BETWEEN :min_cdate and :max_cdate "
        binds.update(min_cdate=min_cdate)
        binds.update(max_cdate=max_cdate)
    elif min_cdate != 0 and max_cdate == 0:
        wheresql += "AND B.CREATION_DATE > :min_cdate "
        binds.update(min_cdate=min_cdate)
    elif min_cdate == 0 and max_cdate != 0:
        wheresql += "AND B.CREATION_DATE < :max_cdate "
        binds.update(max_cdate=max_cdate)
    else:
        pass
    # Last-modification-date filters, same pattern as creation date.
    if ldate != 0:
        wheresql += "AND B.LAST_MODIFICATION_DATE = :ldate "
        binds.update(ldate=ldate)
    elif min_ldate != 0 and max_ldate != 0:
        wheresql += "AND B.LAST_MODIFICATION_DATE BETWEEN :min_ldate and :max_ldate "
        binds.update(min_ldate=min_ldate)
        binds.update(max_ldate=max_ldate)
    elif min_ldate != 0 and max_ldate == 0:
        wheresql += "AND B.LAST_MODIFICATION_DATE > :min_ldate "
        binds.update(min_ldate=min_ldate)
    elif min_ldate == 0 and max_ldate != 0:
        # FIX: condition previously tested min_cdate/max_cdate, so the
        # "max_ldate only" filter depended on the *creation*-date arguments.
        wheresql += "AND B.LAST_MODIFICATION_DATE < :max_ldate "
        binds.update(max_ldate=max_ldate)
    else:
        pass
    # one may provide a list of runs, so it has to be the last one in building the bind.
    if run_num != -1:
        # FILE_LUMIS join can duplicate block rows -> DISTINCT; expose RUN_NUM.
        basesql = basesql.replace("SELECT", "SELECT DISTINCT") + " , FLM.RUN_NUM "
        if not logical_file_name:
            # FILES already joined above when an lfn filter was given.
            joinsql += " JOIN %sFILES FL ON FL.BLOCK_ID = B.BLOCK_ID " % (self.owner)
        joinsql += " JOIN %sFILE_LUMIS FLM on FLM.FILE_ID = FL.FILE_ID " % (self.owner)
        run_list = []
        wheresql_run_list = ''
        wheresql_run_range = ''
        #
        for r in parseRunRange(run_num):
            if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
                run_list.append(str(r))
            if isinstance(r, run_tuple):
                if r[0] == r[1]:
                    dbsExceptionHandler('dbsException-invalid-input', "DBS run_num range must be apart at least by 1.", self.logger.exception)
                wheresql_run_range = " FLM.RUN_NUM between :minrun and :maxrun "
                binds.update({"minrun": r[0]})
                binds.update({"maxrun": r[1]})
        #
        if run_list:
            wheresql_run_list = " FLM.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            generatedsql, run_binds = create_token_generator(run_list)
            binds.update(run_binds)
        # Combine range and list predicates.
        if wheresql_run_range and wheresql_run_list:
            wheresql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
        elif wheresql_run_range and not wheresql_run_list:
            wheresql += " and " + wheresql_run_range
        elif not wheresql_run_range and wheresql_run_list:
            wheresql += " and " + wheresql_run_list
    #
    sql = " ".join((generatedsql, basesql, self.fromsql, joinsql, wheresql))
    cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
    for i in cursors:
        d = self.formatCursor(i, size=100)
        if isinstance(d, list) or isinstance(d, GeneratorType):
            for elem in d:
                yield elem
        elif d:
            yield d
def execute(self, conn, child_block_name='', child_lfn_list=[], transaction=False): sql = '' binds = {} child_ds_name = '' child_where = '' if child_block_name: child_ds_name = child_block_name.split('#')[0] parent_where = " where d.dataset = :child_ds_name ))" binds = {"child_ds_name": child_ds_name} else: dbsExceptionHandler( 'dbsException-invalid-input', "Missing child block_name for listFileParentsByLumi. ") # if not child_lfn_list: # most use cases child_where = " where b.block_name = :child_block_name )" binds.update({"child_block_name": child_block_name}) sql = """ with parents as ( """ +\ self.parent_sql +\ parent_where +\ """), """+\ """ children as ( """ +\ self.child_sql +\ child_where +\ """) select distinct cid, pid from children c inner join parents p on c.R = p.R and c.L = p.L """ else: # not commom child_where = """ where b.block_name = :child_block_name and f.logical_file_name in (SELECT TOKEN FROM TOKEN_GENERATOR) )) """ lfn_generator, bind = create_token_generator(child_lfn_list) binds.update(bind) sql = lfn_generator +\ """ with parents as ( """ +\ self.parent_sql +\ parent_where +\ """), """+\ """ children as ( """ +\ self.child_sql +\ child_where +\ """) select distinct cid, pid from children c inner join parents p on c.R = p.R and c.L = p.L """ print(sql) r = self.dbi.processData(sql, binds, conn, transaction=transaction) print(self.format(r)) return self.format(r) """
def execute(self, conn, dataset="", is_dataset_valid=1, parent_dataset="",
            release_version="", pset_hash="", app_name="", output_module_label="",
            global_tag="", processing_version=0, acquisition_era="", run_num=-1,
            physics_group_name="", logical_file_name="", primary_ds_name="",
            primary_ds_type="", processed_ds_name="", data_tier_name="", dataset_access_type="", prep_id="",
            create_by='', last_modified_by='', min_cdate=0, max_cdate=0, min_ldate=0, max_ldate=0, cdate=0,
            ldate=0, dataset_id=-1, transaction=False):
    """
    Generator yielding dataset rows matching the supplied filters
    (Oracle DAO behind listDatasets).

    Three mutually exclusive query shapes are assembled:
      * ``dataset`` given as a list  -> POST-style lookup via the
        TOKEN_GENERATOR with-clause (avoids Oracle's 1000-item IN limit);
      * ``dataset_id`` given as a non-int (range string "id1-id2" or a list)
        -> POST-style lookup by dataset id / id range;
      * otherwise -> GET-style query built up filter by filter (dates,
        names with optional '%' wildcards, run numbers, output-module
        configuration, parentage).

    Rows are streamed through cursors and yielded one at a time.
    NOTE(review): the query text assumes self.basesql supplies the column
    list plus FROM/JOINs providing the D/DP/P/PD/DT/PDT/PH/AE/PE aliases --
    confirm against the class definition, which is outside this view.
    """
    if not conn:
        dbsExceptionHandler("dbsException-failed-connect2host", "%s Oracle/Dataset/List. Expects db connection from upper layer.", self.logger.exception)
    sql = ""
    generatedsql = ''
    basesql = self.basesql
    binds = {}
    # Every variant starts from the validity filter; the bind is added later
    # in each branch.
    wheresql = "WHERE D.IS_DATASET_VALID = :is_dataset_valid "
    if dataset and type(dataset) is list:
        # for the POST method: list of dataset names via TOKEN_GENERATOR
        ds_generator, binds2 = create_token_generator(dataset)
        binds.update(binds2)
        wheresql += " AND D.DATASET in (SELECT TOKEN FROM TOKEN_GENERATOR)"
        generatedsql = "{ds_generator}".format(ds_generator=ds_generator)
        if dataset_access_type and dataset_access_type != "%":
            # '%' anywhere in the value switches '=' to 'like'
            op = ("=", "like")["%" in dataset_access_type]
            wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
            binds['dataset_access_type'] = dataset_access_type
            binds['is_dataset_valid'] = is_dataset_valid
        else:
            binds['is_dataset_valid'] = is_dataset_valid
        sql = generatedsql + 'SELECT' + basesql + wheresql
    elif type(dataset_id) is not int:
        # for the POST method
        # we treat the datset_id is the same way as run_num. It can be id1-id2, id or [id1,2,3 ...]
        dataset_id_list = []
        wheresql_dataset_id_list = ''
        wheresql_dataset_id_range = ''
        # NOTE: loop variable shadows the builtin id(); kept as in original.
        for id in parseRunRange(dataset_id):
            if isinstance(id, basestring) or isinstance(id, int) or isinstance(id, long):
                dataset_id_list.append(str(id))
            if isinstance(id, run_tuple):
                if id[0] == id[1]:
                    dbsExceptionHandler('dbsException-invalid-input', "DBS dataset_id range must be apart at least by 1.", self.logger.exception)
                # Only the LAST range survives: the binds are overwritten on
                # every iteration -- presumably a single range is expected.
                wheresql_dataset_id_range = " D.DATASET_ID between :minid and :maxid "
                binds.update({"minid": id[0]})
                binds.update({"maxid": id[1]})
        if dataset_id_list:
            ds_generator, binds2 = create_token_generator(dataset_id_list)
            binds.update(binds2)
            wheresql_dataset_id_list = " D.DATASET_ID in (SELECT TOKEN FROM TOKEN_GENERATOR)"
            generatedsql = "{ds_generator}".format(ds_generator=ds_generator)
        if dataset_access_type and dataset_access_type != "%":
            op = ("=", "like")["%" in dataset_access_type]
            wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
            binds['dataset_access_type'] = dataset_access_type
            binds['is_dataset_valid'] = is_dataset_valid
        else:
            binds['is_dataset_valid'] = is_dataset_valid
        # Combine the explicit id list and/or the single id range.
        # NOTE(review): when parseRunRange yields nothing usable, sql stays ""
        # and processData below is called with an empty statement -- confirm
        # upstream validation prevents that.
        if wheresql_dataset_id_list and wheresql_dataset_id_range:
            sql = generatedsql + 'SELECT' + basesql + wheresql + " and ( " \
                + wheresql_dataset_id_list + " or " + wheresql_dataset_id_range + " )"
        elif wheresql_dataset_id_list and not wheresql_dataset_id_range:
            sql = generatedsql + 'SELECT' + basesql + wheresql + " and " + wheresql_dataset_id_list
        elif not wheresql_dataset_id_list and wheresql_dataset_id_range:
            sql = generatedsql + 'SELECT' + basesql + wheresql + " and " + wheresql_dataset_id_range
    else:
        # for the GET method
        binds.update(is_dataset_valid=is_dataset_valid)
        # Creation-date filter: exact date, closed range, or open-ended range.
        if cdate != 0:
            wheresql += "AND D.CREATION_DATE = :cdate "
            binds.update(cdate=cdate)
        elif min_cdate != 0 and max_cdate != 0:
            wheresql += "AND D.CREATION_DATE BETWEEN :min_cdate and :max_cdate "
            binds.update(min_cdate=min_cdate)
            binds.update(max_cdate=max_cdate)
        elif min_cdate != 0 and max_cdate == 0:
            wheresql += "AND D.CREATION_DATE > :min_cdate "
            binds.update(min_cdate=min_cdate)
        elif min_cdate == 0 and max_cdate != 0:
            wheresql += "AND D.CREATION_DATE < :max_cdate "
            binds.update(max_cdate=max_cdate)
        else:
            pass
        # Last-modification-date filter, same scheme as above.
        if ldate != 0:
            wheresql += "AND D.LAST_MODIFICATION_DATE = :ldate "
            binds.update(ldate=ldate)
        elif min_ldate != 0 and max_ldate != 0:
            wheresql += "AND D.LAST_MODIFICATION_DATE BETWEEN :min_ldate and :max_ldate "
            binds.update(min_ldate=min_ldate)
            binds.update(max_ldate=max_ldate)
        elif min_ldate != 0 and max_ldate == 0:
            wheresql += "AND D.LAST_MODIFICATION_DATE > :min_ldate "
            binds.update(min_ldate=min_ldate)
        elif min_ldate == 0 and max_ldate != 0:
            wheresql += "AND D.LAST_MODIFICATION_DATE < :max_ldate "
            binds.update(max_ldate=max_ldate)
        else:
            pass
        if create_by:
            wheresql += " AND D.CREATE_BY = :create_by "
            binds.update(create_by=create_by)
        if last_modified_by:
            wheresql += " AND D.LAST_MODIFIED_BY = :last_modified_by "
            binds.update(last_modified_by=last_modified_by)
        if prep_id:
            wheresql += "AND D.PREP_ID = :prep_id "
            binds.update(prep_id=prep_id)
        if dataset and dataset != "%":
            op = ("=", "like")["%" in dataset]
            wheresql += " AND D.DATASET %s :dataset " % op
            binds.update(dataset=dataset)
        if dataset_id != -1:
            wheresql += " AND D.DATASET_ID = :dataset_id "
            binds.update(dataset_id=dataset_id)
        if primary_ds_name and primary_ds_name != "%":
            op = ("=", "like")["%" in primary_ds_name]
            wheresql += " AND P.PRIMARY_DS_NAME %s :primary_ds_name " % op
            binds.update(primary_ds_name=primary_ds_name)
        if processed_ds_name and processed_ds_name != "%":
            op = ("=", "like")["%" in processed_ds_name]
            wheresql += " AND PD.PROCESSED_DS_NAME %s :processed_ds_name " % op
            binds.update(processed_ds_name=processed_ds_name)
        if data_tier_name and data_tier_name != "%":
            op = ("=", "like")["%" in data_tier_name]
            wheresql += " AND DT.DATA_TIER_NAME %s :data_tier_name " % op
            binds.update(data_tier_name=data_tier_name)
        if dataset_access_type and dataset_access_type != "%":
            op = ("=", "like")["%" in dataset_access_type]
            wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
            binds.update(dataset_access_type=dataset_access_type)
        if primary_ds_type and primary_ds_type != "%":
            op = ("=", "like")["%" in primary_ds_type]
            wheresql += " AND PDT.PRIMARY_DS_TYPE %s :primary_ds_type " % op
            binds.update(primary_ds_type=primary_ds_type)
        if physics_group_name and physics_group_name != "%":
            op = ("=", "like")["%" in physics_group_name]
            wheresql += " AND PH.PHYSICS_GROUP_NAME %s :physics_group_name " % op
            binds.update(physics_group_name=physics_group_name)
        if parent_dataset:
            # Parentage requested: pull in the parent dataset name via
            # DATASET_PARENTS and add it to the select list.
            basesql = "PDS.DATASET PARENT_DATASET, " + basesql
            basesql += """
                LEFT OUTER JOIN %sDATASET_PARENTS DSP ON DSP.THIS_DATASET_ID = D.DATASET_ID
                LEFT OUTER JOIN %sDATASETS PDS ON PDS.DATASET_ID = DSP.PARENT_DATASET_ID
                """ % ((self.owner,)*2)
            wheresql += " AND PDS.DATASET = :parent_dataset "
            binds.update(parent_dataset=parent_dataset)
        if release_version or pset_hash or app_name or output_module_label:
            # Output-module configuration filters need the OMC join chain;
            # the matched columns are also added to the select list.
            basesql = """ OMC.OUTPUT_MODULE_LABEL, OMC.GLOBAL_TAG, RV.RELEASE_VERSION, PSH.PSET_HASH, AEX.APP_NAME, """ + basesql
            basesql += """
                LEFT OUTER JOIN %sDATASET_OUTPUT_MOD_CONFIGS DOMC ON DOMC.DATASET_ID = D.DATASET_ID
                LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = DOMC.OUTPUT_MOD_CONFIG_ID
                LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID
                LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID
                LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID
                """ % ((self.owner,)*5)
        if release_version:
            op = ("=", "like")["%" in release_version]
            wheresql += " AND RV.RELEASE_VERSION %s :release_version " % op
            binds.update(release_version=release_version)
        if pset_hash:
            op = ("=", "like")["%" in pset_hash]
            wheresql += " AND PSH.PSET_HASH %s :pset_hash " % op
            binds.update(pset_hash=pset_hash)
        if app_name:
            op = ("=", "like")["%" in app_name]
            wheresql += " AND AEX.APP_NAME %s :app_name " % op
            binds.update(app_name=app_name)
        if output_module_label:
            op = ("=", "like")["%" in output_module_label]
            wheresql += " AND OMC.OUTPUT_MODULE_LABEL %s :output_module_label " % op
            binds.update(output_module_label=output_module_label)
        if global_tag:
            op = ("=", "like")["%" in global_tag]
            wheresql += " AND OMC.GLOBAL_TAG %s :global_tag " % op
            binds.update(global_tag=global_tag)
        if processing_version != 0:
            #op = ("=", "like")["%" in processing_version]
            op = "="
            wheresql += " AND PE.PROCESSING_VERSION %s :pversion " % op
            binds.update(pversion=processing_version)
        if acquisition_era:
            op = ("=", "like")["%" in acquisition_era]
            wheresql += " AND AE.ACQUISITION_ERA_NAME %s :aera " % op
            binds.update(aera=acquisition_era)
        # This should resolve to original cases that were in the business logic
        if (not logical_file_name or logical_file_name == "%") and (run_num == -1):
            # """JUST EXECUTE THE QUERY HERE"""
            sql = "SELECT " + basesql + wheresql
        elif (run_num == -1) and logical_file_name and logical_file_name != "%":
            # """DO execute 1 thingy"""
            # LFN filter: join FILES and restrict by exact LFN.
            sql = "SELECT DISTINCT " + basesql
            sql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % self.owner
            wheresql += " AND FL.LOGICAL_FILE_NAME = :logical_file_name "
            binds.update(logical_file_name=logical_file_name)
            sql += wheresql
        elif (run_num != -1):
            # """Do execute 2 thingy"""
            # Run filter: join FILES and FILE_LUMIS; DISTINCT guards against
            # the fan-out of the per-lumi join.
            sql += "SELECT DISTINCT " + basesql
            if logical_file_name:
                sql += "JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % self.owner
                wheresql += " AND FL.LOGICAL_FILE_NAME = :logical_file_name "
                binds.update(logical_file_name=logical_file_name)
            else:
                sql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % (self.owner)
            #
            sql += " JOIN %sFILE_LUMIS FLLU on FLLU.FILE_ID=FL.FILE_ID " % (self.owner)
            run_list = []
            wheresql_run_list = ''
            wheresql_run_range = ''
            for r in parseRunRange(run_num):
                if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
                    run_list.append(str(r))
                if isinstance(r, run_tuple):
                    if r[0] == r[1]:
                        dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.", self.logger.exception)
                    # Only the last range survives; binds overwritten per iteration.
                    wheresql_run_range = " FLLU.RUN_NUM between :minrun and :maxrun "
                    binds.update({"minrun": r[0]})
                    binds.update({"maxrun": r[1]})
            #
            if run_list:
                wheresql_run_list = " FLLU.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                # The with-clause must precede the SELECT, so prepend it.
                sql = "{run_generator}".format(run_generator=run_generator) + sql
                binds.update(run_binds)
            if wheresql_run_range and wheresql_run_list:
                wheresql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
            elif wheresql_run_range and not wheresql_run_list:
                wheresql += " and " + wheresql_run_range
            elif not wheresql_run_range and wheresql_run_list:
                wheresql += " and " + wheresql_run_list
            sql += wheresql
        else:
            dbsExceptionHandler("dbsException-invalid-input", "Oracle/Dataset/List. Proper parameters are not\
 provided for listDatasets call.", self.logger.exception)
    #self.logger.error( sql)
    #self.logger.error("binds=%s" %binds)
    # Stream results: formatCursor may hand back a list/generator of rows or
    # a single row chunk.
    cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
    for i in cursors:
        d = self.formatCursor(i, size=100)
        if isinstance(d, list) or isinstance(d, GeneratorType):
            for elem in d:
                yield elem
        elif d:
            yield d
def execute(self, conn, dataset="", block_name="", logical_file_name="",
            release_version="", pset_hash="", app_name="", output_module_label="",
            run_num=-1, origin_site_name="", lumi_list=[], validFileOnly=0, sumOverLumi=0,
            transaction=False):
    """
    Generator yielding file rows matching the given filters
    (Oracle DAO behind listFiles).

    The statement is assembled from self.sql_sel (select list) and
    self.sql_cond (FROM/JOIN/condition skeleton).  Run filtering joins
    FILE_LUMIS; lumi and LFN lists go through TOKEN_GENERATOR with-clauses.
    At most one list-typed input (lfn, run_num or lumi) is allowed per call.

    NOTE(review): lumi_list uses a mutable default argument ([]); it is
    reassigned (never mutated) below, so this is latent rather than an
    active bug -- still worth cleaning up at the next interface change.
    """
    if not conn:
        dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/File/List. Expects db connection from upper layer.", self.logger.exception)
    sql = self.sql_cond
    binds = {}
    sql_sel = self.sql_sel
    run_generator = ''
    lfn_generator = ''
    lumi_generator = ''
    sql_lumi = ''
    #import pdb
    #pdb.set_trace()
    if run_num != -1:
        # Run filtering needs the per-lumi table; DISTINCT compensates the
        # row fan-out of the join, and RUN_NUM is added to the select list.
        sql_sel = sql_sel.replace("SELECT", "SELECT DISTINCT") + " , FL.RUN_NUM "
        sql += " JOIN %sFILE_LUMIS FL on FL.FILE_ID=F.FILE_ID " % (self.owner)
    if release_version or pset_hash or app_name or output_module_label:
        # Output-module configuration filters need the OMC join chain.
        sql += """LEFT OUTER JOIN %sFILE_OUTPUT_MOD_CONFIGS FOMC ON FOMC.FILE_ID = F.FILE_ID
            LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = FOMC.OUTPUT_MOD_CONFIG_ID
            LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID
            LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID
            LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID
            """ % ((self.owner,)*5)
    #FIXME : the status check should only be done with normal/super user
    #sql += """WHERE F.IS_FILE_VALID = 1"""
    # for the time being lests list all files
    #WMAgent requires validaFileOnly. YG 1/30/2015
    if int(validFileOnly) == 0:
        sql += """ WHERE F.IS_FILE_VALID <> -1 """
    elif int(validFileOnly) == 1:
        sql += """ WHERE F.IS_FILE_VALID = 1 AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') """
    else:
        dbsExceptionHandler("dbsException-invalid-input", "invalid value for validFileOnly.", self.logger.exception)
    if block_name:
        if isinstance(block_name, list):
            dbsExceptionHandler('dbsException-invalid-input', 'Input block_name is a list instead of string.', self.logger.exception)
        sql += " AND B.BLOCK_NAME = :block_name"
        binds.update({"block_name": block_name})
    if logical_file_name:
        if type(logical_file_name) is not list:
            # Single LFN: '%' switches '=' to 'like'.
            op = ("=", "like")["%" in logical_file_name]
            sql += " AND F.LOGICAL_FILE_NAME %s :logical_file_name" % op
            binds.update({"logical_file_name": logical_file_name})
        if type(logical_file_name) is list:
            # LFN list: TOKEN_GENERATOR sidesteps the 1000-item IN limit.
            ds_generator, binds2 = create_token_generator(logical_file_name)
            binds.update(binds2)
            sql += " AND F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)"
            lfn_generator = "{ds_generator}".format(ds_generator=ds_generator)
    if dataset:
        if isinstance(dataset, list):
            dbsExceptionHandler('dbsException-invalid-input', 'Input dataset is a list instead of string.', self.logger.exception)
        sql += " AND D.DATASET = :dataset"
        binds.update({"dataset": dataset})
    if release_version:
        op = ("=", "like")["%" in release_version]
        sql += " AND RV.RELEASE_VERSION %s :release_version" % op
        binds.update({"release_version": release_version})
    if pset_hash:
        op = ("=", "like")["%" in pset_hash]
        sql += " AND PSH.PSET_HASH %s :pset_hash" % op
        binds.update({"pset_hash": pset_hash})
    if app_name:
        op = ("=", "like")["%" in app_name]
        sql += " AND AEX.APP_NAME %s :app_name" % op
        binds.update({"app_name": app_name})
    if output_module_label:
        op = ("=", "like")["%" in output_module_label]
        sql += " AND OMC.OUTPUT_MODULE_LABEL %s :output_module_label" % op
        binds.update({"output_module_label": output_module_label})
    if (origin_site_name):
        op = ("=", "like")["%" in origin_site_name]
        sql += " AND B.ORIGIN_SITE_NAME %s :origin_site_name" % op
        binds.update({"origin_site_name": origin_site_name})
    if run_num != -1 and run_num:  # elimnate empty list run_num=[]
        run_list = []
        wheresql_run_list = ''
        wheresql_run_range = ''
        wheresql_run_range_ct = 0
        try:
            # Single scalar run (int or numeric string).
            run_num = long(run_num)
            sql += " and FL.RUN_NUM = :run_num "
            binds.update({"run_num": run_num})
        except:
            # Not a scalar: a range string, a one-element list, or a list of
            # runs/ranges.  Each ored range gets its own numbered bind pair.
            if isinstance(run_num, basestring):
                for r in parseRunRange(run_num):
                    if isinstance(r, run_tuple):
                        if r[0] == r[1]:
                            dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.", self.logger.exception)
                        if not lumi_list:
                            if wheresql_run_range_ct > 0:
                                wheresql_run_range += " or "
                            wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((wheresql_run_range_ct,)*2)
                            binds.update({"minrun%s" % wheresql_run_range_ct: r[0]})
                            binds.update({"maxrun%s" % wheresql_run_range_ct: r[1]})
                            wheresql_run_range_ct += 1
                        else:
                            dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.", self.logger.exception)
                    else:
                        dbsExceptionHandler('dbsException-invalid-input', "Invalid run_num. if run_num input as a string, it has to be converted into a int/long or in format of 'run_min-run_max'. ", self.logger.exception)
            elif type(run_num) is list and len(run_num) == 1:
                try:
                    run_num = long(run_num[0])
                    sql += " and FL.RUN_NUM = :run_num "
                    binds.update({"run_num": run_num})
                except:
                    for r in parseRunRange(run_num):
                        if isinstance(r, run_tuple):
                            if r[0] == r[1]:
                                dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.", self.logger.exception)
                            if not lumi_list:
                                if wheresql_run_range_ct > 0:
                                    wheresql_run_range += " or "
                                wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((wheresql_run_range_ct,)*2)
                                binds.update({"minrun%s" % wheresql_run_range_ct: r[0]})
                                binds.update({"maxrun%s" % wheresql_run_range_ct: r[1]})
                                wheresql_run_range_ct += 1
                            else:
                                dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.", self.logger.exception)
                        else:
                            dbsExceptionHandler('dbsException-invalid-input', "run_num as a list must be a number or a range str, such as ['10'], [10] or ['1-10']", self.logger.exception)
            else:
                # General list: plain runs are collected for TOKEN_GENERATOR,
                # ranges become ored BETWEEN clauses.
                for r in parseRunRange(run_num):
                    if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
                        run_list.append(str(r))
                    if isinstance(r, run_tuple):
                        if r[0] == r[1]:
                            dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.", self.logger.exception)
                        if not lumi_list:
                            if wheresql_run_range_ct > 0:
                                wheresql_run_range += " or "
                            wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((wheresql_run_range_ct,)*2)
                            binds.update({"minrun%s" % wheresql_run_range_ct: r[0]})
                            binds.update({"maxrun%s" % wheresql_run_range_ct: r[1]})
                            wheresql_run_range_ct += 1
                        else:
                            dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.", self.logger.exception)
        #
        if run_list and not lumi_list:
            wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            run_generator, run_binds = create_token_generator(run_list)
            #sql = "{run_generator}".format(run_generator=run_generator) + sql
            binds.update(run_binds)
        if wheresql_run_range and wheresql_run_list:
            sql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
        elif wheresql_run_range and not wheresql_run_list:
            sql += " and " + wheresql_run_range
        elif not wheresql_run_range and wheresql_run_list:
            sql += " and " + wheresql_run_list
        # Make sure when we have a lumi_list, there is only ONE run -- YG 14/05/2013
        if (lumi_list and len(lumi_list) != 0):
            if len(run_list) > 1:
                dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.", self.logger.exception)
            sql += " AND FL.LUMI_SECTION_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            sql_lumi = " FL.LUMI_SECTION_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            #Do I need to convert lumi_list to be a str list? YG 10/03/13
            #Yes, you do. YG
            lumi_list = map(str, lumi_list)
            lumi_generator, lumi_binds = create_token_generator(lumi_list)
            #sql_sel = "{lumi_generator}".format(lumi_generator=lumi_generator) + sql_sel
            binds.update(lumi_binds)
            #binds["run_num"]=run_list[0]
    #
    if (run_generator and lfn_generator) or (lumi_generator and lfn_generator):
        # At most one TOKEN_GENERATOR with-clause can be used per statement.
        dbsExceptionHandler('dbsException-invalid-input2', "cannot supply more than one list (lfn, run_num or lumi) at one query", self.logger.exception, "dao/File/list cannot have more than one list (lfn, run_num, lumi) as input pareamters")
        # only one with and generators should be named differently for run and lfn.
        #sql = run_generator + lfn_generator + lumi_generator + sql_sel + sql
    else:
        if run_num != -1 and int(sumOverLumi) == 1:
            # sumOverLumi: replace the per-file event count with the sum of
            # per-lumi event counts, nulling it for files that have any lumi
            # row without an event count.
            sql_sel = sql_sel.replace('F.EVENT_COUNT,', '')
            sql = \
                'with myfiles as ( ' + sql_sel + sql + """) select mf.* ,
                (case when badi.file_id = mc.file_id and badi.run_num=mc.run_num and badi.bid is null then null
                else mc.event_count end) as event_count
                from myfiles mf, EVENT_COUNT_WITH_LUMI,
                ( select distinct fl.file_id, fl.run_num, null as bid from %sfile_lumis fl
                join myfiles my2 on my2.file_id=fl.file_id and my2.run_num=fl.run_num
                where fl.event_count is null )badi
                where mf.file_id= mc.file_id and mf.run_num=mc.run_num
                """ % self.owner
            if not lumi_list:
                # EVENT_COUNT_WITH_LUMI placeholder is textually replaced by
                # the aggregating subquery aliased "mc".
                ent_ct = """ (select sum(fl.event_count) as event_count, fl.file_id, fl.run_num
                    from %sfile_lumis fl
                    join myfiles mf on mf.file_id=fl.file_id and mf.run_num=fl.run_num
                    group by fl.file_id, fl.run_num) mc """ % self.owner
                sql = sql.replace('EVENT_COUNT_WITH_LUMI', ent_ct)
            else:
                # NOTE(review): "where sql_lumi" below puts the literal text
                # 'sql_lumi' into the SQL instead of interpolating the
                # sql_lumi variable built above -- this branch looks broken;
                # confirm before relying on sumOverLumi + lumi_list.
                ent_ct = lumi_generator + """ (select sum(fl.event_count) as event_count, fl.file_id, fl.run_num
                    from %sfile_lumis fl
                    join myfiles mf on mf.file_id=fl.file_id and mf.run_num=fl.run_num
                    where sql_lumi
                    group by fl.file_id, fl.run_num) mc """ % self.owner
                sql = sql.replace('EVENT_COUNT_WITH_LUMI', ent_ct)
        else:
            sql = run_generator + lfn_generator + lumi_generator + sql_sel + sql
    self.logger.debug("SQL: " + sql)
    self.logger.debug("***********************")
    self.logger.debug(binds)
    try:
        self.logger.debug("******before cursor**********")
        cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
        self.logger.debug("******after cursor**********")
    except Exception as e:
        # NOTE(review): the exception is only logged, not re-raised, so
        # "cursors" is unbound below and the loop raises NameError instead
        # of the original error -- confirm intended behavior.
        self.logger.error(str(e))
    for i in cursors:
        d = self.formatCursor(i, size=100)
        if isinstance(d, list) or isinstance(d, GeneratorType):
            for elem in d:
                yield elem
        elif d:
            yield d
def execute(self, conn, block_name="", dataset="", detail=0, transaction=False):
    """
    Return summary counts for one dataset or for one/more block names.

    Without ``detail``: a single row with aggregated FILE_SIZE, NUM_FILE and
    NUM_EVENT (each NVL'ed to 0 when nothing matches).
    With ``detail``: one row per block carrying block_name, num_file,
    file_size, num_event and open_for_writing.

    ``dataset`` takes precedence over ``block_name``.  Block names are fed
    through TOKEN_GENERATOR to sidestep Oracle's 1000-item IN-list limit.
    NOTE(review): "{owner} BLOCKS" (with a space) is preserved from the
    original; this appears to rely on Oracle accepting whitespace after the
    schema qualifier's dot -- confirm self.owner ends with '.'.
    """
    binds = {}
    generatedsql = ''
    if dataset:
        where_clause = "WHERE DS.dataset=:dataset"
        if detail:
            # Per-block detail: t1 aggregates event counts per block, the
            # outer select picks up file/size counters from BLOCKS itself.
            sql = """
                with t1 as( SELECT BS.BLOCK_NAME as BLOCK_NAME, NVL(SUM(FS.EVENT_COUNT),0) as NUM_EVENT
                FROM {owner}FILES FS
                {block_join} {dataset_join} {where_clause} group by BS.BLOCK_NAME )
                select b.block_name as block_name, b.file_count as num_file, b.block_size as file_size,
                t1.num_event as num_event, b.open_for_writing as open_for_writing
                from {owner}blocks b, t1
                where t1.block_name = b.block_name
                """.format(owner=self.owner, where_clause=where_clause,
                           dataset_join=self.dataset_join, block_join=self.block_join)
        else:
            # Aggregate totals for the whole dataset in one scalar-subquery row.
            sql = """SELECT (
                SELECT NVL(SUM(BS.BLOCK_SIZE), 0) FROM {owner}BLOCKS BS {dataset_join} {where_clause}
                ) AS FILE_SIZE, (
                SELECT NVL(SUM(BS.FILE_COUNT),0) FROM {owner} BLOCKS BS {dataset_join} {where_clause}
                ) AS NUM_FILE, (
                SELECT NVL(SUM(FS.EVENT_COUNT),0) FROM {owner}FILES FS {block_join} {dataset_join} {where_clause}
                ) AS NUM_EVENT FROM DUAL""".format(owner=self.owner, where_clause=where_clause,
                                                   dataset_join=self.dataset_join, block_join=self.block_join)
        binds.update(dataset=dataset)
    else:
        # Oracle IN only supports a maximum of 1,000 values
        # (ORA-01795: maximum number of expressions in a list is 1000)
        if isinstance(block_name, basestring):
            block_name = [block_name]
        block_clause = "BS.BLOCK_NAME IN (SELECT TOKEN FROM TOKEN_GENERATOR) "
        generatedsql, run_binds = create_token_generator(block_name)
        binds.update(run_binds)
        where_clause = "WHERE {block_clause}".format(block_clause=block_clause)
        if detail:
            sql = generatedsql + \
                """ select b.block_name as block_name, b.file_count as num_file, b.block_size as file_size,
                t1.num_event as num_event, b.open_for_writing as open_for_writing
                from {owner}blocks b,
                (select bs.block_name as block_name, NVL(sum(fs.event_count),0) as num_event
                from {owner}files fs {block_join} {where_clause} group by bs.block_name )t1
                where t1.block_name = b.block_name """.format(owner=self.owner, block_join=self.block_join, where_clause=where_clause)
        else:
            sql = generatedsql + \
                """SELECT (
                SELECT NVL(SUM(BS.BLOCK_SIZE),0) FROM {owner}BLOCKS BS {where_clause}
                ) AS FILE_SIZE, (
                SELECT NVL(SUM(BS.FILE_COUNT),0) FROM {owner} BLOCKS BS {where_clause}
                ) AS NUM_FILE, (
                SELECT NVL(SUM(FS.EVENT_COUNT),0) FROM {owner}FILES FS {block_join} {where_clause}
                ) AS NUM_EVENT FROM DUAL""".format(owner=self.owner, where_clause=where_clause,
                                                   block_join=self.block_join)
    cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
    result = []
    #self.logger.debug(sql)
    #self.logger.debug(binds)
    for cursor in cursors:
        result.extend(self.formatCursor(cursor))
    return result
def execute(self, conn, block_name="", dataset="", transaction=False):
    """
    Return aggregate FILE_SIZE / NUM_FILE / NUM_EVENT for one dataset or for
    one/more block names.

    :param conn: DB connection handed down from the upper layer.
    :param block_name: single block name or list of block names; used only
        when ``dataset`` is empty.
    :param dataset: dataset name; takes precedence over ``block_name``.
    :returns: list with one formatted row of the three aggregates.

    NOTE(review): unlike the NVL variant of this DAO, the plain SUM() here
    yields NULL (None) when nothing matches -- left unchanged since callers
    may depend on it.
    """
    binds = {}
    generatedsql = ''
    if dataset:
        where_clause = "WHERE DS.dataset=:dataset"
        sql = """SELECT (
            SELECT SUM(BS.BLOCK_SIZE) FROM {owner}BLOCKS BS {dataset_join} {where_clause}
            ) AS FILE_SIZE, (
            SELECT SUM(BS.FILE_COUNT) FROM {owner} BLOCKS BS {dataset_join} {where_clause}
            ) AS NUM_FILE, (
            SELECT SUM(FS.EVENT_COUNT) FROM {owner}FILES FS {block_join} {dataset_join} {where_clause}
            ) AS NUM_EVENT FROM DUAL""".format(owner=self.owner, where_clause=where_clause,
                                               dataset_join=self.dataset_join, block_join=self.block_join)
        binds.update(dataset=dataset)
    else:
        # Oracle IN only supports a maximum of 1,000 values
        # (ORA-01795: maximum number of expressions in a list is 1000),
        # so block names go through the TOKEN_GENERATOR with-clause.
        # Use basestring (not str) so py2 unicode block names are wrapped
        # too, matching the sibling summary DAO; otherwise a unicode name
        # would be iterated character-by-character by the token generator.
        if isinstance(block_name, basestring):
            block_name = [block_name]
        block_clause = "BS.BLOCK_NAME IN (SELECT TOKEN FROM TOKEN_GENERATOR) "
        generatedsql, run_binds = create_token_generator(block_name)
        binds.update(run_binds)
        where_clause = "WHERE {block_clause}".format(block_clause=block_clause)
        sql = generatedsql + \
            """SELECT (
            SELECT SUM(BS.BLOCK_SIZE) FROM {owner}BLOCKS BS {where_clause}
            ) AS FILE_SIZE, (
            SELECT SUM(BS.FILE_COUNT) FROM {owner} BLOCKS BS {where_clause}
            ) AS NUM_FILE, (
            SELECT SUM(FS.EVENT_COUNT) FROM {owner}FILES FS {block_join} {where_clause}
            ) AS NUM_EVENT FROM DUAL""".format(owner=self.owner, where_clause=where_clause,
                                               block_join=self.block_join)
    cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
    result = []
    for cursor in cursors:
        result.extend(self.formatCursor(cursor))
    return result
def execute(self, conn, child_block_name='', child_lfn_list=[], transaction=False): sql = '' binds = {} child_ds_name = '' child_where = '' if child_block_name: child_ds_name = child_block_name.split('#')[0] parent_where = " where d.dataset = :child_ds_name ))" binds ={"child_ds_name": child_ds_name} else: dbsExceptionHandler('dbsException-invalid-input', "Missing child block_name for listFileParentsByLumi. ") # if not child_lfn_list: # most use cases child_where = " where b.block_name = :child_block_name )" binds.update({"child_block_name": child_block_name}) sql = """ with parents as ( """ +\ self.parent_sql +\ parent_where +\ """), """+\ """ children as ( """ +\ self.child_sql +\ child_where +\ """) select distinct cid, pid from children c inner join parents p on c.R = p.R and c.L = p.L """ else: # not commom child_where = """ where b.block_name = :child_block_name and f.logical_file_name in (SELECT TOKEN FROM TOKEN_GENERATOR) )) """ lfn_generator, bind = create_token_generator(child_lfn_list) binds.update(bind) sql = lfn_generator +\ """ with parents as ( """ +\ self.parent_sql +\ parent_where +\ """), """+\ """ children as ( """ +\ self.child_sql +\ child_where +\ """) select distinct cid, pid from children c inner join parents p on c.R = p.R and c.L = p.L """ print(sql) r = self.dbi.processData(sql, binds, conn, transaction=transaction) print(self.format(r)) return self.format(r) """
def execute(self, conn, block_name="", dataset="", run_num=-1, validFileOnly=0, sumOverLumi=0, transaction=False):
    """
    Generator yielding summary rows (num_file, num_event, file_size,
    num_block, num_lumi) for a block or a dataset, optionally restricted to
    runs and/or to valid files only.

    :param conn: db connection handed down from the upper layer
    :param block_name: summarize this block; takes precedence over dataset
    :param dataset: summarize this dataset when no block_name is given
    :param run_num: -1 means no run restriction; otherwise parsed by
        parseRunRange into single runs and/or (min, max) run_tuple ranges
    :param validFileOnly: when 1, only valid files in datasets whose access
        type is VALID or PRODUCTION are counted
    :param sumOverLumi: when 1 (and a run filter is present), num_event is
        summed from FILE_LUMIS per-lumi event counts instead of FILES
    :param transaction: passed through to the db layer
    :yields: formatted row dicts; yields nothing when neither block_name nor
        dataset is supplied
    """
    binds = {}
    whererun = ''
    run_list = []
    wheresql_run_list = ''
    wheresql_run_range = ''
    wheresql_isFileValid = ''
    join_valid_ds1 = ''
    join_valid_ds2 = ''
    join_bk_fl = ''
    sql = ''
    #
    # Valid-file filtering: join the dataset access type tables and filter
    # on is_file_valid + access type. ds1 is used when the query starts from
    # files/blocks; ds2 when datasets are already joined in.
    if int(validFileOnly) == 1 :
        wheresql_isFileValid =""" and f.is_file_valid = 1 and DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') """
        join_valid_ds1 = """ JOIN %sDATASETS D ON D.DATASET_ID = F.DATASET_ID JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID """% ((self.owner,)*2)
        join_valid_ds2 = """ JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID """% ((self.owner,)*1)
        join_bk_fl = """ join %sfiles f on f.block_id = b.block_id """ % ((self.owner,)*1)
    #
    # Build the run restriction: single runs collect into a TOKEN_GENERATOR
    # IN-list; a run_tuple becomes a BETWEEN clause.
    if run_num != -1:
        #
        for r in parseRunRange(run_num):
            if isinstance(r, basestring) or isinstance(r, (long, int)):
                #if not wheresql_run_list:
                #    wheresql_run_list = " fl.RUN_NUM = :run_list "
                run_list.append(str(r))
            if isinstance(r, run_tuple):
                # Ranges with identical endpoints are rejected; callers must
                # pass such runs as single values instead.
                if r[0] == r[1]:
                    dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.", self.logger.exception)
                # NOTE(review): only one BETWEEN range is supported — a later
                # range overwrites the minrun/maxrun binds of an earlier one.
                wheresql_run_range = " fl.RUN_NUM between :minrun and :maxrun "
                binds.update({"minrun":r[0]})
                binds.update({"maxrun":r[1]})
        #
        if run_list:
            wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            run_generator, run_binds = create_token_generator(run_list)
            # The WITH (subquery factoring) clause must precede the SELECT,
            # so it is prepended to sql here.
            sql = "{run_generator}".format(run_generator=run_generator)
            binds.update(run_binds)
        if wheresql_run_list and wheresql_run_range:
            whererun = wheresql_run_range + " or " + wheresql_run_list
        elif wheresql_run_list:
            whererun = wheresql_run_list
        elif wheresql_run_range:
            whererun = wheresql_run_range
    self.logger.debug('sumOverLumi=%s' %sumOverLumi)
    if block_name:
        if run_num != -1:
            if int(sumOverLumi) == 0:
                # Block + run filter, events taken from FILES.event_count.
                sql = sql +\
                    """ select (select count(f.file_id) from {owner}files f join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1} where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun} ) ) as num_file, nvl((select sum(f.event_count) event_count from {owner}files f join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1} where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun}) ),0) as num_event, (select nvl(sum(f.file_size),0) file_size from {owner}files f join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1} where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun}) ) as file_size, (select count(distinct b.block_id) from {owner}blocks b join {owner}files f on f.block_id=b.block_id {join_valid_ds1} where b.block_name=:block_name {wheresql_isFileValid} and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun}) )as num_block, (select count(*) from (select distinct fl.lumi_section_num, fl.run_num from {owner}files f join {owner}file_lumis fl on fl.file_id=f.file_id join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1} where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and {whererun} ) ) as num_lumi from dual """.format(owner=self.owner, whererun=whererun, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds1=join_valid_ds1)
                binds.update({"block_name":block_name})
            elif int(sumOverLumi) == 1:
                # Block + run filter, events summed from FILE_LUMIS; the
                # NOT EXISTS guard yields NULL if any per-lumi event_count
                # is missing (sum would otherwise be misleading).
                sql = sql +\
                    """ select (select count(f.file_id) from {owner}files f join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1} where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun} ) ) as num_file, ( with myFiles as (select file_id from {owner}files f join {owner}blocks b on b.block_id = f.block_id {join_valid_ds1} where b.block_name=:block_name {wheresql_isFileValid} ) select sum(fl.event_count) event_count from {owner}file_lumis fl join myFiles on myFiles.file_id = fl.file_id where {whererun} and not exists (select fl2.file_id from {owner}file_lumis fl2 join myFiles on myFiles.file_id = fl2.file_id where fl2.event_count is null ) ) as num_event, (select nvl(sum(f.file_size),0) file_size from {owner}files f join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1} where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun}) ) as file_size, (select count(distinct b.block_id) from {owner}blocks b join {owner}files f on f.block_id=b.block_id {join_valid_ds1} where b.block_name=:block_name {wheresql_isFileValid} and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun}) )as num_block, (select count(*) from (select distinct fl.lumi_section_num, fl.run_num from {owner}files f join {owner}file_lumis fl on fl.file_id=f.file_id join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1} where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and {whererun} ) ) as num_lumi from dual """.format(owner=self.owner, whererun=whererun, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds1=join_valid_ds1)
                binds.update({"block_name":block_name})
        else:
            # Block without run restriction.
            sql = """ select (select count(f.file_id) from {owner}files f join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1} where b.BLOCK_NAME=:block_name {wheresql_isFileValid} ) as num_file, nvl((select sum(f.event_count) event_count from {owner}files f join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1} where b.BLOCK_NAME=:block_name {wheresql_isFileValid} ),0) as num_event, (select nvl(sum(f.file_size),0) file_size from {owner}files f join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1} where b.BLOCK_NAME=:block_name {wheresql_isFileValid} ) as file_size, (select nvl(count(distinct b.block_id),0) from {owner}blocks b {join_bk_fl} {join_valid_ds1} where b.block_name=:block_name {wheresql_isFileValid} ) as num_block, (select count(*) from (select distinct l.lumi_section_num, l.run_num from {owner}files f join {owner}file_lumis l on l.file_id=f.file_id join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1} where b.BLOCK_NAME=:block_name {wheresql_isFileValid}) ) as num_lumi from dual """ .format(owner=self.owner, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds1=join_valid_ds1, join_bk_fl=join_bk_fl)
            binds.update({"block_name":block_name})
    elif dataset:
        if run_num != -1:
            if int(sumOverLumi) == 0:
                # Dataset + run filter, events taken from FILES.event_count.
                sql = sql + \
                    """ select (select count(f.file_id) from {owner}files f join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2} where d.dataset=:dataset {wheresql_isFileValid} and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun}) ) as num_file, nvl((select sum(f.event_count) event_count from {owner}files f join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2} where d.dataset=:dataset {wheresql_isFileValid} and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun}) ),0) as num_event, (select nvl(sum(f.file_size),0) file_size from {owner}files f join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2} where d.dataset=:dataset {wheresql_isFileValid} and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun}) ) as file_size, (select count(distinct b.block_id) from {owner}blocks b join {owner}datasets d on d.dataset_id = b.dataset_id {join_valid_ds2} join {owner}files f on f.block_id = b.block_id where d.dataset=:dataset {wheresql_isFileValid} and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun}) ) as num_block, (select count(*) from (select distinct fl.lumi_section_num, fl.run_num from {owner}files f join {owner}file_lumis fl on fl.file_id=f.file_id join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2} where d.dataset=:dataset {wheresql_isFileValid} and {whererun} ) ) as num_lumi from dual """.format(owner=self.owner, whererun=whererun, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds2=join_valid_ds2)
                binds.update({"dataset":dataset})
            elif int(sumOverLumi) == 1:
                # Dataset + run filter, events summed from FILE_LUMIS with the
                # same NULL-event_count guard as the block branch.
                sql = sql + \
                    """ select (select count(f.file_id) from {owner}files f join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2} where d.dataset=:dataset {wheresql_isFileValid} and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun}) ) as num_file, ( with myFiles as (select file_id from {owner}files f join {owner}datasets d on d.dataset_id = f.dataset_id {join_valid_ds2} where d.dataset=:dataset {wheresql_isFileValid} ) select sum(fl.event_count) event_count from {owner}file_lumis fl join myFiles on myFiles.file_id = fl.file_id where {whererun} and not exists (select fl2.file_id from {owner}file_lumis fl2 join myFiles on myFiles.file_id = fl2.file_id where fl2.event_count is null ) ) as num_event, (select nvl(sum(f.file_size),0) file_size from {owner}files f join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2} where d.dataset=:dataset {wheresql_isFileValid} and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun}) ) as file_size, (select count(distinct b.block_id) from {owner}blocks b join {owner}datasets d on d.dataset_id = b.dataset_id {join_valid_ds2} join {owner}files f on f.block_id = b.block_id where d.dataset=:dataset {wheresql_isFileValid} and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun}) ) as num_block, (select count(*) from (select distinct fl.lumi_section_num, fl.run_num from {owner}files f join {owner}file_lumis fl on fl.file_id=f.file_id join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2} where d.dataset=:dataset {wheresql_isFileValid} and {whererun} ) ) as num_lumi from dual """.format(owner=self.owner, whererun=whererun, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds2=join_valid_ds2)
                binds.update({"dataset":dataset})
        else:
            # Dataset without run restriction.
            sql = """ select (select count(f.file_id) from {owner}files f join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2} where d.dataset=:dataset {wheresql_isFileValid} ) as num_file, nvl((select sum(f.event_count) event_count from {owner}files f join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2} where d.dataset=:dataset {wheresql_isFileValid} ),0) as num_event, (select nvl(sum(f.file_size),0) file_size from {owner}files f join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2} where d.dataset=:dataset {wheresql_isFileValid} ) as file_size, (select count(distinct b.block_id) from {owner}blocks b join {owner}datasets d on d.dataset_id = b.dataset_id {join_valid_ds2} {join_bk_fl} where d.dataset=:dataset {wheresql_isFileValid} ) as num_block, (select count(*) from (select distinct l.lumi_section_num, l.run_num from {owner}file_lumis l join {owner}files f on f.file_id=l.file_id join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2} where d.dataset=:dataset {wheresql_isFileValid}) ) as num_lumi from dual """.format(owner=self.owner, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds2=join_valid_ds2, join_bk_fl=join_bk_fl)
            binds.update({"dataset":dataset})
    else:
        # Neither block_name nor dataset: the generator yields nothing.
        return
    #self.logger.error(sql)
    #self.logger.error(binds)
    cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
    for i in cursors:
        d = self.formatCursor(i, size=100)
        # formatCursor may return a list, a generator, or a single row.
        if isinstance(d, list) or isinstance(d, GeneratorType):
            for elem in d:
                yield elem
        elif d:
            yield d