Example #1
    def execute(self, conn, run_num=-1, logical_file_name="", block_name="", dataset="", trans=False):
        """
        Lists runs, optionally restricted to an exact logical_file_name, block_name or dataset.
        """
        if not conn:
            dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/DatasetRun/List. Expects db connection from upper layer.")

        sql = self.sql
        binds = {}
        if logical_file_name and "%" not in logical_file_name:
            sql += """ inner join %sFILES FILES on FILES.FILE_ID = FL.FILE_ID
                    WHERE FILES.LOGICAL_FILE_NAME = :logical_file_name""" % (self.owner)
            binds["logical_file_name"] = logical_file_name
        elif block_name and "%" not in block_name:
            sql += """ inner join %sFILES FILES on FILES.FILE_ID = FL.FILE_ID
                    inner join %sBLOCKS BLOCKS on BLOCKS.BLOCK_ID = FILES.BLOCK_ID
                    WHERE BLOCKS.BLOCK_NAME = :block_name """ % (self.owner, self.owner)
            binds["block_name"] = block_name
        elif dataset and "%" not in dataset:
            sql += """ inner join %sFILES FILES on FILES.FILE_ID = FL.FILE_ID
                    inner join %sDATASETS DATASETS on DATASETS.DATASET_ID = FILES.DATASET_ID
                    WHERE DATASETS.DATASET = :dataset """ % (self.owner, self.owner)
            binds["dataset"] = dataset
        else:
            pass

        if run_num != -1:
            andorwhere = ("WHERE", "AND")["WHERE" in sql]
            run_list = []
            wheresql_run_list = ''
            wheresql_run_range = ''
            #
            for r in parseRunRange(run_num):
                if isinstance(r, (basestring, int, long)):
                    run_list.append(str(r))
                if isinstance(r, run_tuple):
                    if r[0] == r[1]:
                        dbsExceptionHandler('dbsException-invalid-input', "DBS run_num range endpoints must differ by at least 1.")
                    wheresql_run_range = " FL.RUN_NUM between :minrun and :maxrun "
                    binds.update({"minrun":r[0]})
                    binds.update({"maxrun":r[1]})
            # 
            if run_list:
                wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                sql =  "{run_generator}".format(run_generator=run_generator) + sql
                binds.update(run_binds)

            if wheresql_run_range and wheresql_run_list:
                sql += " %s (" %andorwhere    + wheresql_run_range + " or " +  wheresql_run_list + " )"
            elif wheresql_run_range and not wheresql_run_list:
                sql += " %s " %andorwhere  + wheresql_run_range
            elif not wheresql_run_range and wheresql_run_list:
                sql += " %s " %andorwhere  + wheresql_run_list
        #self.logger.debug(sql)
        cursors = self.dbi.processData(sql, binds, conn, transaction=trans, returnCursor=True)
        result = []
        for i in range(len(cursors)):
            result.extend(self.formatCursor(cursors[i]))
        return result
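
The branching above hinges on two helpers that this page does not show: parseRunRange, which yields plain run numbers and run_tuple range objects from mixed input, and the run_tuple type itself. The sketch below is an assumption-based illustration of their apparent contract, not the actual dmwm/DBS implementation.

    # Assumed sketch of the run-range helpers used above; names and behavior
    # are inferred from the call sites, not taken from dmwm/DBS.
    from collections import namedtuple

    run_tuple = namedtuple("run_tuple", ["min_run", "max_run"])  # assumed shape

    def parseRunRange(run_num):
        """Yield plain run numbers and run_tuple ranges from input such as
        97, "97", "200-300", or ["97", "200-300"]."""
        values = run_num if isinstance(run_num, list) else [run_num]
        for v in values:
            s = str(v)
            if "-" in s:
                lo, hi = s.split("-", 1)
                yield run_tuple(int(lo), int(hi))   # a range such as "200-300"
            else:
                yield int(s)                        # a single run
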
Example #2
    def execute(self, conn, run_num=-1, logical_file_name="", block_name="", dataset="", trans=False):
        """
        Lists runs, optionally restricted to an exact logical_file_name, block_name or dataset.
        """
        if not conn:
            dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/DatasetRun/List. Expects db connection from upper layer.")

        sql = self.sql
        binds = {}
        if logical_file_name and "%" not in logical_file_name:
            sql += """ inner join %sFILES FILES on FILES.FILE_ID = FL.FILE_ID
                    WHERE FILES.LOGICAL_FILE_NAME = :logical_file_name""" % (self.owner)
            binds["logical_file_name"] = logical_file_name
        elif block_name and "%" not in block_name:
            sql += """ inner join %sFILES FILES on FILES.FILE_ID = FL.FILE_ID
                    inner join %sBLOCKS BLOCKS on BLOCKS.BLOCK_ID = FILES.BLOCK_ID
                    WHERE BLOCKS.BLOCK_NAME = :block_name """ % (self.owner, self.owner)
            binds["block_name"] = block_name
        elif dataset and "%" not in dataset:
            sql += """ inner join %sFILES FILES on FILES.FILE_ID = FL.FILE_ID
                    inner join %sDATASETS DATASETS on DATASETS.DATASET_ID = FILES.DATASET_ID
                    WHERE DATASETS.DATASET = :dataset """ % (self.owner, self.owner)
            binds["dataset"] = dataset
        else:
            pass

        if run_num != -1:
            andorwhere = ("WHERE", "AND")["WHERE" in sql]
            run_list = []
            wheresql_run_list = ''
            wheresql_run_range = ''
            #
            for r in parseRunRange(run_num):
                if isinstance(r, (str, int, long)):
                    run_list.append(str(r))
                if isinstance(r, run_tuple):
                    if r[0] == r[1]:
                        dbsExceptionHandler('dbsException-invalid-input', "DBS run_num range endpoints must differ by at least 1.")
                    wheresql_run_range = " FL.RUN_NUM between :minrun and :maxrun "
                    binds.update({"minrun":r[0]})
                    binds.update({"maxrun":r[1]})
            # 
            if run_list:
                wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                sql =  "{run_generator}".format(run_generator=run_generator) + sql
                binds.update(run_binds)

            if wheresql_run_range and wheresql_run_list:
                sql += " %s (" %andorwhere    + wheresql_run_range + " or " +  wheresql_run_list + " )"
            elif wheresql_run_range and not wheresql_run_list:
                sql += " %s " %andorwhere  + wheresql_run_range
            elif not wheresql_run_range and wheresql_run_list:
                sql += " %s " %andorwhere  + wheresql_run_list
        #self.logger.debug(sql)
        cursors = self.dbi.processData(sql, binds, conn, transaction=trans, returnCursor=True)
        result = []
        for i in range(len(cursors)):
            result.extend(self.formatCursor(cursors[i]))
        return result
Example #3
    def execute(self,
                conn,
                dataset="",
                block_name="",
                logical_file_name="",
                release_version="",
                pset_hash="",
                app_name="",
                output_module_label="",
                run_num=-1,
                origin_site_name="",
                lumi_list=[],
                validFileOnly=0,
                sumOverLumi=0,
                transaction=False):
        if not conn:
            dbsExceptionHandler(
                "dbsException-failed-connect2host",
                "Oracle/File/List. Expects db connection from upper layer.",
                self.logger.exception)
        sql = self.sql_cond
        binds = {}
        sql_sel = self.sql_sel
        run_generator = ''
        lfn_generator = ''
        lumi_generator = ''
        sql_lumi = ''
        run_list = []  # referenced by the lumi_list check below even when run_num is unset
        #import pdb
        #pdb.set_trace()
        if run_num != -1:
            sql_sel = sql_sel.replace("SELECT",
                                      "SELECT DISTINCT") + " , FL.RUN_NUM  "
            sql += " JOIN %sFILE_LUMIS FL on  FL.FILE_ID=F.FILE_ID " % (
                self.owner)
        if release_version or pset_hash or app_name or output_module_label:
            sql += """LEFT OUTER JOIN %sFILE_OUTPUT_MOD_CONFIGS FOMC ON FOMC.FILE_ID = F.FILE_ID
                      LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = FOMC.OUTPUT_MOD_CONFIG_ID
                      LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID
                      LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID
                      LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID
                      """ % ((self.owner, ) * 5)
        # FIXME: the status check should only be done with normal/super user
        #sql += """WHERE F.IS_FILE_VALID = 1"""
        # for the time being let's list all files
        # WMAgent requires validFileOnly. YG 1/30/2015
        if int(validFileOnly) == 0:
            sql += """ WHERE F.IS_FILE_VALID <> -1 """
        elif int(validFileOnly) == 1:
            sql += """ WHERE F.IS_FILE_VALID = 1 
                       AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') """
        else:
            dbsExceptionHandler("dbsException-invalid-input",
                                "invalid value for validFileOnly.",
                                self.logger.exception)

        if block_name:
            if isinstance(block_name, list):
                dbsExceptionHandler(
                    'dbsException-invalid-input',
                    'Input block_name is a list instead of string.',
                    self.logger.exception)
            sql += " AND B.BLOCK_NAME = :block_name"
            binds.update({"block_name": block_name})
        if logical_file_name:
            if type(logical_file_name) is not list:
                op = ("=", "like")["%" in logical_file_name]
                sql += " AND F.LOGICAL_FILE_NAME %s :logical_file_name" % op
                binds.update({"logical_file_name": logical_file_name})
            if type(logical_file_name) is list:
                ds_generator, binds2 = create_token_generator(
                    logical_file_name)
                binds.update(binds2)
                sql += " AND F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)"
                lfn_generator = "{ds_generator}".format(
                    ds_generator=ds_generator)
        if dataset:
            if isinstance(dataset, list):
                dbsExceptionHandler(
                    'dbsException-invalid-input',
                    'Input dataset is a list instead of string.',
                    self.logger.exception)
            sql += " AND D.DATASET = :dataset"
            binds.update({"dataset": dataset})
        if release_version:
            op = ("=", "like")["%" in release_version]
            sql += " AND RV.RELEASE_VERSION %s :release_version" % op
            binds.update({"release_version": release_version})
        if pset_hash:
            op = ("=", "like")["%" in pset_hash]
            sql += " AND PSH.PSET_HASH %s :pset_hash" % op
            binds.update({"pset_hash": pset_hash})
        if app_name:
            op = ("=", "like")["%" in app_name]
            sql += " AND AEX.APP_NAME %s :app_name" % op
            binds.update({"app_name": app_name})
        if output_module_label:
            op = ("=", "like")["%" in output_module_label]
            sql += " AND OMC.OUTPUT_MODULE_LABEL  %s :output_module_label" % op
            binds.update({"output_module_label": output_module_label})
        if (origin_site_name):
            op = ("=", "like")["%" in origin_site_name]
            sql += " AND B.ORIGIN_SITE_NAME %s  :origin_site_name" % op
            binds.update({"origin_site_name": origin_site_name})
        if run_num != -1 and run_num:  # eliminate the empty-list case run_num=[]
            run_list = []
            wheresql_run_list = ''
            wheresql_run_range = ''
            wheresql_run_range_ct = 0
            try:
                run_num = long(run_num)
                sql += " and FL.RUN_NUM = :run_num "
                binds.update({"run_num": run_num})
            except (TypeError, ValueError):
                if isinstance(run_num, basestring):
                    for r in parseRunRange(run_num):
                        if isinstance(r, run_tuple):
                            if r[0] == r[1]:
                                dbsExceptionHandler(
                                    'dbsException-invalid-input',
                                    "DBS run range must be apart at least by 1.",
                                    self.logger.exception)
                            if not lumi_list:
                                if wheresql_run_range_ct > 0:
                                    wheresql_run_range += " or "
                                wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % (
                                    (wheresql_run_range_ct, ) * 2)
                                binds.update({
                                    "minrun%s" % wheresql_run_range_ct:
                                    int(r[0])
                                })
                                binds.update({
                                    "maxrun%s" % wheresql_run_range_ct:
                                    int(r[1])
                                })
                                wheresql_run_range_ct += 1
                            else:
                                dbsExceptionHandler(
                                    'dbsException-invalid-input',
                                    "When lumi_list is given, only one run is allowed.",
                                    self.logger.exception)
                        else:
                            dbsExceptionHandler(
                                'dbsException-invalid-input',
                                "Invalid run_num. if run_num input as a string, it has to be converted into a int/long or in format of 'run_min-run_max'. ",
                                self.logger.exception)
                elif type(run_num) is list and len(run_num) == 1:
                    try:
                        run_num = long(run_num[0])
                        sql += " and FL.RUN_NUM = :run_num "
                        binds.update({"run_num": run_num})
                    except (TypeError, ValueError):
                        for r in parseRunRange(run_num):
                            if isinstance(r, run_tuple):
                                if r[0] == r[1]:
                                    dbsExceptionHandler(
                                        'dbsException-invalid-input',
                                        "DBS run range must be apart at least by 1.",
                                        self.logger.exception)
                                if not lumi_list:
                                    if wheresql_run_range_ct > 0:
                                        wheresql_run_range += " or "
                                    wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % (
                                        (wheresql_run_range_ct, ) * 2)
                                    binds.update({
                                        "minrun%s" % wheresql_run_range_ct:
                                        int(r[0])
                                    })
                                    binds.update({
                                        "maxrun%s" % wheresql_run_range_ct:
                                        int(r[1])
                                    })
                                    wheresql_run_range_ct += 1
                                else:
                                    dbsExceptionHandler(
                                        'dbsException-invalid-input',
                                        "When lumi_list is given, only one run is allowed.",
                                        self.logger.exception)
                            else:
                                dbsExceptionHandler(
                                    'dbsException-invalid-input',
                                    "run_num as a list must be a number or a range str, such as ['10'], [10] or ['1-10']",
                                    self.logger.exception)
                else:
                    for r in parseRunRange(run_num):
                        if isinstance(r, (basestring, int, long)):
                            run_list.append(str(r))
                        if isinstance(r, run_tuple):
                            if r[0] == r[1]:
                                dbsExceptionHandler(
                                    'dbsException-invalid-input',
                                    "DBS run range must be apart at least by 1.",
                                    self.logger.exception)
                            if not lumi_list:
                                if wheresql_run_range_ct > 0:
                                    wheresql_run_range += " or "
                                wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % (
                                    (wheresql_run_range_ct, ) * 2)
                                binds.update({
                                    "minrun%s" % wheresql_run_range_ct:
                                    int(r[0])
                                })
                                binds.update({
                                    "maxrun%s" % wheresql_run_range_ct:
                                    int(r[1])
                                })
                                wheresql_run_range_ct += 1
                            else:
                                dbsExceptionHandler(
                                    'dbsException-invalid-input',
                                    "When lumi_list is given, only one run is allowed.",
                                    self.logger.exception)
            #
            if run_list and not lumi_list:
                wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                #sql =  "{run_generator}".format(run_generator=run_generator) + sql
                binds.update(run_binds)
            if wheresql_run_range and wheresql_run_list:
                sql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
            elif wheresql_run_range and not wheresql_run_list:
                sql += " and " + wheresql_run_range
            elif not wheresql_run_range and wheresql_run_list:
                sql += " and " + wheresql_run_list
        # Make sure when we have a lumi_list, there is only ONE run  -- YG 14/05/2013
        if lumi_list:
            if len(run_list) > 1:
                dbsExceptionHandler(
                    'dbsException-invalid-input',
                    "When lumi_list is given, only one run is allowed.",
                    self.logger.exception)
            sql += " AND FL.LUMI_SECTION_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            sql_lumi = " FL.LUMI_SECTION_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            #Do I need to convert lumi_list to be a str list? YG 10/03/13
            #Yes, you do. YG
            lumi_list = map(str, lumi_list)
            lumi_generator, lumi_binds = create_token_generator(lumi_list)
            #sql_sel = "{lumi_generator}".format(lumi_generator=lumi_generator) + sql_sel
            binds.update(lumi_binds)
            #binds["run_num"]=run_list[0]
        #
        if (run_generator and lfn_generator) or (lumi_generator and lfn_generator):
            dbsExceptionHandler(
                'dbsException-invalid-input2',
                "cannot supply more than one list (lfn, run_num or lumi) in one query",
                self.logger.exception,
                "dao/File/list cannot have more than one list (lfn, run_num, lumi) as input parameters"
            )
            # only one list is allowed, and generators should be named differently for run and lfn.
            #sql = run_generator + lfn_generator + lumi_generator  + sql_sel + sql
        else:
            if run_num != -1 and int(sumOverLumi) == 1:
                sql_sel = sql_sel.replace('F.EVENT_COUNT,', '')
                sql = \
                 'with myfiles as ( ' + sql_sel + sql + """) select mf.* , 
                            (case 
                                when badi.file_id = mc.file_id and badi.run_num=mc.run_num and badi.bid is null then null 
                                else  mc.event_count 
                             end) as event_count 
                     from myfiles mf,   
                          EVENT_COUNT_WITH_LUMI, 
                          ( 
                            select distinct fl.file_id, fl.run_num, null as bid 
                            from %sfile_lumis fl  
                            join myfiles my2 on my2.file_id=fl.file_id and my2.run_num=fl.run_num 
                            where fl.event_count is null 
                         )badi 
                    where mf.file_id= mc.file_id and mf.run_num=mc.run_num  

                    """%self.owner
                if not lumi_list:
                    ent_ct = """
                            (select sum(fl.event_count) as event_count, fl.file_id, fl.run_num 
                            from %sfile_lumis fl 
                            join myfiles mf on mf.file_id=fl.file_id and mf.run_num=fl.run_num 
                            group by fl.file_id, fl.run_num) mc 
                        """ % self.owner
                    sql = sql.replace('EVENT_COUNT_WITH_LUMI', ent_ct)
                else:
                    # apply the lumi filter (sql_lumi) inside the aggregate subquery
                    ent_ct = lumi_generator + """ 
                            (select sum(fl.event_count) as event_count, fl.file_id, fl.run_num 
                            from %sfile_lumis fl 
                            join myfiles mf on mf.file_id=fl.file_id and mf.run_num=fl.run_num 
                            where """ % self.owner + sql_lumi + """ 
                            group by fl.file_id, fl.run_num) mc 
                        """
                    sql = sql.replace('EVENT_COUNT_WITH_LUMI', ent_ct)
            else:
                sql = run_generator + lfn_generator + lumi_generator + sql_sel + sql
        self.logger.debug("SQL: " + sql)
        self.logger.debug("***********************")
        self.logger.debug(binds)
        try:
            self.logger.debug("******before cursor**********")
            cursors = self.dbi.processData(sql,
                                           binds,
                                           conn,
                                           transaction,
                                           returnCursor=True)
            self.logger.debug("******after cursor**********")
        except Exception as e:
            self.logger.error(str(e))
            raise
        for i in cursors:
            d = self.formatCursor(i, size=100)
            if isinstance(d, (list, GeneratorType)):
                for elem in d:
                    yield elem
            elif d:
                yield d
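
Every list-valued input in these DAOs goes through create_token_generator instead of one bind variable per element, and the main statement then filters with IN (SELECT TOKEN FROM TOKEN_GENERATOR). The helper itself is not shown on this page; the sketch below illustrates the general Oracle technique it appears to rely on, with the SQL and signature inferred rather than copied from dmwm/DBS.

    # Assumed sketch of the TOKEN_GENERATOR idiom: bind the whole list as one
    # comma-joined string and let Oracle split it back into rows named TOKEN.
    def create_token_generator(token_list):
        tokens = ",".join(str(t) for t in token_list)
        sql = """WITH TOKEN_GENERATOR AS (
                     SELECT REGEXP_SUBSTR(:token_list, '[^,]+', 1, LEVEL) AS TOKEN
                     FROM DUAL
                     CONNECT BY LEVEL <=
                         LENGTH(:token_list) - LENGTH(REPLACE(:token_list, ',', '')) + 1
                 ) """
        return sql, {"token_list": tokens}

    # The returned WITH clause is prepended to the main statement, e.g.
    #   sql = run_generator + "SELECT ... WHERE fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR)"
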
Example #4
    def execute(self,
                conn,
                dataset="",
                block_name="",
                logical_file_name="",
                release_version="",
                pset_hash="",
                app_name="",
                output_module_label="",
                run_num=-1,
                origin_site_name="",
                lumi_list=[],
                validFileOnly=0,
                transaction=False):
        if not conn:
            dbsExceptionHandler(
                "dbsException-db-conn-failed",
                "Oracle/File/BriefList. Expects db connection from upper layer."
            )
        #import pdb
        #pdb.set_trace()
        binds = {}
        run_generator = ''
        lfn_generator = ''
        lumi_generator = ''
        generatedsql = ''
        basesql = self.sql
        joinsql = ''
        run_list = []  # referenced by the lumi_list check below even when run_num is unset
        # Check if file is valid. YG 1/29/2015
        if int(validFileOnly) == 0:
            wheresql = " WHERE F.IS_FILE_VALID <> -1 "
        elif int(validFileOnly) == 1:
            wheresql = " WHERE F.IS_FILE_VALID = 1 "
        else:
            dbsExceptionHandler("dbsException-invalid-input",
                                "invalid value for validFileOnly.")
        if logical_file_name:
            if type(logical_file_name) is not list:  #for GET call
                op = ("=", "like")["%" in logical_file_name]
                wheresql += " AND F.LOGICAL_FILE_NAME %s :logical_file_name " % op
                binds.update({"logical_file_name": logical_file_name})
            elif type(logical_file_name) is list:  #for POST call
                lfn_generator, binds2 = create_token_generator(
                    logical_file_name)
                binds.update(binds2)
                wheresql += " AND F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)"
                #generatedsql = "{lfn_generator}".format(lfn_generator=lfn_generator)
            if not dataset:
                if int(validFileOnly) == 1:
                    joinsql += """ JOIN %sDATASETS D ON  D.DATASET_ID = F.DATASET_ID
                                  JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID """ % (
                        (self.owner, ) * 2)
                    wheresql += " AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') "
                else:
                    pass
        if block_name:
            joinsql += " JOIN %sBLOCKS B ON B.BLOCK_ID = F.BLOCK_ID " % (
                self.owner)
            wheresql += " AND B.BLOCK_NAME = :block_name "
            binds.update({"block_name": block_name})
            if not dataset and int(validFileOnly) == 1:
                joinsql += """ JOIN %sDATASETS D ON  D.DATASET_ID = F.DATASET_ID
                               JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID """ % (
                    (self.owner, ) * 2)
                wheresql += " AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') "
            else:
                pass

        if dataset:
            joinsql += """ JOIN %sDATASETS D ON  D.DATASET_ID = F.DATASET_ID """ % (
                self.owner)
            wheresql += " AND D.DATASET = :dataset "
            binds.update({"dataset": dataset})
            if int(validFileOnly) == 1:
                joinsql += " JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID " % (
                    self.owner)
                wheresql += " AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') "
            else:
                pass

        if release_version or pset_hash or app_name or output_module_label:
            joinsql += """
		LEFT OUTER JOIN %sFILE_OUTPUT_MOD_CONFIGS FOMC ON FOMC.FILE_ID = F.FILE_ID
		LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = FOMC.OUTPUT_MOD_CONFIG_ID 
	    """ % ((self.owner, ) * 2)

        if release_version:
            joinsql += " LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID" % (
                self.owner)
            op = ("=", "like")["%" in release_version]
            wheresql += " AND RV.RELEASE_VERSION %s :release_version" % op
            binds.update(release_version=release_version)

        if pset_hash:
            joinsql += " LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID" % (
                self.owner)
            op = ("=", "like")["%" in pset_hash]
            wheresql += " AND PSH.PSET_HASH %s :pset_hash" % op
            binds.update(pset_hash=pset_hash)

        if app_name:
            joinsql += " LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID" % (
                self.owner)
            op = ("=", "like")["%" in app_name]
            wheresql += " AND AEX.APP_NAME %s :app_name" % op
            binds.update(app_name=app_name)

        if output_module_label:
            op = ("=", "like")["%" in output_module_label]
            wheresql += " AND OMC.OUTPUT_MODULE_LABEL  %s :output_module_label" % op
            binds.update(output_module_label=output_module_label)

        if (origin_site_name):
            if not block_name:
                joinsql += " JOIN %sBLOCKS B ON B.BLOCK_ID = F.BLOCK_ID" % (
                    self.owner)
            op = ("=", "like")["%" in origin_site_name]
            wheresql += " AND B.ORIGIN_SITE_NAME %s  :origin_site_name" % op
            binds.update({"origin_site_name": origin_site_name})

        if run_num != -1 and run_num:  # eliminate the empty-list case run_num=[]
            basesql = basesql.replace("SELECT", "SELECT DISTINCT")
            joinsql += " JOIN %sFILE_LUMIS FL on  FL.FILE_ID=F.FILE_ID " % (
                self.owner)
            run_list = []
            wheresql_run_list = ''
            wheresql_run_range = ''
            wheresql_run_range_ct = 0
            try:
                run_num = long(run_num)
                wheresql += " and FL.RUN_NUM = :run_num "
                binds.update({"run_num": run_num})
            except (TypeError, ValueError):
                if isinstance(run_num, basestring):
                    for r in parseRunRange(run_num):
                        if isinstance(r, run_tuple):
                            if r[0] == r[1]:
                                dbsExceptionHandler(
                                    'dbsException-invalid-input',
                                    "DBS run range must be apart at least by 1."
                                )
                            if not lumi_list:
                                if wheresql_run_range_ct > 0:
                                    wheresql_run_range += " or "
                                wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % (
                                    (wheresql_run_range_ct, ) * 2)
                                binds.update(
                                    {"minrun%s" % wheresql_run_range_ct: r[0]})
                                binds.update(
                                    {"maxrun%s" % wheresql_run_range_ct: r[1]})
                                wheresql_run_range_ct += 1
                            else:
                                dbsExceptionHandler(
                                    'dbsException-invalid-input',
                                    "When lumi_list is given, only one run is allowed."
                                )
                        else:
                            dbsExceptionHandler(
                                'dbsException-invalid-input',
                                "Invalid run_num. if run_num input as a string, it has to be converted into a int/long or in format of 'run_min-run_max'. "
                            )
                elif type(run_num) is list and len(run_num) == 1:
                    try:
                        run_num = long(run_num[0])
                        wheresql += " and FL.RUN_NUM = :run_num "
                        binds.update({"run_num": run_num})
                    except (TypeError, ValueError):
                        for r in parseRunRange(run_num):
                            if isinstance(r, run_tuple):
                                if r[0] == r[1]:
                                    dbsExceptionHandler(
                                        'dbsException-invalid-input',
                                        "DBS run range must be apart at least by 1."
                                    )
                                if not lumi_list:
                                    if wheresql_run_range_ct > 0:
                                        wheresql_run_range += " or "
                                    wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % (
                                        (wheresql_run_range_ct, ) * 2)
                                    binds.update({
                                        "minrun%s" % wheresql_run_range_ct:
                                        r[0]
                                    })
                                    binds.update({
                                        "maxrun%s" % wheresql_run_range_ct:
                                        r[1]
                                    })
                                    wheresql_run_range_ct += 1
                                else:
                                    dbsExceptionHandler(
                                        'dbsException-invalid-input',
                                        "When lumi_list is given, only one run is allowed."
                                    )
                            else:
                                dbsExceptionHandler(
                                    'dbsException-invalid-input',
                                    "run_num as a list must be a number or a range str, such as ['10'], [10] or ['1-10']"
                                )
                else:
                    for r in parseRunRange(run_num):
                        if isinstance(r, (basestring, int, long)):
                            run_list.append(str(r))
                        if isinstance(r, run_tuple):
                            if r[0] == r[1]:
                                dbsExceptionHandler(
                                    'dbsException-invalid-input',
                                    "DBS run range must be apart at least by 1."
                                )
                            if not lumi_list:
                                if wheresql_run_range_ct > 0:
                                    wheresql_run_range += " or "
                                wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % (
                                    (wheresql_run_range_ct, ) * 2)
                                binds.update(
                                    {"minrun%s" % wheresql_run_range_ct: r[0]})
                                binds.update(
                                    {"maxrun%s" % wheresql_run_range_ct: r[1]})
                                wheresql_run_range_ct += 1
                            else:
                                dbsExceptionHandler(
                                    'dbsException-invalid-input',
                                    "When lumi_list is given, only one run is allowed."
                                )
            if run_list and not lumi_list:
                wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                binds.update(run_binds)
            if wheresql_run_range and wheresql_run_list:
                wheresql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
            elif wheresql_run_range and not wheresql_run_list:
                wheresql += " and ( " + wheresql_run_range + " ) "
            elif not wheresql_run_range and wheresql_run_list:
                wheresql += " and " + wheresql_run_list

        # Make sure when we have a lumi_list, there is only ONE run  -- YG 14/05/2013
        if lumi_list:
            if len(run_list) > 1:
                dbsExceptionHandler(
                    'dbsException-invalid-input',
                    "When lumi_list is given, only one run is allowed.")
            wheresql += " AND FL.LUMI_SECTION_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            #Do I need to convert lumi_list to be a str list? YG 10/03/13
            #Yes, you do. YG
            lumi_list = map(str, lumi_list)
            lumi_generator, lumi_binds = create_token_generator(lumi_list)
            binds.update(lumi_binds)
            #binds["run_num"]=run_list[0]
        #
        if (run_generator and lfn_generator) or (lumi_generator and lfn_generator):
            self.logger.error("run_generator : %s" % run_generator)
            self.logger.error("lfn_generator:  %s" % lfn_generator)
            self.logger.error("lumi_generator: %s " % lumi_generator)
            dbsExceptionHandler(
                'dbsException-invalid-input2',
                message="cannot supply more than one list (lfn, run_num or lumi) in one query.",
                serverError="dao/File/list cannot have more than one list (lfn, run_num or lumi) as input parameters"
            )
        # only one list is allowed, and generators should be named differently for run and lfn.
        #sql = run_generator + lfn_generator + sql_sel + sql
        else:
            sql = run_generator + lfn_generator + lumi_generator + basesql + self.fromsql + joinsql + wheresql
        self.logger.debug("sql=%s" % sql)
        self.logger.debug("binds=%s" % binds)
        cursors = self.dbi.processData(sql,
                                       binds,
                                       conn,
                                       transaction,
                                       returnCursor=True)
        result = []
        for i in range(len(cursors)):
            result.extend(self.formatCursor(cursors[i]))
        return result
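
One idiom recurs in every filter above: op = ("=", "like")["%" in value]. The boolean indexes the two-element tuple, so values containing the SQL wildcard get a LIKE comparison and exact values get plain equality. A minimal standalone demonstration:

    # False indexes 0 ("="), True indexes 1 ("like").
    def comparison_op(value):
        return ("=", "like")["%" in value]

    assert comparison_op("/store/data/run123.root") == "="
    assert comparison_op("/store/data/%") == "like"
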
Example #5
    def execute(self, conn, block_name="", dataset="",  run_num=-1, transaction=False):
        if not conn:
            dbsExceptionHandler("dbsException-db-conn-failed","Oracle/File/SummaryList. Expects db connection from upper layer.")

        binds = {}
        whererun = ''
        run_list = []
        wheresql_run_list = ''
        wheresql_run_range = ''
        sql = ''
        if run_num != -1:
            #
            for r in parseRunRange(run_num):
                if isinstance(r, (str, long, int)):
                    #if not wheresql_run_list:
                    #    wheresql_run_list = " fl.RUN_NUM = :run_list "
                    run_list.append(str(r))
                if isinstance(r, run_tuple):
                    if r[0] == r[1]:
                        dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.")
                    wheresql_run_range = " fl.RUN_NUM between :minrun and :maxrun "
                    binds.update({"minrun":r[0]})
                    binds.update({"maxrun":r[1]})
            #
            if run_list:
                wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                sql =  "{run_generator}".format(run_generator=run_generator)
                binds.update(run_binds)
            if wheresql_run_list and wheresql_run_range:
                whererun = wheresql_run_range + " or " + wheresql_run_list
            elif wheresql_run_list:
                whererun = wheresql_run_list
            elif wheresql_run_range:
                whererun = wheresql_run_range
        if block_name:
            if run_num != -1:
                #
                sql = sql +\
                   """
                    select
                   (select count(f.file_id)  from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id
                     where b.BLOCK_NAME=:block_name
                     and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun} )
                    ) as num_file,
                    nvl((select sum(f.event_count) event_count from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id
                     where b.BLOCK_NAME=:block_name and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ),0) as num_event,
                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id
                     where b.BLOCK_NAME=:block_name and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as file_size,
                   (select count(distinct b.block_id) from {owner}blocks b
                   join {owner}files f on f.block_id=b.block_id
                   where b.block_name=:block_name and
                   f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                   )as num_block,
                   (select count(*) from (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                    join {owner}file_lumis fl on fl.file_id=f.file_id
                    join {owner}blocks b on b.BLOCK_ID = f.block_id
                    where b.BLOCK_NAME=:block_name and {whererun} )
                   ) as num_lumi
                   from dual
                    """.format(owner=self.owner, whererun=whererun)
                binds.update({"block_name":block_name})
            else:
                sql = """
                    select
                   (select count(f.file_id)  from %sfiles f
                     join %sblocks b on b.BLOCK_ID = f.block_id
                     where b.BLOCK_NAME=:block_name
                    ) as num_file,

                    nvl((select sum(f.event_count) event_count from %sfiles f
                     join %sblocks b on b.BLOCK_ID = f.block_id
                     where b.BLOCK_NAME=:block_name
                    ),0) as num_event,

                    (select nvl(sum(f.file_size),0) file_size from %sfiles f
                     join %sblocks b on b.BLOCK_ID = f.block_id
                     where b.BLOCK_NAME=:block_name
                    ) as file_size,

                    (select count(block_id) from %sblocks where block_name=:block_name
                    ) as num_block,

                   (select count(*) from (select distinct l.lumi_section_num, l.run_num from %sfiles f
                    join %sfile_lumis l on l.file_id=f.file_id
                    join %sblocks b on b.BLOCK_ID = f.block_id
                    where b.BLOCK_NAME=:block_name)
                   ) as num_lumi
                   from dual
                    """ %((self.owner,)*10)
                binds.update({"block_name":block_name})

        elif dataset:
            if run_num != -1:
                sql = sql + \
                    """
                    select
                    (select count(f.file_id)  from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id
                     where d.dataset=:dataset and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_file,

                    nvl((select sum(f.event_count) event_count from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id
                     where d.dataset=:dataset and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ),0) as num_event,

                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id
                     where d.dataset=:dataset and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as file_size,

                    (select count(distinct b.block_id) from {owner}blocks b
                     join {owner}datasets d on d.dataset_id = b.dataset_id
                     join {owner}files f on f.block_id = b.block_id
                     where d.dataset=:dataset and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_block,

                   (select count(*) from (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                    join {owner}file_lumis fl on fl.file_id=f.file_id
                    join {owner}datasets d on d.DATASET_ID = f.dataset_id
                    where d.dataset=:dataset and {whererun} )
                   ) as num_lumi
                    from dual
                    """.format(owner=self.owner, whererun=whererun)
                binds.update({"dataset":dataset})
            else:
                sql = """ \
                    select
                   (select count(f.file_id)  from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id
                     where d.dataset=:dataset
                    ) as num_file,

                    nvl((select sum(f.event_count) event_count from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id
                     where d.dataset=:dataset
                    ),0) as num_event,

                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id
                     where d.dataset=:dataset
                    ) as file_size,

                    (select count(b.block_id) from {owner}blocks b
                     join {owner}datasets d on d.dataset_id = b.dataset_id
                     where d.dataset=:dataset
                    ) as num_block,

                   (select count(*) from (select distinct l.lumi_section_num, l.run_num from {owner}files f
                    join {owner}file_lumis l on l.file_id=f.file_id
                    join {owner}datasets d on d.DATASET_ID = f.dataset_id
                    where d.dataset=:dataset)
                   ) as num_lumi
                    from dual
                    """.format(owner=self.owner)
                binds.update({"dataset":dataset})
        else:
            return []

        cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
        result = []
        for i in range(len(cursors)):
            result.extend(self.formatCursor(cursors[i]))
        return result
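
For orientation, here is how the run-filter pieces above would compose for a mixed run_num input, assuming the parseRunRange semantics sketched under Example #1:

    # Illustration only: run_num = ["1-5", "42"]
    # parseRunRange presumably yields run_tuple(1, 5) and "42", so:
    #   wheresql_run_range = " fl.RUN_NUM between :minrun and :maxrun "   (binds: minrun=1, maxrun=5)
    #   run_list = ["42"] -> wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
    #   whererun = wheresql_run_range + " or " + wheresql_run_list
    # and sql is prefixed with the WITH TOKEN_GENERATOR clause from create_token_generator.
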
Example #6
File: List.py Project: dmwm/DBS
    def execute(self, conn, dataset="", block_name="", logical_file_name="",
                release_version="", pset_hash="", app_name="", output_module_label="",
                run_num=-1, origin_site_name="", lumi_list=[], validFileOnly=0, sumOverLumi=0, transaction=False):
        if not conn:
            dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/File/List. Expects db connection from upper layer.", self.logger.exception)
        sql = self.sql_cond
        binds = {}
        sql_sel = self.sql_sel
        run_generator = ''
        lfn_generator = ''
        lumi_generator = ''
        sql_lumi = ''
        run_list = []  # referenced by the lumi_list check below even when run_num is unset
        #import pdb
        #pdb.set_trace()
        if run_num != -1:
            sql_sel = sql_sel.replace("SELECT", "SELECT DISTINCT") + " , FL.RUN_NUM  "
            sql += " JOIN %sFILE_LUMIS FL on  FL.FILE_ID=F.FILE_ID " %(self.owner)
        if release_version or pset_hash or app_name or output_module_label :
            sql += """LEFT OUTER JOIN %sFILE_OUTPUT_MOD_CONFIGS FOMC ON FOMC.FILE_ID = F.FILE_ID
                      LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = FOMC.OUTPUT_MOD_CONFIG_ID
                      LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID
                      LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID
                      LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID
                      """ % ((self.owner,)*5)
        # FIXME: the status check should only be done with normal/super user
        #sql += """WHERE F.IS_FILE_VALID = 1"""
        # for the time being let's list all files
        # WMAgent requires validFileOnly. YG 1/30/2015
        if int(validFileOnly) == 0:
            sql += """ WHERE F.IS_FILE_VALID <> -1 """
        elif int(validFileOnly) == 1:
            sql += """ WHERE F.IS_FILE_VALID = 1 
                       AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') """
        else:
            dbsExceptionHandler("dbsException-invalid-input", "invalid value for validFileOnly.", self.logger.exception)

        if block_name:
            if isinstance(block_name, list):
                dbsExceptionHandler('dbsException-invalid-input', 'Input block_name is a list instead of string.', self.logger.exception)
            sql += " AND B.BLOCK_NAME = :block_name"
            binds.update({"block_name":block_name})
        if logical_file_name:
            if type(logical_file_name) is not list:
                op = ("=", "like")["%" in logical_file_name]
                sql += " AND F.LOGICAL_FILE_NAME %s :logical_file_name" % op
                binds.update({"logical_file_name": logical_file_name})
            if type(logical_file_name) is list:
                ds_generator, binds2 = create_token_generator(logical_file_name)
                binds.update(binds2)
                sql += " AND F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)"
                lfn_generator = "{ds_generator}".format(ds_generator=ds_generator)
        if dataset:
            if isinstance(dataset, list):
                dbsExceptionHandler('dbsException-invalid-input', 'Input dataset is a list instead of string.', self.logger.exception)
            sql += " AND D.DATASET = :dataset"
            binds.update({"dataset":dataset})
        if release_version:
            op = ("=", "like")["%" in release_version]
            sql += " AND RV.RELEASE_VERSION %s :release_version" % op
            binds.update({"release_version":release_version})
        if pset_hash:
            op = ("=", "like")["%" in pset_hash]
            sql += " AND PSH.PSET_HASH %s :pset_hash" % op
            binds.update({"pset_hash" :pset_hash})
        if app_name:
            op = ("=", "like")["%" in app_name]
            sql += " AND AEX.APP_NAME %s :app_name" % op
            binds.update({"app_name":  app_name})
        if output_module_label:
            op = ("=", "like")["%" in output_module_label]
            sql += " AND OMC.OUTPUT_MODULE_LABEL  %s :output_module_label" %op
            binds.update({"output_module_label":output_module_label})
        if (origin_site_name):
            op = ("=", "like")["%" in origin_site_name]
            sql += " AND B.ORIGIN_SITE_NAME %s  :origin_site_name" % op
            binds.update({"origin_site_name":origin_site_name})
        if run_num != -1 and run_num:  # eliminate the empty-list case run_num=[]
            run_list = []
            wheresql_run_list = ''
            wheresql_run_range = ''
            wheresql_run_range_ct = 0
            try:
                run_num = long(run_num)
                sql += " and FL.RUN_NUM = :run_num "
                binds.update({"run_num": run_num})
            except (TypeError, ValueError):
                if isinstance(run_num, basestring):
                    for r in parseRunRange(run_num):
                        if isinstance(r, run_tuple):
                            if r[0] == r[1]:
                                dbsExceptionHandler('dbsException-invalid-input', "DBS run range endpoints must differ by at least 1.",
                                        self.logger.exception)
                            if not lumi_list:
                                if wheresql_run_range_ct > 0:
                                    wheresql_run_range += " or "
                                wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((wheresql_run_range_ct,)*2)
                                binds.update({"minrun%s" % wheresql_run_range_ct: r[0]})
                                binds.update({"maxrun%s" % wheresql_run_range_ct: r[1]})
                                wheresql_run_range_ct += 1
                            else:
                                dbsExceptionHandler('dbsException-invalid-input',
                                        "When lumi_list is given, only one run is allowed.", self.logger.exception)
                        else:
                            dbsExceptionHandler('dbsException-invalid-input',
                                    "Invalid run_num. A string run_num must be convertible to int/long or be in the format 'run_min-run_max'.",
                                    self.logger.exception)
                elif type(run_num) is list and len(run_num) == 1:
                    try:
                        run_num = long(run_num[0])
                        sql += " and FL.RUN_NUM = :run_num "
                        binds.update({"run_num": run_num})
                    except (TypeError, ValueError):
                        for r in parseRunRange(run_num):
                            if isinstance(r, run_tuple):
                                if r[0] == r[1]:
                                    dbsExceptionHandler('dbsException-invalid-input',
                                            "DBS run range endpoints must differ by at least 1.", self.logger.exception)
                                if not lumi_list:
                                    if wheresql_run_range_ct > 0:
                                        wheresql_run_range += " or "
                                    wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((wheresql_run_range_ct,)*2)
                                    binds.update({"minrun%s" % wheresql_run_range_ct: r[0]})
                                    binds.update({"maxrun%s" % wheresql_run_range_ct: r[1]})
                                    wheresql_run_range_ct += 1
                                else:
                                    dbsExceptionHandler('dbsException-invalid-input',
                                            "When lumi_list is given, only one run is allowed.", self.logger.exception)
                            else:
                                dbsExceptionHandler('dbsException-invalid-input',
                                        "run_num as a list must contain numbers or range strings, such as ['10'], [10] or ['1-10']",
                                        self.logger.exception)
                else:
                    for r in parseRunRange(run_num):
                        if isinstance(r, (basestring, int, long)):
                            run_list.append(str(r))
                        if isinstance(r, run_tuple):
                            if r[0] == r[1]:
                                dbsExceptionHandler('dbsException-invalid-input', "DBS run range endpoints must differ by at least 1.",
                                        self.logger.exception)
                            if not lumi_list:
                                if wheresql_run_range_ct > 0:
                                    wheresql_run_range += " or "
                                wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((wheresql_run_range_ct,)*2)
                                binds.update({"minrun%s" % wheresql_run_range_ct: r[0]})
                                binds.update({"maxrun%s" % wheresql_run_range_ct: r[1]})
                                wheresql_run_range_ct += 1
                            else:
                                dbsExceptionHandler('dbsException-invalid-input',
                                        "When lumi_list is given, only one run is allowed.", self.logger.exception)
            #
            if run_list and not lumi_list:
                wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                #sql = "{run_generator}".format(run_generator=run_generator) + sql
                binds.update(run_binds)
            if wheresql_run_range and wheresql_run_list:
                sql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
            elif wheresql_run_range and not wheresql_run_list:
                sql += " and " + wheresql_run_range
            elif not wheresql_run_range and wheresql_run_list:
                sql += " and " + wheresql_run_list
        # Make sure when we have a lumi_list, there is only ONE run  -- YG 14/05/2013
        if lumi_list:
            if len(run_list) > 1:
                dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.",
                                    self.logger.exception)
            sql += " AND FL.LUMI_SECTION_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            sql_lumi = " FL.LUMI_SECTION_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            #Do I need to convert lumi_list to be a str list? YG 10/03/13
            #Yes, you do. YG
            lumi_list = map(str, lumi_list)
            lumi_generator, lumi_binds = create_token_generator(lumi_list)
            #sql_sel = "{lumi_generator}".format(lumi_generator=lumi_generator) + sql_sel
            binds.update(lumi_binds)
            #binds["run_num"]=run_list[0]
        #
        if (run_generator and lfn_generator) or (lumi_generator and lfn_generator):
            dbsExceptionHandler('dbsException-invalid-input2',
                    "cannot supply more than one list (lfn, run_num or lumi) in one query", self.logger.exception,
                    "dao/File/list cannot have more than one list (lfn, run_num, lumi) as input parameters")
            # only one list is allowed, and generators should be named differently for run and lfn.
            #sql = run_generator + lfn_generator + lumi_generator  + sql_sel + sql
        else:
            if run_num != -1 and int(sumOverLumi) == 1:                
                sql_sel = sql_sel.replace('F.EVENT_COUNT,', '')
                sql = \
                 'with myfiles as ( ' + sql_sel + sql + """) select mf.* , 
                            (case 
                                when badi.file_id = mc.file_id and badi.run_num=mc.run_num and badi.bid is null then null 
                                else  mc.event_count 
                             end) as event_count 
                     from myfiles mf,   
                          EVENT_COUNT_WITH_LUMI, 
                          ( 
                            select distinct fl.file_id, fl.run_num, null as bid 
                            from %sfile_lumis fl  
                            join myfiles my2 on my2.file_id=fl.file_id and my2.run_num=fl.run_num 
                            where fl.event_count is null 
                         )badi 
                    where mf.file_id= mc.file_id and mf.run_num=mc.run_num  

                    """%self.owner
                if not lumi_list:
                    ent_ct = """
                            (select sum(fl.event_count) as event_count, fl.file_id, fl.run_num 
                            from %sfile_lumis fl 
                            join myfiles mf on mf.file_id=fl.file_id and mf.run_num=fl.run_num 
                            group by fl.file_id, fl.run_num) mc 
                        """%self.owner
                    sql = sql.replace('EVENT_COUNT_WITH_LUMI', ent_ct)
                else:
                    ent_ct = lumi_generator + """
                            (select sum(fl.event_count) as event_count, fl.file_id, fl.run_num
                            from %sfile_lumis fl
                            join myfiles mf on mf.file_id=fl.file_id and mf.run_num=fl.run_num
                            where %s
                            group by fl.file_id, fl.run_num) mc
                        """ % (self.owner, sql_lumi)
                    sql = sql.replace('EVENT_COUNT_WITH_LUMI', ent_ct)
            else:
                sql = run_generator + lfn_generator + lumi_generator + sql_sel + sql
        self.logger.debug("SQL: " + sql)
        self.logger.debug("***********************")
        self.logger.debug(binds)
        try:
            self.logger.debug("******before cursor**********")
            cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
            self.logger.debug("******after cursor**********")
        except Exception as e:
            self.logger.error(str(e))
            raise
        for i in cursors:
            d = self.formatCursor(i, size=100)
            if isinstance(d, list) or isinstance(d, GeneratorType):
                for elem in d:
                    yield elem
            elif d:
                yield d
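The DAOs in these examples all rely on dbsUtil's create_token_generator to turn a long
value list into a WITH TOKEN_GENERATOR clause plus named binds, so IN-lists use bind
variables instead of string concatenation. The real helper lives in dbsUtil; a minimal
sketch of the idea, with a hypothetical bind-naming scheme, could look like this:

    def create_token_generator_sketch(values):
        """Build an Oracle WITH-clause yielding one TOKEN row per bound value."""
        binds = {}
        selects = []
        for i, v in enumerate(values):
            name = "tok_%d" % i  # hypothetical bind naming scheme
            binds[name] = v
            selects.append("SELECT :%s AS TOKEN FROM DUAL" % name)
        generator = "WITH TOKEN_GENERATOR AS (%s) " % " UNION ALL ".join(selects)
        return generator, binds

    # Prepended to the statement exactly as the DAOs above do:
    gen, binds = create_token_generator_sketch(["100", "200", "300"])
    sql = gen + "SELECT ... WHERE FL.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR)"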
Example #7
    def execute(self, conn, dataset="", is_dataset_valid=1, parent_dataset="",
                release_version="", pset_hash="", app_name="", output_module_label="",
                processing_version=0, acquisition_era="", run_num=-1,
                physics_group_name="", logical_file_name="", primary_ds_name="",
                primary_ds_type="", processed_ds_name="", data_tier_name="", dataset_access_type="", 
                prep_id="", create_by='', last_modified_by='', min_cdate=0, max_cdate=0, min_ldate=0, max_ldate=0, cdate=0, ldate=0,
                transaction=False):
        if not conn:
            dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/Dataset/BriefList.  Expects db connection from upper layer.")
        selectsql = 'SELECT '
        joinsql = ''
        generatedsql = ''
        binds = {}
        wheresql = 'WHERE D.IS_DATASET_VALID=:is_dataset_valid '
        if dataset and type(dataset) is list:  # for the POST method
            #wheresql += " AND D.DATASET=:dataset "
            ds_generator, binds = create_token_generator(dataset)
            wheresql += " AND D.DATASET in (SELECT TOKEN FROM TOKEN_GENERATOR)"
            generatedsql = "{ds_generator}".format(ds_generator=ds_generator)
            if dataset_access_type and dataset_access_type not in ("%", "*"):
                joinsql += " JOIN %sDATASET_ACCESS_TYPES DP on DP.DATASET_ACCESS_TYPE_ID= D.DATASET_ACCESS_TYPE_ID " % (self.owner)
                op = ("=", "like")["%" in dataset_access_type or "*" in dataset_access_type]
                wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " %op
                binds['dataset_access_type'] = dataset_access_type
                binds['is_dataset_valid'] = is_dataset_valid
            else:
                binds['is_dataset_valid'] = is_dataset_valid
        else: #for the GET method
            binds.update(is_dataset_valid=is_dataset_valid)
            if cdate != 0:
                wheresql += "AND D.CREATION_DATE = :cdate "
                binds.update(cdate = cdate)
            elif min_cdate != 0 and max_cdate != 0:
                wheresql += "AND D.CREATION_DATE BETWEEN :min_cdate and :max_cdate "
                binds.update(min_cdate = min_cdate)
                binds.update(max_cdate = max_cdate)
            elif min_cdate != 0 and max_cdate == 0:
                wheresql += "AND D.CREATION_DATE > :min_cdate "
                binds.update(min_cdate = min_cdate)
            elif min_cdate ==0 and max_cdate != 0:
                wheresql += "AND D.CREATION_DATE < :max_cdate "
                binds.update(max_cdate = max_cdate)
            else:
                pass
            
            if ldate != 0:
                wheresql += "AND D.LAST_MODIFICATION_DATE = :ldate "
                binds.update(ldate = ldate)
            elif min_ldate != 0 and max_ldate != 0:
                wheresql += "AND D.LAST_MODIFICATION_DATE BETWEEN :min_ldate and :max_ldate "
                binds.update(min_ldate = min_ldate)
                binds.update(max_ldate = max_ldate)
            elif min_ldate != 0 and max_ldate == 0:
                wheresql += "AND D.LAST_MODIFICATION_DATE > :min_ldate "
                binds.update(min_ldate = min_ldate)
            elif min_ldate ==0 and max_ldate != 0:
                wheresql += "AND D.LAST_MODIFICATION_DATE < :max_ldate "
                binds.update(max_ldate = max_ldate)
            else:
                pass
            if create_by:
                wheresql +=  " AND D.CREATE_BY = :create_by "
                binds.update(create_by = create_by)
            if last_modified_by:
                wheresql += " AND D.LAST_MODIFIED_BY = :last_modified_by "
                binds.update(last_modified_by = last_modified_by)
            if prep_id:
                wheresql += "AND D.prep_id = :prep_id "
                binds.update(prep_id = prep_id)
            if dataset and type(dataset) is str and dataset != "%":
                op = ("=", "like")["%" in dataset]
                wheresql += " AND D.DATASET %s :dataset " % op
                binds.update(dataset = dataset)
            if primary_ds_name and primary_ds_name != "%":
                joinsql += " JOIN %sPRIMARY_DATASETS P ON P.PRIMARY_DS_ID = D.PRIMARY_DS_ID " % (self.owner)
                op = ("=", "like")["%" in primary_ds_name ]
                wheresql += " AND P.PRIMARY_DS_NAME %s :primary_ds_name " % op
                binds.update(primary_ds_name = primary_ds_name)
            if primary_ds_type and  primary_ds_type !="%":
                if not primary_ds_name:  
                    joinsql += " JOIN %sPRIMARY_DATASETS P ON P.PRIMARY_DS_ID = D.PRIMARY_DS_ID " % (self.owner)
                joinsql += " JOIN %sPRIMARY_DS_TYPES PDT ON PDT.PRIMARY_DS_TYPE_ID = P.PRIMARY_DS_TYPE_ID " % (self.owner)
                op = ("=", "like")["%" in primary_ds_type]
                wheresql += " AND PDT.PRIMARY_DS_TYPE %s :primary_ds_type " %op
                binds.update(primary_ds_type=primary_ds_type)
            
            if processed_ds_name and processed_ds_name != "%":
                joinsql += " JOIN %sPROCESSED_DATASETS PR ON PR.PROCESSED_DS_ID = D.PROCESSED_DS_ID " % (self.owner)
                op = ("=", "like")["%" in processed_ds_name ]
                wheresql += " AND PR.PROCESSED_DS_NAME %s :processed_ds_name " % op
                binds.update(processed_ds_name = processed_ds_name)

            if data_tier_name and data_tier_name != "%":
                joinsql += " JOIN %sDATA_TIERS DT ON DT.DATA_TIER_ID = D.DATA_TIER_ID " % (self.owner)
                op = ("=", "like")["%" in data_tier_name ]
                wheresql += " AND DT.DATA_TIER_NAME %s :data_tier_name " % op
                binds.update(data_tier_name=data_tier_name)

            if dataset_access_type and dataset_access_type not in ("%", "*"):
                joinsql += " JOIN %sDATASET_ACCESS_TYPES DP on DP.DATASET_ACCESS_TYPE_ID= D.DATASET_ACCESS_TYPE_ID " % (self.owner)
                op = ("=", "like")["%" in dataset_access_type or "*" in dataset_access_type]
                wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " %op
                binds.update(dataset_access_type=dataset_access_type)

            if physics_group_name and physics_group_name !="%":
                joinsql += " LEFT OUTER JOIN %sPHYSICS_GROUPS PH ON PH.PHYSICS_GROUP_ID = D.PHYSICS_GROUP_ID " % (self.owner)
                op = ("=", "like")["%" in physics_group_name]
                wheresql += " AND PH.PHYSICS_GROUP_NAME %s :physics_group_name " %op
                binds.update(physics_group_name=physics_group_name)
   
            if parent_dataset:
                joinsql += """
                    LEFT OUTER JOIN %sDATASET_PARENTS DSP ON DSP.THIS_DATASET_ID = D.DATASET_ID
                    LEFT OUTER JOIN %sDATASETS PDS ON PDS.DATASET_ID = DSP.PARENT_DATASET_ID
                    """ % ((self.owner,)*2)
                wheresql += " AND PDS.DATASET = :parent_dataset "
                binds.update(parent_dataset = parent_dataset)

            if release_version or pset_hash or app_name or output_module_label:
                joinsql += """
                    LEFT OUTER JOIN %sDATASET_OUTPUT_MOD_CONFIGS DOMC ON DOMC.DATASET_ID = D.DATASET_ID 
                    LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = DOMC.OUTPUT_MOD_CONFIG_ID  
                """ % ((self.owner,)*2)
	    
            if release_version:
                joinsql += " LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID " % (self.owner)
                op = ("=", "like")["%" in release_version]
                wheresql += " AND RV.RELEASE_VERSION %s :release_version " % op
                binds.update(release_version=release_version)

            if pset_hash:
                joinsql += " LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID " % (self.owner)
                op = ("=", "like")["%" in pset_hash]
                wheresql += " AND PSH.PSET_HASH %s :pset_hash " % op
                binds.update(pset_hash = pset_hash)

            if app_name:
                joinsql += " LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID " % (self.owner)
                op = ("=", "like")["%" in app_name]
                wheresql += " AND AEX.APP_NAME %s :app_name " % op
                binds.update(app_name = app_name)

            if output_module_label:
                op = ("=", "like")["%" in output_module_label]
                wheresql += " AND OMC.OUTPUT_MODULE_LABEL  %s :output_module_label " % op
                binds.update(output_module_label=output_module_label)

            if processing_version != 0:
                joinsql += " LEFT OUTER JOIN %sPROCESSING_ERAS PE ON PE.PROCESSING_ERA_ID = D.PROCESSING_ERA_ID " % (self.owner)
                #op = ("=", "like")["%" in processing_version]
                op = "="
                wheresql += " AND PE.PROCESSING_VERSION %s :pversion " % op
                binds.update(pversion=processing_version)

            if acquisition_era:
                joinsql += " LEFT OUTER JOIN %sACQUISITION_ERAS AE ON AE.ACQUISITION_ERA_ID = D.ACQUISITION_ERA_ID " % (self.owner)
                op = ("=", "like")["%" in acquisition_era]
                wheresql += " AND AE.ACQUISITION_ERA_NAME %s :aera " % op
                binds.update(aera=acquisition_era)

            if logical_file_name and logical_file_name !="%":
                selectsql += "DISTINCT "
                joinsql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % self.owner
                wheresql += " AND FL.LOGICAL_FILE_NAME = :logical_file_name "
                binds.update(logical_file_name = logical_file_name)
            #
            if run_num != -1:
                if not logical_file_name:
                    selectsql += "DISTINCT "
                    joinsql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % (self.owner)
                joinsql += " JOIN %sFILE_LUMIS FLLU on FLLU.FILE_ID=FL.FILE_ID " % (self.owner)
                run_list = []
                wheresql_run_list=''
                wheresql_run_range=''
                for r in parseRunRange(run_num):
                    if isinstance(r, (basestring, int, long)):
                        run_list.append(str(r))
                    if isinstance(r, run_tuple):
                        if r[0] == r[1]:
                            dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.")
                        wheresql_run_range = " FLLU.RUN_NUM between :minrun and :maxrun "
                        binds.update({"minrun":r[0]})
                        binds.update({"maxrun":r[1]})
                # 
                if run_list:
                    wheresql_run_list = " FLLU.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                    run_generator, run_binds = create_token_generator(run_list)
                    generatedsql = "{run_generator}".format(run_generator=run_generator)
                    binds.update(run_binds)
                if wheresql_run_range and wheresql_run_list:
                    wheresql += " and ("   + wheresql_run_range + " or " +  wheresql_run_list + " )"
                elif wheresql_run_range and not wheresql_run_list:
                    wheresql += " and "  + wheresql_run_range
                elif not wheresql_run_range and wheresql_run_list:
                    wheresql += " and " + wheresql_run_list

	sql = "".join((generatedsql, selectsql, self.basesql, joinsql, wheresql)) 
	#self.logger.debug( sql)
        #self.logger.debug( binds)
        cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
        result = []
        for i in cursors:
            d = self.formatCursor(i)
            if d:
                result += d
        return result
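A recurring micro-idiom in these DAOs is choosing the SQL comparison operator by
indexing a 2-tuple with a boolean, as in op = ("=", "like")["%" in dataset]. Since
False indexes element 0 and True element 1, a wildcard in the pattern flips "=" to
"like". A standalone illustration with made-up patterns:

    for pattern in ("/A/B/RAW", "/A/%/RAW"):
        op = ("=", "like")["%" in pattern]  # True selects index 1 -> "like"
        print("D.DATASET %s :dataset   (pattern=%s)" % (op, pattern))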
Example #8
File: List.py Project: dmwm/DBS
    def execute(self, conn, dataset="", is_dataset_valid=1, parent_dataset="",\
                release_version="", pset_hash="", app_name="", output_module_label="",\
                global_tag="", processing_version=0, acquisition_era="", run_num=-1,\
                physics_group_name="", logical_file_name="", primary_ds_name="",\
                primary_ds_type="", processed_ds_name="", data_tier_name="", dataset_access_type="", prep_id="",\
                create_by='', last_modified_by='', min_cdate=0, max_cdate=0, min_ldate=0, max_ldate=0, cdate=0,\
                ldate=0, dataset_id=-1, transaction=False):
        if not conn:
            dbsExceptionHandler("dbsException-failed-connect2host", "%s Oracle/Dataset/List.  Expects db connection from upper layer.", self.logger.exception)
        sql = ""
        generatedsql = ''
        basesql=self.basesql
        binds = {}
        wheresql = "WHERE D.IS_DATASET_VALID = :is_dataset_valid " 
        if dataset and type(dataset) is list:  # for the POST method
            ds_generator, binds2 = create_token_generator(dataset)
            binds.update(binds2)
            wheresql += " AND D.DATASET in (SELECT TOKEN FROM TOKEN_GENERATOR)"
            generatedsql = "{ds_generator}".format(ds_generator=ds_generator)
            if dataset_access_type and dataset_access_type !="%":
                op = ("=", "like")["%" in dataset_access_type]
                wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " %op
                binds['dataset_access_type'] = dataset_access_type
                binds['is_dataset_valid'] = is_dataset_valid
            else:
                binds['is_dataset_valid'] = is_dataset_valid
            sql = generatedsql + 'SELECT' + basesql + wheresql
        elif type(dataset_id) is not int:  # for the POST method
            # We treat dataset_id the same way as run_num: it can be id1-id2, id, or [id1, id2, id3, ...].
            dataset_id_list = []
            wheresql_dataset_id_list = ''
            wheresql_dataset_id_range = ''
            for id in parseRunRange(dataset_id):
                if isinstance(id, basestring) or isinstance(id, int) or isinstance(id, long):
                    dataset_id_list.append(str(id))
                if isinstance(id, run_tuple):
                    if id[0] == id[1]:
                        dbsExceptionHandler('dbsException-invalid-input', "DBS dataset_id range endpoints must differ by at least 1.", self.logger.exception)
                    wheresql_dataset_id_range = " D.DATASET_ID between :minid and :maxid "
                    binds.update({"minid": id[0]})
                    binds.update({"maxid": id[1]})
            if dataset_id_list:
                ds_generator, binds2 = create_token_generator(dataset_id_list)
                binds.update(binds2)
                wheresql_dataset_id_list = " D.DATASET_ID in (SELECT TOKEN FROM TOKEN_GENERATOR)"
                generatedsql = "{ds_generator}".format(ds_generator=ds_generator)
            if dataset_access_type and dataset_access_type != "%":
                op = ("=", "like")["%" in dataset_access_type]
                wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
                binds['dataset_access_type'] = dataset_access_type
                binds['is_dataset_valid'] = is_dataset_valid
            else:
                binds['is_dataset_valid'] = is_dataset_valid
            if wheresql_dataset_id_list and wheresql_dataset_id_range:
                sql = generatedsql + 'SELECT' + basesql + wheresql + " and ( " \
                    + wheresql_dataset_id_list + " or " + wheresql_dataset_id_range + " )"
            elif wheresql_dataset_id_list and not wheresql_dataset_id_range:
                sql = generatedsql + 'SELECT' + basesql + wheresql + " and " + wheresql_dataset_id_list
            elif not wheresql_dataset_id_list and wheresql_dataset_id_range:
                sql = generatedsql + 'SELECT' + basesql + wheresql + " and " + wheresql_dataset_id_range
        else: #for the GET method
            binds.update(is_dataset_valid = is_dataset_valid)
            if cdate != 0:
                wheresql += "AND D.CREATION_DATE = :cdate "
                binds.update(cdate = cdate)
            elif min_cdate != 0 and max_cdate != 0:
                wheresql += "AND D.CREATION_DATE BETWEEN :min_cdate and :max_cdate "
                binds.update(min_cdate = min_cdate)
                binds.update(max_cdate = max_cdate)
            elif min_cdate != 0 and max_cdate == 0:
                wheresql += "AND D.CREATION_DATE > :min_cdate "
                binds.update(min_cdate = min_cdate)
            elif min_cdate ==0 and max_cdate != 0:
                wheresql += "AND D.CREATION_DATE < :max_cdate "
                binds.update(max_cdate = max_cdate)
            else:
                pass
            if ldate != 0:
                wheresql += "AND D.LAST_MODIFICATION_DATE = :ldate "
                binds.update(ldate = ldate)
            elif min_ldate != 0 and max_ldate != 0:
                wheresql += "AND D.LAST_MODIFICATION_DATE BETWEEN :min_ldate and :max_ldate "
                binds.update(min_ldate = min_ldate)
                binds.update(max_ldate = max_ldate)
            elif min_ldate != 0 and max_ldate == 0:
                wheresql += "AND D.LAST_MODIFICATION_DATE > :min_ldate "
                binds.update(min_ldate = min_ldate)
            elif min_ldate ==0 and max_ldate != 0:
                wheresql += "AND D.LAST_MODIFICATION_DATE < :max_ldate "
                binds.update(max_ldate = max_ldate)
            else:
                pass
            if create_by:
                wheresql +=  " AND D.CREATE_BY = :create_by "
                binds.update(create_by = create_by)
            if last_modified_by:
                wheresql += " AND D.LAST_MODIFIED_BY = :last_modified_by "
                binds.update(last_modified_by = last_modified_by)
            if prep_id:
                wheresql += "AND D.PREP_ID = :prep_id "
                binds.update(prep_id = prep_id)
            if dataset and dataset != "%":
                op = ("=", "like")["%" in dataset]
                wheresql += " AND D.DATASET %s :dataset " % op
                binds.update(dataset = dataset)
            if dataset_id != -1:
                wheresql += " AND D.DATASET_ID = :dataset_id "
                binds.update(dataset_id = dataset_id)
            if primary_ds_name and primary_ds_name != "%":
                op = ("=", "like")["%" in primary_ds_name]
                wheresql += " AND P.PRIMARY_DS_NAME %s :primary_ds_name " % op
                binds.update(primary_ds_name = primary_ds_name)
            if processed_ds_name and processed_ds_name != "%":
                op = ("=", "like")["%" in processed_ds_name]
                wheresql += " AND PD.PROCESSED_DS_NAME %s :processed_ds_name " % op
                binds.update(processed_ds_name = processed_ds_name)
            if data_tier_name and data_tier_name != "%":
                op = ("=", "like")["%" in data_tier_name]
                wheresql += " AND DT.DATA_TIER_NAME %s :data_tier_name " % op
                binds.update(data_tier_name=data_tier_name)
            if dataset_access_type and dataset_access_type != "%":
                op = ("=", "like")["%" in dataset_access_type]
                wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
                binds.update(dataset_access_type=dataset_access_type)
            if primary_ds_type and primary_ds_type != "%":
                op = ("=", "like")["%" in primary_ds_type]
                wheresql += " AND PDT.PRIMARY_DS_TYPE %s :primary_ds_type " % op
                binds.update(primary_ds_type=primary_ds_type)
            if physics_group_name and physics_group_name != "%":
                op = ("=", "like")["%" in physics_group_name]
                wheresql += " AND PH.PHYSICS_GROUP_NAME %s :physics_group_name " % op
                binds.update(physics_group_name=physics_group_name)
       
            if parent_dataset:
                basesql = "PDS.DATASET PARENT_DATASET, " + basesql
                basesql += """
                LEFT OUTER JOIN %sDATASET_PARENTS DSP ON DSP.THIS_DATASET_ID = D.DATASET_ID
                LEFT OUTER JOIN %sDATASETS PDS ON PDS.DATASET_ID = DSP.PARENT_DATASET_ID
                """ % ((self.owner,)*2)
                wheresql += " AND PDS.DATASET = :parent_dataset "
                binds.update(parent_dataset = parent_dataset)

            if release_version or pset_hash or app_name or output_module_label:
                basesql = """
                OMC.OUTPUT_MODULE_LABEL, OMC.GLOBAL_TAG,
                RV.RELEASE_VERSION,
                PSH.PSET_HASH,
                AEX.APP_NAME, """ + basesql
                basesql += """
                LEFT OUTER JOIN %sDATASET_OUTPUT_MOD_CONFIGS DOMC ON DOMC.DATASET_ID = D.DATASET_ID
                LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = DOMC.OUTPUT_MOD_CONFIG_ID
                LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID
                LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID
                LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID
                """ % ((self.owner,)*5)

            if release_version:
                op = ("=", "like")["%" in release_version]
                wheresql += " AND RV.RELEASE_VERSION %s :release_version " % op
                binds.update(release_version=release_version)
            if pset_hash:
                op = ("=", "like")["%" in pset_hash]
                wheresql += " AND PSH.PSET_HASH %s :pset_hash " % op
                binds.update(pset_hash = pset_hash)
            if app_name:
                op = ("=", "like")["%" in app_name]
                wheresql += " AND AEX.APP_NAME %s :app_name " % op
                binds.update(app_name = app_name)
            if output_module_label:
                op = ("=", "like")["%" in output_module_label]
                wheresql += " AND OMC.OUTPUT_MODULE_LABEL  %s :output_module_label " % op
                binds.update(output_module_label=output_module_label)
            if global_tag:
                op = ("=", "like")["%" in global_tag]
                wheresql += " AND OMC.GLOBAL_TAG  %s :global_tag " % op
                binds.update(global_tag=global_tag)
            if processing_version != 0:
                #op = ("=", "like")["%" in processing_version]
                op = "="
                wheresql += " AND PE.PROCESSING_VERSION %s :pversion " % op
                binds.update(pversion=processing_version)
            if acquisition_era:
                op = ("=", "like")["%" in acquisition_era]
                wheresql += " AND AE.ACQUISITION_ERA_NAME %s :aera " % op
                binds.update(aera=acquisition_era)
    	
            # This should resolve to the original cases that were in the business logic
            if (not logical_file_name or logical_file_name == "%") and (run_num == -1):
                # No file-level filters: just execute the query as built.
                sql = "SELECT " + basesql + wheresql

            elif (run_num == -1) and logical_file_name and logical_file_name != "%":
                # Case 1: filter by logical_file_name only.
                sql = "SELECT DISTINCT " + basesql
                sql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % self.owner
                wheresql += " AND FL.LOGICAL_FILE_NAME = :logical_file_name "
                binds.update(logical_file_name = logical_file_name)
                sql += wheresql

            elif run_num != -1:
                # Case 2: filter by run_num, optionally combined with logical_file_name.
                sql += "SELECT DISTINCT " + basesql
                if logical_file_name:
                    sql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % self.owner
                    wheresql += " AND FL.LOGICAL_FILE_NAME = :logical_file_name "
                    binds.update(logical_file_name = logical_file_name)
                else:
                    sql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % (self.owner)
                #
                sql += " JOIN %sFILE_LUMIS FLLU on FLLU.FILE_ID=FL.FILE_ID " % (self.owner)
                run_list = []
                wheresql_run_list = ''
                wheresql_run_range = ''
                for r in parseRunRange(run_num):
                    if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
                        run_list.append(str(r))
                    if isinstance(r, run_tuple):
                        if r[0] == r[1]:
                            dbsExceptionHandler('dbsException-invalid-input', "DBS run range endpoints must differ by at least 1.", self.logger.exception)
                        wheresql_run_range = " FLLU.RUN_NUM between :minrun and :maxrun "
                        binds.update({"minrun": r[0]})
                        binds.update({"maxrun": r[1]})
                #
                if run_list:
                    wheresql_run_list = " FLLU.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                    run_generator, run_binds = create_token_generator(run_list)
                    sql = "{run_generator}".format(run_generator=run_generator) + sql
                    binds.update(run_binds)
                if wheresql_run_range and wheresql_run_list:
                    wheresql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
                elif wheresql_run_range and not wheresql_run_list:
                    wheresql += " and " + wheresql_run_range
                elif not wheresql_run_range and wheresql_run_list:
                    wheresql += " and " + wheresql_run_list

                sql += wheresql
            else:
                dbsExceptionHandler("dbsException-invalid-input", "Oracle/Dataset/List. Proper parameters are not provided for the listDatasets call.", self.logger.exception)
        #self.logger.error(sql)
        #self.logger.error("binds=%s" % binds)
        cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
        for i in cursors:
            d = self.formatCursor(i,size=100)
            if isinstance(d, list) or isinstance(d, GeneratorType):
                for elem in d:
                    yield elem
            elif d:
                yield d
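Every run_num branch above funnels through parseRunRange, which yields single runs
(numbers or numeric strings) and run_tuple range pairs; the DAOs then collect singles
into a TOKEN_GENERATOR IN-list and ranges into BETWEEN clauses. A hypothetical
stand-in for the helper, shown only to illustrate that branching:

    from collections import namedtuple

    run_tuple = namedtuple("run_tuple", ["minrun", "maxrun"])  # stand-in type

    def parse_run_range_sketch(run_num):
        """Yield single runs as ints and 'a-b' strings as run_tuple ranges."""
        for item in run_num if isinstance(run_num, list) else [run_num]:
            text = str(item)
            if "-" in text:
                lo, hi = text.split("-", 1)
                yield run_tuple(int(lo), int(hi))
            else:
                yield int(text)

    run_list, run_range = [], None
    for r in parse_run_range_sketch(["1", "5-8", 42]):
        if isinstance(r, run_tuple):
            run_range = " RUN_NUM between :minrun and :maxrun "  # bound as minrun/maxrun
        else:
            run_list.append(str(r))  # goes into the TOKEN_GENERATOR IN-list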
Example #9
    def execute(self,
                conn,
                logical_file_name='',
                block_name='',
                run_num=-1,
                validFileOnly=0,
                migration=False):
        """
        Lists lumi section numbers with in a file, a list of files or a block.
        """
        sql = ""
        wheresql = ""
        lfn_generator = ""
        run_generator = ""
        if not conn:
            dbsExceptionHandler(
                "dbsException-db-conn-failed",
                "Oracle/FileLumi/List. Expects db connection from upper layer."
            )
        if logical_file_name and not isinstance(logical_file_name, list):
            binds = {'logical_file_name': logical_file_name}
            if int(validFileOnly) == 0:
                if migration:  # migration always calls with a single file and includes all files, valid or not
                    sql = self.sql + """ FROM {owner}FILE_LUMIS FL 
				     JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID 
				     WHERE F.LOGICAL_FILE_NAME = :logical_file_name 
				  """.format(owner=self.owner)
                else:
                    sql = self.sql + """ , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME FROM {owner}FILE_LUMIS FL
				    JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID 
                                     WHERE F.LOGICAL_FILE_NAME = :logical_file_name 
                                  """.format(owner=self.owner)
            else:
                sql = self.sql + """ , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME FROM {owner}FILE_LUMIS FL 
				     JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
				     JOIN {owner}DATASETS D ON  D.DATASET_ID = F.DATASET_ID
				     JOIN {owner}DATASET_ACCESS_TYPES DT ON  DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID		
				     WHERE F.IS_FILE_VALID = 1 AND F.LOGICAL_FILE_NAME = :logical_file_name 
				     AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') 
				 """.format(owner=self.owner)
        elif logical_file_name and isinstance(logical_file_name, list):
            sql = self.sql + """ , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME FROM {owner}FILE_LUMIS FL JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID """.format(
                owner=self.owner)
            lfn_generator, binds = create_token_generator(logical_file_name)
            if int(validFileOnly) == 0:
                wheresql = "WHERE F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)"
            else:
                sql = sql + """ JOIN {owner}DATASETS D ON  D.DATASET_ID = F.DATASET_ID 
				JOIN {owner}DATASET_ACCESS_TYPES DT ON  DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID
		            """.format(owner=self.owner)
                wheresql = """ WHERE F.IS_FILE_VALID = 1 AND F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR) 
			       AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION')
			   """
            sql = "{lfn_generator} {sql} {wheresql}".format(
                lfn_generator=lfn_generator, sql=sql, wheresql=wheresql)
        elif block_name:
            binds = {'block_name': block_name}
            if int(validFileOnly) == 0:
                sql = self.sql + """ , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME FROM {owner}FILE_LUMIS FL JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID  
				     JOIN {owner}BLOCKS B ON B.BLOCK_ID = F.BLOCK_ID  
				     WHERE B.BLOCK_NAME = :block_name""".format(owner=self.owner)
            else:
                sql = self.sql + """ , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME FROM {owner}FILE_LUMIS FL JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID 
				     JOIN {owner}DATASETS D ON  D.DATASET_ID = F.DATASET_ID 
				     JOIN {owner}DATASET_ACCESS_TYPES DT ON  DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID 
				     JOIN {owner}BLOCKS B ON B.BLOCK_ID = F.BLOCK_ID
				     WHERE F.IS_FILE_VALID = 1 AND B.BLOCK_NAME = :block_name 
				     AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION')	
				""".format(owner=self.owner)
        else:
            dbsExceptionHandler(
                'dbsException-invalid-input2',
                "FileLumi/List: Either logocal_file_name or block_name must be provided.",
                self.logger.exception,
                "FileLumi/List: Either logocal_file_name or block_name must be provided."
            )
        #
        if run_num != -1:
            run_list = []
            wheresql_run_list = ''
            wheresql_run_range = ''
            for r in parseRunRange(run_num):
                if isinstance(r, (basestring, int, long)):
                    run_list.append(str(r))
                if isinstance(r, run_tuple):
                    if r[0] == r[1]:
                        dbsExceptionHandler(
                            'dbsException-invalid-input2',
                            "DBS run range endpoints must differ by at least 1.",
                            self.logger.exception,
                            "DBS run range endpoints must differ by at least 1.")
                    wheresql_run_range = " FL.RUN_NUM between :minrun and :maxrun "
                    binds.update({"minrun": r[0]})
                    binds.update({"maxrun": r[1]})
            #
            if run_list:
                if len(run_list) == 1:
                    wheresql_run_list = " fl.RUN_NUM = :single_run "
                    binds.update({"single_run": long(run_list[0])})

                else:
                    wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                    run_generator, run_binds = create_token_generator(run_list)
                    sql = "{run_generator}".format(
                        run_generator=run_generator) + sql
                    binds.update(run_binds)

            if wheresql_run_range and wheresql_run_list:
                sql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
            elif wheresql_run_range and not wheresql_run_list:
                sql += " and " + wheresql_run_range
            elif not wheresql_run_range and wheresql_run_list:
                sql += " and " + wheresql_run_list
        self.logger.debug(sql)
        self.logger.debug(binds)
        if run_generator and lfn_generator:
            dbsExceptionHandler(
                'dbsException-invalid-input2',
                "listFileLumiArray supports only a single list of lfn or run_num.",
                self.logger.exception,
                "listFileLumiArray supports only a single list of lfn or run_num.")
        cursors = self.dbi.processData(sql,
                                       binds,
                                       conn,
                                       transaction=False,
                                       returnCursor=True)
        result = []
        file_run_lumi = {}
        for i in cursors:
            result.extend(self.formatCursor(i))
        #for migration, we need flat format to load the data into another DB.
        if migration:
            #YG 09/2015.
            for item in result:
                yield item
        else:
            for i in result:
                r = i['run_num']
                f = i['logical_file_name']
                file_run_lumi.setdefault((f, r),
                                         []).append(i['lumi_section_num'])
            for k, v in file_run_lumi.iteritems():
                yield {
                    'logical_file_name': k[0],
                    'run_num': k[1],
                    'lumi_section_num': v
                }
        del file_run_lumi
        del result
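The non-migration branch above collapses one row per lumi section into one dict per
(file, run) pair with dict.setdefault. The same idiom on made-up rows:

    rows = [
        {'logical_file_name': '/store/a.root', 'run_num': 1, 'lumi_section_num': 7},
        {'logical_file_name': '/store/a.root', 'run_num': 1, 'lumi_section_num': 8},
        {'logical_file_name': '/store/b.root', 'run_num': 1, 'lumi_section_num': 2},
    ]
    file_run_lumi = {}
    for i in rows:
        # setdefault creates the empty list on the first (file, run) key,
        # then every row appends its lumi section to it.
        file_run_lumi.setdefault((i['logical_file_name'], i['run_num']), []).append(i['lumi_section_num'])
    for k, v in file_run_lumi.items():
        print({'logical_file_name': k[0], 'run_num': k[1], 'lumi_section_num': v})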
Example #10
    def execute(self, conn, dataset="", block_name="", logical_file_name="",
                release_version="", pset_hash="", app_name="", output_module_label="",
                run_num=-1, origin_site_name="", lumi_list=[], validFileOnly=0, transaction=False):
        if not conn:
            dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/File/BriefList. Expects db connection from upper layer.")
        #import pdb
        #pdb.set_trace()
        binds = {}
        run_generator = ''
        lfn_generator = ''
        lumi_generator = ''
        generatedsql = ''
        basesql = self.sql
        joinsql = ''
        # Check if file is valid. YG 1/29/2015
        if int(validFileOnly) == 0:
            wheresql = " WHERE F.IS_FILE_VALID <> -1 "
        elif int(validFileOnly) == 1:
            wheresql = " WHERE F.IS_FILE_VALID = 1 "
        else:
            dbsExceptionHandler("dbsException-invalid-input", "invalid value for validFileOnly.")
        if logical_file_name:
            if type(logical_file_name) is not list:  # for GET call
                op = ("=", "like")["%" in logical_file_name]
                wheresql += " AND F.LOGICAL_FILE_NAME %s :logical_file_name " % op
                binds.update({"logical_file_name": logical_file_name})
            elif type(logical_file_name) is list:  # for POST call
                lfn_generator, binds2 = create_token_generator(logical_file_name)
                binds.update(binds2)
                wheresql += " AND F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)"
                #generatedsql = "{lfn_generator}".format(lfn_generator=lfn_generator)
            if not dataset:
                if int(validFileOnly) == 1:
                    joinsql += """ JOIN %sDATASETS D ON  D.DATASET_ID = F.DATASET_ID
                                   JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID """ % ((self.owner,)*2)
                    wheresql += " AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') "
                else:
                    pass
        if block_name:
            joinsql += " JOIN %sBLOCKS B ON B.BLOCK_ID = F.BLOCK_ID " % (self.owner)
            wheresql += " AND B.BLOCK_NAME = :block_name "
            binds.update({"block_name":block_name})
            if not dataset and int(validFileOnly) == 1:
                joinsql += """ JOIN %sDATASETS D ON  D.DATASET_ID = F.DATASET_ID
                               JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID """ % ((self.owner,)*2)
                wheresql += " AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') "
            else:
                pass

        if dataset:
            joinsql += """ JOIN %sDATASETS D ON  D.DATASET_ID = F.DATASET_ID """ % (self.owner)
            wheresql += " AND D.DATASET = :dataset "
            binds.update({"dataset": dataset})
            if int(validFileOnly) == 1:
                joinsql += " JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID " % (self.owner)
                wheresql += " AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') "
            else:
                pass

        if release_version or pset_hash or app_name or output_module_label:
            joinsql += """
                LEFT OUTER JOIN %sFILE_OUTPUT_MOD_CONFIGS FOMC ON FOMC.FILE_ID = F.FILE_ID
                LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = FOMC.OUTPUT_MOD_CONFIG_ID
            """ % ((self.owner,)*2)

        if release_version:
            joinsql += " LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID" % (self.owner)
            op = ("=", "like")["%" in release_version]
            wheresql += " AND RV.RELEASE_VERSION %s :release_version" % op
            binds.update(release_version=release_version)

        if pset_hash:
            joinsql += " LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID" % (self.owner)
            op = ("=", "like")["%" in pset_hash]
            wheresql += " AND PSH.PSET_HASH %s :pset_hash" % op
            binds.update(pset_hash = pset_hash)

        if app_name:
            joinsql += " LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID" % (self.owner)
            op = ("=", "like")["%" in app_name]
            wheresql += " AND AEX.APP_NAME %s :app_name" % op
            binds.update(app_name = app_name)

        if output_module_label:
            op = ("=", "like")["%" in output_module_label]
            wheresql += " AND OMC.OUTPUT_MODULE_LABEL %s :output_module_label" % op
            binds.update(output_module_label=output_module_label)

        if origin_site_name:
            if not block_name:
                joinsql += " JOIN %sBLOCKS B ON B.BLOCK_ID = F.BLOCK_ID" % (self.owner)
            op = ("=", "like")["%" in origin_site_name]
            wheresql += " AND B.ORIGIN_SITE_NAME %s :origin_site_name" % op
            binds.update({"origin_site_name": origin_site_name})

        if run_num != -1 and run_num:  # eliminate empty list run_num=[]
            basesql = basesql.replace("SELECT", "SELECT DISTINCT")
            joinsql += " JOIN %sFILE_LUMIS FL on FL.FILE_ID=F.FILE_ID " % (self.owner)
            run_list = []
            wheresql_run_list = ''
            wheresql_run_range = ''
            wheresql_run_range_ct = 0
            try:
                run_num = long(run_num)
                wheresql += " and FL.RUN_NUM = :run_num "
                binds.update({"run_num": run_num})
            except (TypeError, ValueError):
                if isinstance(run_num, basestring):
                    for r in parseRunRange(run_num):
                        if isinstance(r, run_tuple):
                            if r[0] == r[1]:
                                dbsExceptionHandler('dbsException-invalid-input', "DBS run range endpoints must differ by at least 1.")
                            if not lumi_list:
                                if wheresql_run_range_ct > 0:
                                    wheresql_run_range += " or "
                                wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((wheresql_run_range_ct,)*2)
                                binds.update({"minrun%s" % wheresql_run_range_ct: r[0]})
                                binds.update({"maxrun%s" % wheresql_run_range_ct: r[1]})
                                wheresql_run_range_ct += 1
                            else:
                                dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.")
                        else:
                            dbsExceptionHandler('dbsException-invalid-input', "Invalid run_num. If run_num is given as a string, it must convert to an int/long or have the form 'run_min-run_max'.")
                elif type(run_num) is list and len(run_num) == 1:
                    try:
                        run_num = long(run_num[0])
                        wheresql += " and FL.RUN_NUM = :run_num "
                        binds.update({"run_num": run_num})
                    except (TypeError, ValueError):
                        for r in parseRunRange(run_num):
                            if isinstance(r, run_tuple):
                                if r[0] == r[1]:
                                    dbsExceptionHandler('dbsException-invalid-input', "DBS run range endpoints must differ by at least 1.")
                                if not lumi_list:
                                    if wheresql_run_range_ct > 0:
                                        wheresql_run_range += " or "
                                    wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((wheresql_run_range_ct,)*2)
                                    binds.update({"minrun%s" % wheresql_run_range_ct: r[0]})
                                    binds.update({"maxrun%s" % wheresql_run_range_ct: r[1]})
                                    wheresql_run_range_ct += 1
                                else:
                                    dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.")
                            else:
                                dbsExceptionHandler('dbsException-invalid-input', "run_num as a list must hold numbers or range strings, such as ['10'], [10] or ['1-10']")
                else:
                    for r in parseRunRange(run_num):
                        if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
                            run_list.append(str(r))
                        if isinstance(r, run_tuple):
                            if r[0] == r[1]:
                                dbsExceptionHandler('dbsException-invalid-input', "DBS run range endpoints must differ by at least 1.")
                            if not lumi_list:
                                if wheresql_run_range_ct > 0:
                                    wheresql_run_range += " or "
                                wheresql_run_range += " FL.RUN_NUM between :minrun%s and :maxrun%s " % ((wheresql_run_range_ct,)*2)
                                binds.update({"minrun%s" % wheresql_run_range_ct: r[0]})
                                binds.update({"maxrun%s" % wheresql_run_range_ct: r[1]})
                                wheresql_run_range_ct += 1
                            else:
                                dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.")
            if run_list and not lumi_list:
                wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                binds.update(run_binds)
            if wheresql_run_range and wheresql_run_list:
                wheresql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
            elif wheresql_run_range and not wheresql_run_list:
                wheresql += " and ( " + wheresql_run_range + " ) "
            elif not wheresql_run_range and wheresql_run_list:
                wheresql += " and " + wheresql_run_list

        # Make sure that when we have a lumi_list there is only ONE run -- YG 14/05/2013
        if lumi_list:
            if len(run_list) > 1:
                dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.")
            wheresql += " AND FL.LUMI_SECTION_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            # lumi_list values must be strings for the token generator. YG 10/03/13
            lumi_list = map(str, lumi_list)
            lumi_generator, lumi_binds = create_token_generator(lumi_list)
            binds.update(lumi_binds)
            #binds["run_num"]=run_list[0]
        #
        if (run_generator and lfn_generator) or (lumi_generator and lfn_generator):
            self.logger.error("run_generator : %s" % run_generator)
            self.logger.error("lfn_generator:  %s" % lfn_generator)
            self.logger.error("lumi_generator: %s " % lumi_generator)
            dbsExceptionHandler('dbsException-invalid-input2', message="cannot supply more than one list (lfn, run_num or lumi) in one query.",
                                serverError="dao/File/list cannot have more than one list (lfn, run_num or lumi) as input parameters")
            # Only one list is allowed per query; to combine them, the WITH token
            # generators would have to be named differently for run and lfn.
            #sql = run_generator + lfn_generator + sql_sel + sql
        else:
            sql = run_generator + lfn_generator + lumi_generator + basesql + self.fromsql + joinsql + wheresql
        self.logger.debug("sql=%s" % sql)
        self.logger.debug("binds=%s" % binds)
        cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
        result = []
        for i in range(len(cursors)):
            result.extend(self.formatCursor(cursors[i]))
        return result
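Note how all of these DAOs interpolate only the trusted schema owner into the SQL
text and pass every user-supplied value as a named bind. A standalone cx_Oracle
sketch of that split (the driver use and names here are assumptions; DBS itself
goes through the WMCore dbi layer instead):

    import cx_Oracle  # assumed driver, not what DBS uses directly

    OWNER = "CMS_DBS3_OWNER."  # hypothetical schema prefix, like self.owner above

    def list_valid_files(conn, dataset):
        # Only the trusted owner prefix is interpolated; the user-supplied
        # dataset name travels as the named bind :dataset, never as SQL text.
        sql = ("SELECT F.LOGICAL_FILE_NAME FROM %sFILES F "
               "JOIN %sDATASETS D ON D.DATASET_ID = F.DATASET_ID "
               "WHERE F.IS_FILE_VALID = 1 AND D.DATASET = :dataset") % ((OWNER,)*2)
        cur = conn.cursor()
        cur.execute(sql, {"dataset": dataset})
        return [row[0] for row in cur]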
Example #11
    def execute(self,
                conn,
                block_name="",
                dataset="",
                run_num=-1,
                validFileOnly=0,
                sumOverLumi=0,
                transaction=False):
        binds = {}
        whererun = ''
        run_list = []
        wheresql_run_list = ''
        wheresql_run_range = ''
        wheresql_isFileValid = ''
        join_valid_ds1 = ''
        join_valid_ds2 = ''
        join_bk_fl = ''
        sql = ''
        #
        if int(validFileOnly) == 1:
            wheresql_isFileValid = """ and f.is_file_valid = 1 and DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') """
            join_valid_ds1 = """ JOIN %sDATASETS D ON  D.DATASET_ID = F.DATASET_ID
                                JOIN %sDATASET_ACCESS_TYPES DT ON  DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID      
                             """ % ((self.owner, ) * 2)
            join_valid_ds2 = """ JOIN %sDATASET_ACCESS_TYPES DT ON  DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID      
                             """ % ((self.owner, ) * 1)
            join_bk_fl = """ join %sfiles f on f.block_id = b.block_id 
                             """ % ((self.owner, ) * 1)
        #
        if run_num != -1:
            #
            for r in parseRunRange(run_num):
                if isinstance(r, basestring) or isinstance(r, (long, int)):
                    #if not wheresql_run_list:
                    #    wheresql_run_list = " fl.RUN_NUM = :run_list "
                    run_list.append(str(r))
                if isinstance(r, run_tuple):
                    if r[0] == r[1]:
                        dbsExceptionHandler(
                            'dbsException-invalid-input',
                            "DBS run range endpoints must differ by at least 1.",
                            self.logger.exception)
                    wheresql_run_range = " fl.RUN_NUM between :minrun and :maxrun "
                    binds.update({"minrun": int(r[0])})
                    binds.update({"maxrun": int(r[1])})
            #
            if run_list:
                wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                sql = "{run_generator}".format(run_generator=run_generator)
                binds.update(run_binds)
            if wheresql_run_list and wheresql_run_range:
                whererun = wheresql_run_range + " or " + wheresql_run_list
            elif wheresql_run_list:
                whererun = wheresql_run_list
            elif wheresql_run_range:
                whererun = wheresql_run_range
        self.logger.debug('sumOverLumi=%s' % sumOverLumi)
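        # 'whererun' below is spliced into each scalar subquery, so the file,
        # event, size, block and lumi counts are all restricted to the same runs.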
        if block_name:
            if run_num != -1:
                if int(sumOverLumi) == 0:
                    #
                    sql = sql +\
                    """
                    select
                    (select count(f.file_id)  from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                     and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun} )
                    ) as num_file,
                    nvl((select sum(f.event_count) event_count from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ),0) as num_event,
                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as file_size,
                     (select count(distinct b.block_id) from {owner}blocks b
                     join {owner}files f on f.block_id=b.block_id {join_valid_ds1}
                     where b.block_name=:block_name {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                     )as num_block,
                    (select count(*) from (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                    join {owner}file_lumis fl on fl.file_id=f.file_id
                    join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                    where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and {whererun} )
                    ) as num_lumi
                    from dual
                    """.format(owner=self.owner, whererun=whererun, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds1=join_valid_ds1)
                    binds.update({"block_name": block_name})
                elif int(sumOverLumi) == 1:
                    sql = sql +\
                    """
                    select
                    (select count(f.file_id)  from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                     and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun} )
                    ) as num_file,
                    (
                     with myFiles as
                     (select file_id from {owner}files f 
                      join {owner}blocks b on b.block_id = f.block_id {join_valid_ds1}
                      where b.block_name=:block_name  {wheresql_isFileValid}
                     ) 
                    select sum(fl.event_count) event_count from {owner}file_lumis fl
                    join myFiles on myFiles.file_id = fl.file_id
                    where {whererun} and  
                    not exists (select fl2.file_id from {owner}file_lumis fl2
                               join myFiles on myFiles.file_id = fl2.file_id 
                               where fl2.event_count is null )
                    ) as num_event,

                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as file_size,
                     (select count(distinct b.block_id) from {owner}blocks b
                     join {owner}files f on f.block_id=b.block_id {join_valid_ds1}
                     where b.block_name=:block_name {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                     )as num_block,
                    (select count(*) from (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                    join {owner}file_lumis fl on fl.file_id=f.file_id
                    join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                    where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and {whererun} )
                    ) as num_lumi
                    from dual
                    """.format(owner=self.owner, whererun=whererun, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds1=join_valid_ds1)
                    binds.update({"block_name": block_name})

            else:
                sql = """
                    select
                   (select count(f.file_id)  from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                    ) as num_file,

                    nvl((select sum(f.event_count) event_count from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                    ),0) as num_event,

                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                    ) as file_size,

                    (select nvl(count(distinct b.block_id),0) from {owner}blocks b
                     {join_bk_fl}
                     {join_valid_ds1}
                     where b.block_name=:block_name {wheresql_isFileValid}
                    ) as num_block,

                   (select count(*) from (select distinct l.lumi_section_num, l.run_num from {owner}files f
                    join {owner}file_lumis l on l.file_id=f.file_id
                    join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                    where b.BLOCK_NAME=:block_name {wheresql_isFileValid})
                   ) as num_lumi
                   from dual
                    """.format(owner=self.owner,
                               wheresql_isFileValid=wheresql_isFileValid,
                               join_valid_ds1=join_valid_ds1,
                               join_bk_fl=join_bk_fl)
                binds.update({"block_name": block_name})

        elif dataset:
            if run_num != -1:
                if int(sumOverLumi) == 0:
                    sql = sql + \
                    """
                    select
                    (select count(f.file_id)  from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_file,

                    nvl((select sum(f.event_count) event_count from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id  {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ),0) as num_event,

                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as file_size,

                    (select count(distinct b.block_id) from {owner}blocks b
                     join {owner}datasets d on d.dataset_id = b.dataset_id {join_valid_ds2}
                     join {owner}files f on f.block_id = b.block_id
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_block,

                   (select count(*) from (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                    join {owner}file_lumis fl on fl.file_id=f.file_id
                    join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                    where d.dataset=:dataset {wheresql_isFileValid} and {whererun} )
                   ) as num_lumi
                    from dual
                    """.format(owner=self.owner, whererun=whererun, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds2=join_valid_ds2)
                    binds.update({"dataset": dataset})
                elif int(sumOverLumi) == 1:
                    sql = sql + \
                    """
                    select
                    (select count(f.file_id)  from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_file,
                    (
                    with myFiles as
                     (select file_id from {owner}files f 
                      join {owner}datasets d on d.dataset_id = f.dataset_id {join_valid_ds2}
                      where d.dataset=:dataset  {wheresql_isFileValid}
                     ) 
                    select sum(fl.event_count) event_count from {owner}file_lumis fl
                    join myFiles on myFiles.file_id = fl.file_id
                    where {whererun} and  
                    not exists (select fl2.file_id from {owner}file_lumis fl2
                               join myFiles on myFiles.file_id = fl2.file_id 
                               where fl2.event_count is null )
                    ) as num_event,

                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as file_size,

                    (select count(distinct b.block_id) from {owner}blocks b
                     join {owner}datasets d on d.dataset_id = b.dataset_id {join_valid_ds2}
                     join {owner}files f on f.block_id = b.block_id
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_block,

                   (select count(*) from (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                    join {owner}file_lumis fl on fl.file_id=f.file_id
                    join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                    where d.dataset=:dataset {wheresql_isFileValid} and {whererun} )
                   ) as num_lumi
                    from dual
                    """.format(owner=self.owner, whererun=whererun, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds2=join_valid_ds2)
                binds.update({"dataset": dataset})
            else:
                sql = """ \
                    select
                   (select count(f.file_id)  from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid}
                    ) as num_file,

                    nvl((select sum(f.event_count) event_count from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid}
                    ),0) as num_event,

                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid}
                    ) as file_size,

                    (select count(distinct b.block_id) from {owner}blocks b
                     join {owner}datasets d on d.dataset_id = b.dataset_id {join_valid_ds2}
                     {join_bk_fl}
                     where d.dataset=:dataset {wheresql_isFileValid}
                    ) as num_block,

                   (select count(*) from (select distinct l.lumi_section_num, l.run_num from {owner}file_lumis l
                    join {owner}files f on f.file_id=l.file_id
                    join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                    where d.dataset=:dataset {wheresql_isFileValid})
                   ) as num_lumi
                    from dual
                    """.format(owner=self.owner,
                               wheresql_isFileValid=wheresql_isFileValid,
                               join_valid_ds2=join_valid_ds2,
                               join_bk_fl=join_bk_fl)
                binds.update({"dataset": dataset})
        else:
            return
        #self.logger.error(sql)
        #self.logger.error(binds)
        cursors = self.dbi.processData(sql,
                                       binds,
                                       conn,
                                       transaction,
                                       returnCursor=True)
        for i in cursors:
            d = self.formatCursor(i, size=100)
            if isinstance(d, list) or isinstance(d, GeneratorType):
                for elem in d:
                    yield elem
            elif d:
                yield d
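Throughout these DAOs, run lists are matched through a TOKEN_GENERATOR inline view whose SQL comes from create_token_generator and is prepended to the statement, so that " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) " works for any number of runs with one bind per value. The helper below is only a sketch of that pattern, assuming the generator is a WITH clause of unioned single-row selects; the SQL actually emitted by DBS's create_token_generator may differ.

# Sketch only: DBS's create_token_generator may emit different SQL.
# The idea: turn a Python list into a one-column inline view so that
# "x in (SELECT TOKEN FROM TOKEN_GENERATOR)" needs one bind per value.
def sketch_token_generator(values):
    binds = {}
    selects = []
    for i, v in enumerate(values):
        name = "tok_%d" % i                      # hypothetical bind names
        binds[name] = v
        selects.append("SELECT :%s TOKEN FROM DUAL" % name)
    prefix = "WITH TOKEN_GENERATOR AS (%s)" % " UNION ALL ".join(selects)
    return prefix, binds

# Example: restrict file_lumis to runs 1, 5 and 9.
prefix, run_binds = sketch_token_generator([1, 5, 9])
sql = prefix + """ select fl.file_id from file_lumis fl
                   where fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR)"""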
Example #12
    def execute(self, conn, dataset="", block_name="", logical_file_name="",
            release_version="", pset_hash="", app_name="", output_module_label="",
            run_num=-1, origin_site_name="", lumi_list=[], transaction=False):
        if not conn:
            dbsExceptionHandler("dbsException-db-conn-failed","Oracle/File/BriefList. Expects db connection from upper layer.")

        binds = {}
        basesql = self.sql
        joinsql = ""
        # for the time being let's list all files
        wheresql = "WHERE F.IS_FILE_VALID <> -1"

        if logical_file_name:
	    op = ("=", "like")["%" in logical_file_name] 
            wheresql += " AND F.LOGICAL_FILE_NAME %s :logical_file_name" % op
            binds.update({"logical_file_name":logical_file_name})

        if block_name:
	    joinsql += " JOIN %sBLOCKS B ON B.BLOCK_ID = F.BLOCK_ID" % (self.owner)
            wheresql += " AND B.BLOCK_NAME = :block_name"
            binds.update({"block_name":block_name})

        if dataset:
            joinsql += " JOIN %sDATASETS D ON D.DATASET_ID = F.DATASET_ID " % (self.owner)
            wheresql += " AND D.DATASET = :dataset"
            binds.update({"dataset":dataset})


        if release_version or pset_hash or app_name or output_module_label:
            joinsql += """
                LEFT OUTER JOIN %sFILE_OUTPUT_MOD_CONFIGS FOMC ON FOMC.FILE_ID = F.FILE_ID
                LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = FOMC.OUTPUT_MOD_CONFIG_ID
            """ % ((self.owner,)*2)

        if release_version:
            joinsql += " LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID" % (self.owner)
            op = ("=", "like")["%" in release_version]
            wheresql += " AND RV.RELEASE_VERSION %s :release_version" % op
            binds.update(release_version=release_version)

        if pset_hash:
            joinsql += " LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID" % (self.owner)
            op = ("=", "like")["%" in pset_hash]
            wheresql += " AND PSH.PSET_HASH %s :pset_hash" % op
            binds.update(pset_hash=pset_hash)

        if app_name:
            joinsql += " LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID" % (self.owner)
            op = ("=", "like")["%" in app_name]
            wheresql += " AND AEX.APP_NAME %s :app_name" % op
            binds.update(app_name=app_name)

        if output_module_label:
            op = ("=", "like")["%" in output_module_label]
            wheresql += " AND OMC.OUTPUT_MODULE_LABEL %s :output_module_label" % op
            binds.update(output_module_label=output_module_label)

        if origin_site_name:
            if not block_name:
                joinsql += " JOIN %sBLOCKS B ON B.BLOCK_ID = F.BLOCK_ID" % (self.owner)
            op = ("=", "like")["%" in origin_site_name]
            wheresql += " AND B.ORIGIN_SITE_NAME %s :origin_site_name" % op
            binds.update({"origin_site_name": origin_site_name})

        if run_num != -1 :
            basesql = basesql.replace("SELECT", "SELECT DISTINCT") + " , FL.RUN_NUM"
            joinsql += " JOIN %sFILE_LUMIS FL on  FL.FILE_ID=F.FILE_ID " %(self.owner)
            #
            run_list = []
            wheresql_run_list = ''
            wheresql_run_range = ''
            #
            for r in parseRunRange(run_num):
                if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
                    if not wheresql_run_list:
                        wheresql_run_list = " FL.RUN_NUM = :run_list "
                    run_list.append(r)
                if isinstance(r, run_tuple):
                    if r[0] == r[1]:
                        dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.")
                    wheresql_run_range = " FL.RUN_NUM between :minrun and :maxrun "
                    binds.update({"minrun":r[0]})
                    binds.update({"maxrun":r[1]})
            # 
            if wheresql_run_range and len(run_list) >= 1:
                wheresql += " and (" + wheresql_run_range + " or " +  wheresql_run_list + " )"
            elif wheresql_run_range and not run_list:
                wheresql += " and " + wheresql_run_range
            elif not wheresql_run_range and len(run_list) >= 1:
                wheresql += " and "  + wheresql_run_list
            # Any List binding, such as "in :run_list"  or "in :lumi_list" must be the last binding. YG. 22/05/2013
            if len(run_list) == 1:
                binds["run_list"] = run_list[0]
            elif len(run_list) > 1:
                newbinds = []
                for r in run_list:
                    b = {}
                    b.update(binds)
                    b["run_list"] = r
                    newbinds.append(b)
                binds = newbinds
        # Make sure when we have a lumi_list, there is only ONE run  -- YG 14/05/2013

        # KEEP lumi_list as the LAST CHECK in this DAO, this is a MUST ---  ANZAR 08/23/2010
        if lumi_list:
            wheresql += " AND FL.LUMI_SECTION_NUM in ( "
            counter = 0
            for alumi in lumi_list:
                if counter > 0:
                    wheresql += ","
                wheresql += ":lumi_b%s" % counter
                binds.update({"lumi_b%s" % counter: alumi})
                counter += 1
            wheresql += ")"

        sql = " ".join((basesql, self.fromsql, joinsql, wheresql))
        #print "sql=%s" %sql
        #print "binds=%s" %binds
        cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
        result = []
        for i in range(len(cursors)):
            result.extend(self.formatCursor(cursors[i]))
        return result
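A detail worth noting in the DAO above: when several single runs are supplied against a " FL.RUN_NUM = :run_list " predicate, binds is expanded from one dictionary into a list of dictionaries, one per run, so the same statement is executed once per value (and list bindings must stay last). A standalone sketch of that expansion, assuming the DB layer accepts either a dict or a list of dicts, as dbi.processData appears to:

def expand_run_binds(binds, run_list):
    # One execution per run: copy the shared binds and vary :run_list.
    if len(run_list) == 1:
        binds["run_list"] = run_list[0]
        return binds
    return [dict(binds, run_list=r) for r in run_list]

# Example: the same LFN filter applied to runs 100 and 200.
print(expand_run_binds({"logical_file_name": "/store/data/x.root"}, [100, 200]))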
Example #13
    def execute(self, conn, block_name="", dataset="",  run_num=-1, validFileOnly=0, sumOverLumi=0, transaction=False):
        binds = {}
        whererun = ''
        run_list = []
        wheresql_run_list = ''
        wheresql_run_range = ''
        wheresql_isFileValid = ''
        join_valid_ds1 = ''
        join_valid_ds2 = ''
        join_bk_fl = ''
        sql = ''
        #
        if int(validFileOnly) == 1:
            wheresql_isFileValid = """ and f.is_file_valid = 1 and DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') """
            join_valid_ds1 = """ JOIN %sDATASETS D ON D.DATASET_ID = F.DATASET_ID
                                 JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID
                             """ % ((self.owner,)*2)
            join_valid_ds2 = """ JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID
                             """ % (self.owner,)
            join_bk_fl = """ join %sfiles f on f.block_id = b.block_id
                             """ % (self.owner,)
        #
        if run_num != -1:
            #
            for r in parseRunRange(run_num):
                if isinstance(r, basestring) or isinstance(r, (long, int)):
                    #if not wheresql_run_list:
                    #    wheresql_run_list = " fl.RUN_NUM = :run_list "
                    run_list.append(str(r))
                if isinstance(r, run_tuple):
                    if r[0] == r[1]:
                        dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.",
                                            self.logger.exception)
                    wheresql_run_range = " fl.RUN_NUM between :minrun and :maxrun "
                    binds.update({"minrun":r[0]})
                    binds.update({"maxrun":r[1]})
            #
            if run_list:
                wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                sql =  "{run_generator}".format(run_generator=run_generator)
                binds.update(run_binds)
            if wheresql_run_list and wheresql_run_range:
                whererun = wheresql_run_range + " or " + wheresql_run_list
            elif wheresql_run_list:
                whererun = wheresql_run_list
            elif wheresql_run_range:
                whererun = wheresql_run_range
        self.logger.debug('sumOverLumi=%s' %sumOverLumi)
        if block_name:
            if run_num != -1:
                if int(sumOverLumi) == 0:
                #
                    sql = sql +\
                    """
                    select
                    (select count(f.file_id)  from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                     and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun} )
                    ) as num_file,
                    nvl((select sum(f.event_count) event_count from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ),0) as num_event,
                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as file_size,
                     (select count(distinct b.block_id) from {owner}blocks b
                     join {owner}files f on f.block_id=b.block_id {join_valid_ds1}
                     where b.block_name=:block_name {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                     ) as num_block,
                    (select count(*) from (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                    join {owner}file_lumis fl on fl.file_id=f.file_id
                    join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                    where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and {whererun} )
                    ) as num_lumi
                    from dual
                    """.format(owner=self.owner, whererun=whererun, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds1=join_valid_ds1)
                    binds.update({"block_name":block_name})
                elif int(sumOverLumi) == 1:
                    sql = sql +\
                    """
                    select
                    (select count(f.file_id)  from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                     and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun} )
                    ) as num_file,
                    (
                     with myFiles as
                     (select file_id from {owner}files f 
                      join {owner}blocks b on b.block_id = f.block_id {join_valid_ds1}
                      where b.block_name=:block_name  {wheresql_isFileValid}
                     ) 
                    select sum(fl.event_count) event_count from {owner}file_lumis fl
                    join myFiles on myFiles.file_id = fl.file_id
                    where {whererun} and  
                    not exists (select fl2.file_id from {owner}file_lumis fl2
                               join myFiles on myFiles.file_id = fl2.file_id 
                               where fl2.event_count is null )
                    ) as num_event,

                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as file_size,
                     (select count(distinct b.block_id) from {owner}blocks b
                     join {owner}files f on f.block_id=b.block_id {join_valid_ds1}
                     where b.block_name=:block_name {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                     ) as num_block,
                    (select count(*) from (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                    join {owner}file_lumis fl on fl.file_id=f.file_id
                    join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                    where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and {whererun} )
                    ) as num_lumi
                    from dual
                    """.format(owner=self.owner, whererun=whererun, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds1=join_valid_ds1)
                    binds.update({"block_name":block_name})

            else:
                sql = """
                    select
                   (select count(f.file_id)  from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                    ) as num_file,

                    nvl((select sum(f.event_count) event_count from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                    ),0) as num_event,

                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                    ) as file_size,

                    (select nvl(count(distinct b.block_id),0) from {owner}blocks b
                     {join_bk_fl}
                     {join_valid_ds1}
                     where b.block_name=:block_name {wheresql_isFileValid}
                    ) as num_block,

                   (select count(*) from (select distinct l.lumi_section_num, l.run_num from {owner}files f
                    join {owner}file_lumis l on l.file_id=f.file_id
                    join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                    where b.BLOCK_NAME=:block_name {wheresql_isFileValid})
                   ) as num_lumi
                   from dual
                    """ .format(owner=self.owner, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds1=join_valid_ds1, join_bk_fl=join_bk_fl)
                binds.update({"block_name":block_name}) 

        elif dataset:
            if run_num != -1:
                if int(sumOverLumi) == 0:
                    sql = sql + \
                    """
                    select
                    (select count(f.file_id)  from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_file,

                    nvl((select sum(f.event_count) event_count from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id  {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ),0) as num_event,

                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as file_size,

                    (select count(distinct b.block_id) from {owner}blocks b
                     join {owner}datasets d on d.dataset_id = b.dataset_id {join_valid_ds2}
                     join {owner}files f on f.block_id = b.block_id
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_block,

                   (select count(*) from (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                    join {owner}file_lumis fl on fl.file_id=f.file_id
                    join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                    where d.dataset=:dataset {wheresql_isFileValid} and {whererun} )
                   ) as num_lumi
                    from dual
                    """.format(owner=self.owner, whererun=whererun, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds2=join_valid_ds2)
                    binds.update({"dataset":dataset})
                elif int(sumOverLumi) == 1:
                    sql = sql + \
                    """
                    select
                    (select count(f.file_id)  from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_file,
                    (
                    with myFiles as
                     (select file_id from {owner}files f 
                      join {owner}datasets d on d.dataset_id = f.dataset_id {join_valid_ds2}
                      where d.dataset=:dataset  {wheresql_isFileValid}
                     ) 
                    select sum(fl.event_count) event_count from {owner}file_lumis fl
                    join myFiles on myFiles.file_id = fl.file_id
                    where {whererun} and  
                    not exists (select fl2.file_id from {owner}file_lumis fl2
                               join myFiles on myFiles.file_id = fl2.file_id 
                               where fl2.event_count is null )
                    ) as num_event,

                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as file_size,

                    (select count(distinct b.block_id) from {owner}blocks b
                     join {owner}datasets d on d.dataset_id = b.dataset_id {join_valid_ds2}
                     join {owner}files f on f.block_id = b.block_id
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_block,

                   (select count(*) from (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                    join {owner}file_lumis fl on fl.file_id=f.file_id
                    join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                    where d.dataset=:dataset {wheresql_isFileValid} and {whererun} )
                   ) as num_lumi
                    from dual
                    """.format(owner=self.owner, whererun=whererun, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds2=join_valid_ds2)
                binds.update({"dataset":dataset})
            else:
                sql = """ \
                    select
                   (select count(f.file_id)  from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid}
                    ) as num_file,

                    nvl((select sum(f.event_count) event_count from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid}
                    ),0) as num_event,

                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid}
                    ) as file_size,

                    (select count(distinct b.block_id) from {owner}blocks b
                     join {owner}datasets d on d.dataset_id = b.dataset_id {join_valid_ds2}
                     {join_bk_fl}
                     where d.dataset=:dataset {wheresql_isFileValid}
                    ) as num_block,

                   (select count(*) from (select distinct l.lumi_section_num, l.run_num from {owner}file_lumis l
                    join {owner}files f on f.file_id=l.file_id
                    join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                    where d.dataset=:dataset {wheresql_isFileValid})
                   ) as num_lumi
                    from dual
                    """.format(owner=self.owner, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds2=join_valid_ds2, join_bk_fl=join_bk_fl)
                binds.update({"dataset":dataset})
        else:
            return 
        #self.logger.error(sql)
        #self.logger.error(binds)
        cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
        for i in cursors:
            d = self.formatCursor(i, size=100)
            if isinstance(d, list) or isinstance(d, GeneratorType):
                for elem in d:
                    yield elem
            elif d:
                yield d
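The sumOverLumi switch above selects between two event-count strategies: with sumOverLumi=0 events are summed from files.event_count, while with sumOverLumi=1 they are summed from file_lumis.event_count, and the "not exists" guard makes the whole sum NULL as soon as any selected file has a lumi row without an event count, since a partial sum would silently undercount. In plain Python, the guard's intent looks roughly like this (an illustration only, not code from DBS):

def sum_event_count_over_lumi(lumi_rows):
    # lumi_rows: (run_num, lumi_section_num, event_count) tuples for the
    # selected files. Mirrors the SQL's NOT EXISTS guard: refuse to report
    # a partial total rather than undercount.
    if any(event_count is None for (_, _, event_count) in lumi_rows):
        return None                      # the SQL branch yields NULL num_event
    return sum(event_count for (_, _, event_count) in lumi_rows)

print(sum_event_count_over_lumi([(100, 1, 50), (100, 2, 60)]))    # 110
print(sum_event_count_over_lumi([(100, 1, 50), (100, 2, None)]))  # None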
Example #14
    def execute(self, conn, dataset="", block_name="", data_tier_name="", origin_site_name="", logical_file_name="",
                run_num=-1, min_cdate=0, max_cdate=0, min_ldate=0, max_ldate=0, cdate=0,
                ldate=0, open_for_writing=-1, transaction = False):
        """
        dataset: /a/b/c
        block: /a/b/c#d
        """
        binds = {}

        basesql = self.sql
        joinsql = ""
        wheresql = ""
        generatedsql = ""

        if logical_file_name and logical_file_name != "%":
            joinsql +=  " JOIN %sFILES FL ON FL.BLOCK_ID = B.BLOCK_ID " %(self.owner)
            op =  ("=", "like")["%" in logical_file_name]
            wheresql +=  " WHERE LOGICAL_FILE_NAME %s :logical_file_name " % op
            binds.update( logical_file_name = logical_file_name )

        if  block_name and  block_name !="%":
            andorwhere = ("WHERE", "AND")[bool(wheresql)]
            op =  ("=", "like")["%" in block_name]
            wheresql +=  " %s B.BLOCK_NAME %s :block_name " % ((andorwhere, op))
            binds.update( block_name = block_name )

        if data_tier_name or (dataset and dataset!="%"):
            joinsql += "JOIN %sDATASETS DS ON DS.DATASET_ID = B.DATASET_ID "  % (self.owner)
            andorwhere = ("WHERE", "AND")[bool(wheresql)]
            if dataset:
                op = ("=", "like")["%" in dataset]
                wheresql += " %s DS.DATASET %s :dataset " % ((andorwhere, op))
                binds.update(dataset=dataset)
            if data_tier_name:
                joinsql += "JOIN {owner}DATA_TIERS DT ON DS.DATA_TIER_ID=DT.DATA_TIER_ID ".format(owner=self.owner)
                wheresql += " %s DT.DATA_TIER_NAME=:data_tier_name " % (andorwhere)
                binds.update(data_tier_name=data_tier_name)

        if origin_site_name and  origin_site_name != "%":
            op = ("=", "like")["%" in origin_site_name]
            wheresql += " AND B.ORIGIN_SITE_NAME %s :origin_site_name " % op
            binds.update(origin_site_name = origin_site_name)

        if open_for_writing == 0 or open_for_writing == 1:
            wheresql += " AND B.OPEN_FOR_WRITTING = :open_for_writing "
            binds.update(open_for_writing=open_for_writing)

        if cdate != 0:
            wheresql += "AND B.CREATION_DATE = :cdate "
            binds.update(cdate = cdate)
        elif min_cdate != 0 and max_cdate != 0:
            wheresql += "AND B.CREATION_DATE BETWEEN :min_cdate and :max_cdate "
            binds.update(min_cdate = min_cdate)
            binds.update(max_cdate = max_cdate)
        elif min_cdate != 0 and max_cdate == 0:
            wheresql += "AND B.CREATION_DATE > :min_cdate "
            binds.update(min_cdate = min_cdate)
        elif min_cdate ==0 and max_cdate != 0:
            wheresql += "AND B.CREATION_DATE < :max_cdate "
            binds.update(max_cdate = max_cdate)
        else:
            pass
        if ldate != 0:
            wheresql += "AND B.LAST_MODIFICATION_DATE = :ldate "
            binds.update(ldate = ldate)
        elif min_ldate != 0 and max_ldate != 0:
            wheresql += "AND B.LAST_MODIFICATION_DATE BETWEEN :min_ldate and :max_ldate "
            binds.update(min_ldate = min_ldate)
            binds.update(max_ldate = max_ldate)
        elif min_ldate != 0 and max_ldate == 0:
            wheresql += "AND B.LAST_MODIFICATION_DATE > :min_ldate "
            binds.update(min_ldate = min_ldate)
        elif min_ldate == 0 and max_ldate != 0:
            wheresql += "AND B.LAST_MODIFICATION_DATE < :max_ldate "
            binds.update(max_ldate = max_ldate)
        else:
            pass

        # one may provide a list of runs, so it has to be the last one when building the binds.
        if run_num !=-1 :
            basesql = basesql.replace("SELECT", "SELECT DISTINCT") + " , FLM.RUN_NUM  "
            if not logical_file_name:
                joinsql +=  " JOIN %sFILES FL ON FL.BLOCK_ID = B.BLOCK_ID " %(self.owner)
            joinsql += " JOIN %sFILE_LUMIS FLM on FLM.FILE_ID = FL.FILE_ID " %(self.owner)
            run_list=[]
            wheresql_run_list=''
            wheresql_run_range=''
            #
            for r in parseRunRange(run_num):
                if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
                    run_list.append(str(r))
                if isinstance(r, run_tuple):
                    if r[0] == r[1]:
                        dbsExceptionHandler('dbsException-invalid-input', "DBS run_num range must be apart at least by 1.", self.logger.exception)
                    wheresql_run_range = " FLM.RUN_NUM between :minrun and :maxrun "
                    binds.update({"minrun":r[0]})
                    binds.update({"maxrun":r[1]})
            #
            if run_list:
                wheresql_run_list = " FLM.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                generatedsql, run_binds = create_token_generator(run_list)
                binds.update(run_binds)
            #  
            if wheresql_run_range and wheresql_run_list:
                wheresql += " and (" + wheresql_run_range + " or " +  wheresql_run_list + " )"
            elif wheresql_run_range and not wheresql_run_list:
                wheresql +=  " and " + wheresql_run_range
            elif not wheresql_run_range and wheresql_run_list:
                wheresql += " and "  + wheresql_run_list
        #
        sql = " ".join((generatedsql, basesql, self.fromsql, joinsql, wheresql))

        cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
        for i in cursors:
            d = self.formatCursor(i, size=100)
            if isinstance(d, list) or isinstance(d, GeneratorType):
                for elem in d:
                    yield elem
            elif d:
                yield d 
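parseRunRange and run_tuple come from the DBS utility layer; judging from how every DAO here consumes them, the parser yields plain run numbers for single values and run_tuple pairs for "min-max" ranges. A minimal sketch under that assumption (the real helper also does validation and accepts more input shapes):

from collections import namedtuple

run_tuple = namedtuple("run_tuple", ["min_run", "max_run"])   # assumed shape

def sketch_parse_run_range(run_num):
    # Accepts 97, "97", "97-99", or a list of those; yields ints or run_tuples.
    items = run_num if isinstance(run_num, list) else [run_num]
    for item in items:
        text = str(item)
        if "-" in text:
            low, high = text.split("-", 1)
            yield run_tuple(int(low), int(high))
        else:
            yield int(text)

print(list(sketch_parse_run_range(["97", "100-105"])))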
Example #15
    def execute(self, conn, block_name="", dataset="",  run_num=-1, validFileOnly=0, transaction=False):
        if not conn:
            dbsExceptionHandler("dbsException-db-conn-failed","Oracle/File/SummaryList. Expects db connection from upper layer.")

        binds = {}
        whererun = ''
        run_list = []
        wheresql_run_list = ''
        wheresql_run_range = ''
        wheresql_isFileValid = ''
        join_valid_ds1 = ''
        join_valid_ds2 = ''
        sql = ''
        #
        if int(validFileOnly) == 1:
            wheresql_isFileValid = """ and f.is_file_valid = 1 and DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION') """
            join_valid_ds1 = """ JOIN %sDATASETS D ON D.DATASET_ID = F.DATASET_ID
                                 JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID
                             """ % ((self.owner,)*2)
            join_valid_ds2 = """ JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID
                             """ % (self.owner,)
        #
        if run_num != -1:
            #
            for r in parseRunRange(run_num):
                if isinstance(r, basestring) or isinstance(r, (long,int)):
                    #if not wheresql_run_list:
                    #    wheresql_run_list = " fl.RUN_NUM = :run_list "
                    run_list.append(str(r))
                if isinstance(r, run_tuple):
                    if r[0] == r[1]:
                        dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.")
                    wheresql_run_range = " fl.RUN_NUM between :minrun and :maxrun "
                    binds.update({"minrun":r[0]})
                    binds.update({"maxrun":r[1]})
            #
            if run_list:
                wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                sql =  "{run_generator}".format(run_generator=run_generator)
                binds.update(run_binds)
            if wheresql_run_list and wheresql_run_range:
                whererun = wheresql_run_range + " or " + wheresql_run_list
            elif wheresql_run_list:
                whererun = wheresql_run_list
            elif wheresql_run_range:
                whererun = wheresql_run_range
        if block_name:
            if run_num != -1:
                #
                sql = sql +\
                   """
                    select
                   (select count(f.file_id)  from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                     and f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun} )
                    ) as num_file,
                    nvl((select sum(f.event_count) event_count from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ),0) as num_event,
                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as file_size,
                   (select count(distinct b.block_id) from {owner}blocks b
                   join {owner}files f on f.block_id=b.block_id {join_valid_ds1}
                   where b.block_name=:block_name {wheresql_isFileValid} and
                   f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                   ) as num_block,
                   (select count(*) from (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                    join {owner}file_lumis fl on fl.file_id=f.file_id
                    join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                    where b.BLOCK_NAME=:block_name {wheresql_isFileValid} and {whererun} )
                   ) as num_lumi
                   from dual
                    """.format(owner=self.owner, whererun=whererun, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds1=join_valid_ds1)
                binds.update({"block_name":block_name})
            else:
                sql = """
                    select
                   (select count(f.file_id)  from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                    ) as num_file,

                    nvl((select sum(f.event_count) event_count from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                    ),0) as num_event,

                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                     where b.BLOCK_NAME=:block_name {wheresql_isFileValid}
                    ) as file_size,

                    (select count(block_id) from {owner}blocks where block_name=:block_name
                    ) as num_block,

                   (select count(*) from (select distinct l.lumi_section_num, l.run_num from {owner}files f
                    join {owner}file_lumis l on l.file_id=f.file_id
                    join {owner}blocks b on b.BLOCK_ID = f.block_id {join_valid_ds1}
                    where b.BLOCK_NAME=:block_name {wheresql_isFileValid})
                   ) as num_lumi
                   from dual
                    """ .format(owner=self.owner, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds1=join_valid_ds1 )
                binds.update({"block_name":block_name})

        elif dataset:
            if run_num != -1:
                sql = sql + \
                    """
                    select
                    (select count(f.file_id)  from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_file,

                    nvl((select sum(f.event_count) event_count from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id  {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ),0) as num_event,

                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as file_size,

                    (select count(distinct b.block_id) from {owner}blocks b
                     join {owner}datasets d on d.dataset_id = b.dataset_id {join_valid_ds2}
                     join {owner}files f on f.block_id = b.block_id
                     where d.dataset=:dataset {wheresql_isFileValid} and
                     f.FILE_ID in (select fl.file_id from {owner}file_lumis fl where {whererun})
                    ) as num_block,

                   (select count(*) from (select distinct fl.lumi_section_num, fl.run_num from {owner}files f
                    join {owner}file_lumis fl on fl.file_id=f.file_id
                    join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                    where d.dataset=:dataset {wheresql_isFileValid} and {whererun} )
                   ) as num_lumi
                    from dual
                    """.format(owner=self.owner, whererun=whererun, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds2=join_valid_ds2)
                binds.update({"dataset":dataset})
            else:
                sql = """ \
                    select
                   (select count(f.file_id)  from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid}
                    ) as num_file,

                    nvl((select sum(f.event_count) event_count from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid}
                    ),0) as num_event,

                    (select nvl(sum(f.file_size),0) file_size from {owner}files f
                     join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset {wheresql_isFileValid}
                    ) as file_size,

                    (select count(b.block_id) from {owner}blocks b
                     join {owner}datasets d on d.dataset_id = b.dataset_id {join_valid_ds2}
                     where d.dataset=:dataset
                    ) as num_block,

                   (select count(*) from (select distinct l.lumi_section_num, l.run_num from {owner}files f
                    join {owner}file_lumis l on l.file_id=f.file_id
                    join {owner}datasets d on d.DATASET_ID = f.dataset_id {join_valid_ds2}
                    where d.dataset=:dataset {wheresql_isFileValid})
                   ) as num_lumi
                    from dual
                    """.format(owner=self.owner, wheresql_isFileValid=wheresql_isFileValid, join_valid_ds2=join_valid_ds2)
                binds.update({"dataset":dataset})
        else:
            return []

        cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
        result = []
        for i in range(len(cursors)):
            result.extend(self.formatCursor(cursors[i]))
        return result
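When validFileOnly=1, every subquery above is narrowed to valid files in VALID or PRODUCTION datasets by splicing the join_valid_ds* fragments and the wheresql_isFileValid tail into the statement. The snippet below just demonstrates how those fragments compose into one such subquery; the schema prefix is a made-up placeholder, not necessarily the production one.

# Demonstration of fragment composition only; "CMS_DBS3_OWNER." is a
# hypothetical schema prefix.
owner = "CMS_DBS3_OWNER."
valid_file_only = True

wheresql_isFileValid = ""
join_valid_ds1 = ""
if valid_file_only:
    wheresql_isFileValid = (" and f.is_file_valid = 1"
                            " and DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION')")
    join_valid_ds1 = (" JOIN %sDATASETS D ON D.DATASET_ID = F.DATASET_ID"
                      " JOIN %sDATASET_ACCESS_TYPES DT"
                      " ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID"
                      % (owner, owner))

num_file_sql = ("select count(f.file_id) from {owner}files f "
                "join {owner}blocks b on b.BLOCK_ID = f.block_id {join} "
                "where b.BLOCK_NAME=:block_name {valid}"
                ).format(owner=owner, join=join_valid_ds1, valid=wheresql_isFileValid)
print(num_file_sql)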
Example #16
    def execute(self,
                conn,
                dataset="",
                is_dataset_valid=1,
                parent_dataset="",
                release_version="",
                pset_hash="",
                app_name="",
                output_module_label="",
                global_tag="",
                processing_version=0,
                acquisition_era="",
                run_num=-1,
                physics_group_name="",
                logical_file_name="",
                primary_ds_name="",
                primary_ds_type="",
                processed_ds_name="",
                data_tier_name="",
                dataset_access_type="",
                prep_id="",
                create_by='',
                last_modified_by='',
                min_cdate=0,
                max_cdate=0,
                min_ldate=0,
                max_ldate=0,
                cdate=0,
                ldate=0,
                dataset_id=-1,
                transaction=False):
        if not conn:
            dbsExceptionHandler(
                "dbsException-db-conn-failed",
                "Oracle/Dataset/BriefList.  Expects db connection from upper layer."
            )
        selectsql = 'SELECT '
        joinsql = ''
        generatedsql = ''
        binds = {}
        wheresql = 'WHERE D.IS_DATASET_VALID=:is_dataset_valid '
        if dataset and type(dataset) is list:  # for the POST method
            #wheresql += " AND D.DATASET=:dataset "
            ds_generator, binds2 = create_token_generator(dataset)
            binds.update(binds2)
            wheresql += " AND D.DATASET in (SELECT TOKEN FROM TOKEN_GENERATOR)"
            generatedsql = "{ds_generator}".format(ds_generator=ds_generator)
            if dataset_access_type and (dataset_access_type != "%"
                                        or dataset_access_type != '*'):
                joinsql += " JOIN %sDATASET_ACCESS_TYPES DP on DP.DATASET_ACCESS_TYPE_ID= D.DATASET_ACCESS_TYPE_ID " % (
                    self.owner)
                op = ("=", "like")["%" in dataset_access_type
                                   or "*" in dataset_access_type]
                wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
                binds['dataset_access_type'] = dataset_access_type
                binds['is_dataset_valid'] = is_dataset_valid
            else:
                binds['is_dataset_valid'] = is_dataset_valid
        elif dataset_id is not None and type(dataset_id) is not int:  # for the POST method
            # we treat dataset_id the same way as run_num: it can be id1-id2, id, or [id1, id2, id3, ...]
            dataset_id_list = []
            wheresql_dataset_id_list = ''
            wheresql_dataset_id_range = ''
            for id in parseRunRange(dataset_id):
                if isinstance(id, basestring) or isinstance(
                        id, int) or isinstance(id, long):
                    dataset_id_list.append(str(id))
                if isinstance(id, run_tuple):
                    if id[0] == id[1]:
                        dbsExceptionHandler(
                            'dbsException-invalid-input',
                            "DBS dataset_id range must be apart at least by 1."
                        )
                    wheresql_dataset_id_range = " D.DATASET_ID between :minid and :maxid "
                    binds.update({"minid": id[0]})
                    binds.update({"maxid": id[1]})
            if dataset_id_list:
                ds_generator, binds2 = create_token_generator(dataset_id_list)
                binds.update(binds2)
                wheresql_dataset_id_list = " D.DATASET_ID in (SELECT TOKEN FROM TOKEN_GENERATOR)"
                generatedsql = "{ds_generator}".format(
                    ds_generator=ds_generator)
            if dataset_access_type and (dataset_access_type != "%"
                                        or dataset_access_type != '*'):
                joinsql += " JOIN %sDATASET_ACCESS_TYPES DP on DP.DATASET_ACCESS_TYPE_ID= D.DATASET_ACCESS_TYPE_ID " % (
                    self.owner)
                op = ("=", "like")["%" in dataset_access_type
                                   or "*" in dataset_access_type]
                wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
                binds['dataset_access_type'] = dataset_access_type
                binds['is_dataset_valid'] = is_dataset_valid
            else:
                binds['is_dataset_valid'] = is_dataset_valid
            if wheresql_dataset_id_list and wheresql_dataset_id_range:
                wheresql += " and  (" + wheresql_dataset_id_list + " or " + wheresql_dataset_id_range + " )"
            elif wheresql_dataset_id_list and not wheresql_dataset_id_range:
                wheresql += " and " + wheresql_dataset_id_list
            elif not wheresql_dataset_id_list and wheresql_dataset_id_range:
                wheresql += " and " + wheresql_dataset_id_range
        else:  #for the GET method
            binds.update(is_dataset_valid=is_dataset_valid)
            if cdate != 0:
                wheresql += "AND D.CREATION_DATE = :cdate "
                binds.update(cdate=cdate)
            elif min_cdate != 0 and max_cdate != 0:
                wheresql += "AND D.CREATION_DATE BETWEEN :min_cdate and :max_cdate "
                binds.update(min_cdate=min_cdate)
                binds.update(max_cdate=max_cdate)
            elif min_cdate != 0 and max_cdate == 0:
                wheresql += "AND D.CREATION_DATE > :min_cdate "
                binds.update(min_cdate=min_cdate)
            elif min_cdate == 0 and max_cdate != 0:
                wheresql += "AND D.CREATION_DATE < :max_cdate "
                binds.update(max_cdate=max_cdate)
            else:
                pass

            if ldate != 0:
                wheresql += "AND D.LAST_MODIFICATION_DATE = :ldate "
                binds.update(ldate=ldate)
            elif min_ldate != 0 and max_ldate != 0:
                wheresql += "AND D.LAST_MODIFICATION_DATE BETWEEN :min_ldate and :max_ldate "
                binds.update(min_ldate=min_ldate)
                binds.update(max_ldate=max_ldate)
            elif min_ldate != 0 and max_ldate == 0:
                wheresql += "AND D.LAST_MODIFICATION_DATE > :min_ldate "
                binds.update(min_ldate=min_ldate)
            elif min_ldate == 0 and max_ldate != 0:
                wheresql += "AND D.LAST_MODIFICATION_DATE < :max_ldate "
                binds.update(max_ldate=max_ldate)
            else:
                pass
            if create_by:
                wheresql += " AND D.CREATE_BY = :create_by "
                binds.update(create_by=create_by)
            if last_modified_by:
                wheresql += " AND D.LAST_MODIFIED_BY = :last_modified_by "
                binds.update(last_modified_by=last_modified_by)
            if prep_id:
                wheresql += "AND D.prep_id = :prep_id "
                binds.update(prep_id=prep_id)
            if dataset and isinstance(dataset, basestring) and dataset != "%":
                op = ("=", "like")["%" in dataset]
                wheresql += " AND D.DATASET %s :dataset " % op
                binds.update(dataset=dataset)
            if dataset_id != -1:
                wheresql += " AND D.DATASET_ID = :dataset_id "
                binds.update(dataset_id=dataset_id)
            if primary_ds_name and primary_ds_name != "%":
                joinsql += " JOIN %sPRIMARY_DATASETS P ON P.PRIMARY_DS_ID = D.PRIMARY_DS_ID " % (
                    self.owner)
                op = ("=", "like")["%" in primary_ds_name]
                wheresql += " AND P.PRIMARY_DS_NAME %s :primary_ds_name " % op
                binds.update(primary_ds_name=primary_ds_name)
            if primary_ds_type and primary_ds_type != "%":
                if not primary_ds_name:
                    joinsql += " JOIN %sPRIMARY_DATASETS P ON P.PRIMARY_DS_ID = D.PRIMARY_DS_ID " % (
                        self.owner)
                joinsql += " JOIN %sPRIMARY_DS_TYPES PDT ON PDT.PRIMARY_DS_TYPE_ID = P.PRIMARY_DS_TYPE_ID " % (
                    self.owner)
                op = ("=", "like")["%" in primary_ds_type]
                wheresql += " AND PDT.PRIMARY_DS_TYPE %s :primary_ds_type " % op
                binds.update(primary_ds_type=primary_ds_type)

            if processed_ds_name and processed_ds_name != "%":
                joinsql += " JOIN %sPROCESSED_DATASETS PR ON PR.PROCESSED_DS_ID = D.PROCESSED_DS_ID " % (
                    self.owner)
                op = ("=", "like")["%" in processed_ds_name]
                wheresql += " AND PR.PROCESSED_DS_NAME %s :processed_ds_name " % op
                binds.update(processed_ds_name=processed_ds_name)

            if data_tier_name and data_tier_name != "%":
                joinsql += " JOIN %sDATA_TIERS DT ON DT.DATA_TIER_ID = D.DATA_TIER_ID " % (
                    self.owner)
                op = ("=", "like")["%" in data_tier_name]
                wheresql += " AND DT.DATA_TIER_NAME %s :data_tier_name " % op
                binds.update(data_tier_name=data_tier_name)

            if dataset_access_type and (dataset_access_type != "%"
                                        or dataset_access_type != '*'):
                joinsql += " JOIN %sDATASET_ACCESS_TYPES DP on DP.DATASET_ACCESS_TYPE_ID= D.DATASET_ACCESS_TYPE_ID " % (
                    self.owner)
                op = ("=", "like")["%" in dataset_access_type
                                   or "*" in dataset_access_type]
                wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
                binds.update(dataset_access_type=dataset_access_type)

            if physics_group_name and physics_group_name != "%":
                joinsql += " LEFT OUTER JOIN %sPHYSICS_GROUPS PH ON PH.PHYSICS_GROUP_ID = D.PHYSICS_GROUP_ID " % (
                    self.owner)
                op = ("=", "like")["%" in physics_group_name]
                wheresql += " AND PH.PHYSICS_GROUP_NAME %s :physics_group_name " % op
                binds.update(physics_group_name=physics_group_name)

            if parent_dataset:
                joinsql += """
                    LEFT OUTER JOIN %sDATASET_PARENTS DSP ON DSP.THIS_DATASET_ID = D.DATASET_ID
                    LEFT OUTER JOIN %sDATASETS PDS ON PDS.DATASET_ID = DSP.PARENT_DATASET_ID
                    """ % ((self.owner, ) * 2)
                wheresql += " AND PDS.DATASET = :parent_dataset "
                binds.update(parent_dataset=parent_dataset)

            if release_version or pset_hash or app_name or output_module_label or global_tag:
                joinsql += """
                    LEFT OUTER JOIN %sDATASET_OUTPUT_MOD_CONFIGS DOMC ON DOMC.DATASET_ID = D.DATASET_ID 
                    LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = DOMC.OUTPUT_MOD_CONFIG_ID  
                """ % ((self.owner, ) * 2)

            if release_version:
                joinsql += " LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID " % (
                    self.owner)
                op = ("=", "like")["%" in release_version]
                wheresql += " AND RV.RELEASE_VERSION %s :release_version " % op
                binds.update(release_version=release_version)

            if pset_hash:
                joinsql += " LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID " % (
                    self.owner)
                op = ("=", "like")["%" in pset_hash]
                wheresql += " AND PSH.PSET_HASH %s :pset_hash " % op
                binds.update(pset_hash=pset_hash)

            if app_name:
                joinsql += " LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID " % (
                    self.owner)
                op = ("=", "like")["%" in app_name]
                wheresql += " AND AEX.APP_NAME %s :app_name " % op
                binds.update(app_name=app_name)

            if output_module_label:
                op = ("=", "like")["%" in output_module_label]
                wheresql += " AND OMC.OUTPUT_MODULE_LABEL  %s :output_module_label " % op
                binds.update(output_module_label=output_module_label)

            if global_tag:
                op = ("=", "like")["%" in global_tag]
                wheresql += " AND OMC.GLOBAL_TAG  %s :global_tag " % op
                binds.update(global_tag=global_tag)

            if processing_version != 0:
                joinsql += " LEFT OUTER JOIN %sPROCESSING_ERAS PE ON PE.PROCESSING_ERA_ID = D.PROCESSING_ERA_ID " % (
                    self.owner)
                #op = ("=", "like")["%" in processing_version]
                op = "="
                wheresql += " AND PE.PROCESSING_VERSION %s :pversion " % op
                binds.update(pversion=processing_version)

            if acquisition_era:
                joinsql += " LEFT OUTER JOIN %sACQUISITION_ERAS AE ON AE.ACQUISITION_ERA_ID = D.ACQUISITION_ERA_ID " % (
                    self.owner)
                op = ("=", "like")["%" in acquisition_era]
                wheresql += " AND AE.ACQUISITION_ERA_NAME %s :aera " % op
                binds.update(aera=acquisition_era)

            if logical_file_name and logical_file_name != "%":
                selectsql += "DISTINCT "
                joinsql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % self.owner
                wheresql += " AND FL.LOGICAL_FILE_NAME = :logical_file_name "
                binds.update(logical_file_name=logical_file_name)
            #
            if run_num != -1:
                # mirror the condition above: for "%" the FILES alias FL was not joined yet
                if not logical_file_name or logical_file_name == "%":
                    selectsql += "DISTINCT "
                    joinsql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % (
                        self.owner)
                joinsql += " JOIN %sFILE_LUMIS FLLU on FLLU.FILE_ID=FL.FILE_ID " % (
                    self.owner)
                run_list = []
                wheresql_run_list = ''
                wheresql_run_range = ''
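                # run_num may be a single run, a 'min-run-max-run' range string, or a
                # list of both; parseRunRange normalizes it into scalar runs and
                # run_tuple pairs, which become an IN-list and a BETWEEN clause.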
                for r in parseRunRange(run_num):
                    if isinstance(r, basestring) or isinstance(
                            r, int) or isinstance(r, long):
                        run_list.append(str(r))
                    if isinstance(r, run_tuple):
                        if r[0] == r[1]:
                            dbsExceptionHandler(
                                'dbsException-invalid-input',
                                "DBS run range must be apart at least by 1.")
                        wheresql_run_range = " FLLU.RUN_NUM between :minrun and :maxrun "
                        binds.update({"minrun": r[0]})
                        binds.update({"maxrun": r[1]})
                #
                if run_list:
                    wheresql_run_list = " FLLU.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                    run_generator, run_binds = create_token_generator(run_list)
                    generatedsql = "{run_generator}".format(
                        run_generator=run_generator)
                    binds.update(run_binds)
                if wheresql_run_range and wheresql_run_list:
                    wheresql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
                elif wheresql_run_range and not wheresql_run_list:
                    wheresql += " and " + wheresql_run_range
                elif not wheresql_run_range and wheresql_run_list:
                    wheresql += " and " + wheresql_run_list

        sql = "".join(
            (generatedsql, selectsql, self.basesql, joinsql, wheresql))
        #self.logger.error( sql)
        #self.logger.error( binds)
        cursors = self.dbi.processData(sql,
                                       binds,
                                       conn,
                                       transaction,
                                       returnCursor=True)
        for i in cursors:
            d = self.formatCursor(i)
            if isinstance(d, list) or isinstance(d, GeneratorType):
                for elem in d:
                    yield elem
            elif d:
                yield d
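This execute is a generator: rows stream out of the Oracle cursors through formatCursor and are yielded one dict at a time, so callers never hold the whole result set in memory. A minimal consumption sketch; the dao and dbi objects and the dataset name are invented for illustration, and the keys available per row depend on self.basesql:

    # Hypothetical caller of the generator-style execute() above.
    conn = dbi.connection()          # assumed connection factory
    try:
        for row in dao.execute(conn, dataset='/PrimDS/ProcDS-v1/AODSIM'):
            print(row)               # one formatted dict per matching dataset
    finally:
        conn.close()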
Beispiel #17
0
    def execute(self, conn, dataset="", is_dataset_valid=1, parent_dataset="",\
                release_version="", pset_hash="", app_name="", output_module_label="",\
                global_tag="", processing_version=0, acquisition_era="", run_num=-1,\
                physics_group_name="", logical_file_name="", primary_ds_name="",\
                primary_ds_type="", processed_ds_name="", data_tier_name="", dataset_access_type="", prep_id="",\
                create_by='', last_modified_by='', min_cdate=0, max_cdate=0, min_ldate=0, max_ldate=0, cdate=0,\
                ldate=0, dataset_id=-1, transaction=False):
        if not conn:
            dbsExceptionHandler(
                "dbsException-failed-connect2host",
                "%s Oracle/Dataset/List.  Expects db connection from upper layer.",
                self.logger.exception)
        sql = ""
        generatedsql = ''
        basesql = self.basesql
        binds = {}
        wheresql = "WHERE D.IS_DATASET_VALID = :is_dataset_valid "
        if dataset and type(dataset) is list:  # for the POST method
            ds_generator, binds2 = create_token_generator(dataset)
            binds.update(binds2)
            wheresql += " AND D.DATASET in (SELECT TOKEN FROM TOKEN_GENERATOR)"
            generatedsql = "{ds_generator}".format(ds_generator=ds_generator)
            if dataset_access_type and dataset_access_type != "%":
                op = ("=", "like")["%" in dataset_access_type]
                wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
                binds['dataset_access_type'] = dataset_access_type
                binds['is_dataset_valid'] = is_dataset_valid
            else:
                binds['is_dataset_valid'] = is_dataset_valid
            sql = generatedsql + 'SELECT' + basesql + wheresql
        elif type(dataset_id) is not int:  #for the POST method
            #we treat dataset_id the same way as run_num. It can be id1-id2, a single id, or a list [id1, id2, id3, ...]
            dataset_id_list = []
            wheresql_dataset_id_list = ''
            wheresql_dataset_id_range = ''
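            # dataset_id is parsed with the same helper as run ranges, so 'id1-id2'
            # becomes a run_tuple and scalars/lists end up in an IN-list.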
            for id in parseRunRange(dataset_id):
                if isinstance(id, basestring) or isinstance(
                        id, int) or isinstance(id, long):
                    dataset_id_list.append(str(id))
                if isinstance(id, run_tuple):
                    if id[0] == id[1]:
                        dbsExceptionHandler(
                            'dbsException-invalid-input',
                            "DBS dataset_id range must be apart at least by 1.",
                            self.logger.exception)
                    wheresql_dataset_id_range = " D.DATASET_ID between :minid and :maxid "
                    binds.update({"minid": id[0]})
                    binds.update({"maxid": id[1]})
            if dataset_id_list:
                ds_generator, binds2 = create_token_generator(dataset_id_list)
                binds.update(binds2)
                wheresql_dataset_id_list = " D.DATASET_ID in (SELECT TOKEN FROM TOKEN_GENERATOR)"
                generatedsql = "{ds_generator}".format(
                    ds_generator=ds_generator)
            if dataset_access_type and dataset_access_type != "%":
                op = ("=", "like")["%" in dataset_access_type]
                wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
                binds['dataset_access_type'] = dataset_access_type
                binds['is_dataset_valid'] = is_dataset_valid
            else:
                binds['is_dataset_valid'] = is_dataset_valid
            if wheresql_dataset_id_list and wheresql_dataset_id_range:
                sql = generatedsql + 'SELECT' + basesql + wheresql + " and  ( "  \
                           +  wheresql_dataset_id_list + " or " + wheresql_dataset_id_range + " )"
            elif wheresql_dataset_id_list and not wheresql_dataset_id_range:
                sql = generatedsql + 'SELECT' + basesql + wheresql + " and " + wheresql_dataset_id_list
            elif not wheresql_dataset_id_list and wheresql_dataset_id_range:
                sql = generatedsql + 'SELECT' + basesql + wheresql + " and " + wheresql_dataset_id_range
        else:  #for the GET method
            binds.update(is_dataset_valid=is_dataset_valid)
            if cdate != 0:
                wheresql += "AND D.CREATION_DATE = :cdate "
                binds.update(cdate=cdate)
            elif min_cdate != 0 and max_cdate != 0:
                wheresql += "AND D.CREATION_DATE BETWEEN :min_cdate and :max_cdate "
                binds.update(min_cdate=min_cdate)
                binds.update(max_cdate=max_cdate)
            elif min_cdate != 0 and max_cdate == 0:
                wheresql += "AND D.CREATION_DATE > :min_cdate "
                binds.update(min_cdate=min_cdate)
            elif min_cdate == 0 and max_cdate != 0:
                wheresql += "AND D.CREATION_DATE < :max_cdate "
                binds.update(max_cdate=max_cdate)
            else:
                pass
            if ldate != 0:
                wheresql += "AND D.LAST_MODIFICATION_DATE = :ldate "
                binds.update(ldate=ldate)
            elif min_ldate != 0 and max_ldate != 0:
                wheresql += "AND D.LAST_MODIFICATION_DATE BETWEEN :min_ldate and :max_ldate "
                binds.update(min_ldate=min_ldate)
                binds.update(max_ldate=max_ldate)
            elif min_ldate != 0 and max_ldate == 0:
                wheresql += "AND D.LAST_MODIFICATION_DATE > :min_ldate "
                binds.update(min_ldate=min_ldate)
            elif min_ldate == 0 and max_ldate != 0:
                wheresql += "AND D.LAST_MODIFICATION_DATE < :max_ldate "
                binds.update(max_ldate=max_ldate)
            else:
                pass
            if create_by:
                wheresql += " AND D.CREATE_BY = :create_by "
                binds.update(create_by=create_by)
            if last_modified_by:
                wheresql += " AND D.LAST_MODIFIED_BY = :last_modified_by "
                binds.update(last_modified_by=last_modified_by)
            if prep_id:
                wheresql += "AND D.PREP_ID = :prep_id "
                binds.update(prep_id=prep_id)
            if dataset and dataset != "%":
                op = ("=", "like")["%" in dataset]
                wheresql += " AND D.DATASET %s :dataset " % op
                binds.update(dataset=dataset)
            if dataset_id != -1:
                wheresql += " AND D.DATASET_ID = :dataset_id "
                binds.update(dataset_id=dataset_id)
            if primary_ds_name and primary_ds_name != "%":
                op = ("=", "like")["%" in primary_ds_name]
                wheresql += " AND P.PRIMARY_DS_NAME %s :primary_ds_name " % op
                binds.update(primary_ds_name=primary_ds_name)
            if processed_ds_name and processed_ds_name != "%":
                op = ("=", "like")["%" in processed_ds_name]
                wheresql += " AND PD.PROCESSED_DS_NAME %s :processed_ds_name " % op
                binds.update(processed_ds_name=processed_ds_name)
            if data_tier_name and data_tier_name != "%":
                op = ("=", "like")["%" in data_tier_name]
                wheresql += " AND DT.DATA_TIER_NAME %s :data_tier_name " % op
                binds.update(data_tier_name=data_tier_name)
            if dataset_access_type and dataset_access_type != "%":
                op = ("=", "like")["%" in dataset_access_type]
                wheresql += " AND DP.DATASET_ACCESS_TYPE %s :dataset_access_type " % op
                binds.update(dataset_access_type=dataset_access_type)
            if primary_ds_type and primary_ds_type != "%":
                op = ("=", "like")["%" in primary_ds_type]
                wheresql += " AND PDT.PRIMARY_DS_TYPE %s :primary_ds_type " % op
                binds.update(primary_ds_type=primary_ds_type)
            if physics_group_name and physics_group_name != "%":
                op = ("=", "like")["%" in physics_group_name]
                wheresql += " AND PH.PHYSICS_GROUP_NAME %s :physics_group_name " % op
                binds.update(physics_group_name=physics_group_name)

            if parent_dataset:
                basesql = "PDS.DATASET PARENT_DATASET, " + basesql
                basesql += """
                LEFT OUTER JOIN %sDATASET_PARENTS DSP ON DSP.THIS_DATASET_ID = D.DATASET_ID
                LEFT OUTER JOIN %sDATASETS PDS ON PDS.DATASET_ID = DSP.PARENT_DATASET_ID
                """ % ((self.owner, ) * 2)
                wheresql += " AND PDS.DATASET = :parent_dataset "
                binds.update(parent_dataset=parent_dataset)

            if release_version or pset_hash or app_name or output_module_label or global_tag:
                # global_tag also lives on OUTPUT_MODULE_CONFIGS, so it needs these joins too
                basesql = """
                OMC.OUTPUT_MODULE_LABEL, OMC.GLOBAL_TAG,
                RV.RELEASE_VERSION,
                PSH.PSET_HASH,
                AEX.APP_NAME, """ + basesql
                basesql += """
                LEFT OUTER JOIN %sDATASET_OUTPUT_MOD_CONFIGS DOMC ON DOMC.DATASET_ID = D.DATASET_ID
                LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = DOMC.OUTPUT_MOD_CONFIG_ID
                LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID
                LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID
                LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID
                """ % ((self.owner, ) * 5)

            if release_version:
                op = ("=", "like")["%" in release_version]
                wheresql += " AND RV.RELEASE_VERSION %s :release_version " % op
                binds.update(release_version=release_version)
            if pset_hash:
                op = ("=", "like")["%" in pset_hash]
                wheresql += " AND PSH.PSET_HASH %s :pset_hash " % op
                binds.update(pset_hash=pset_hash)
            if app_name:
                op = ("=", "like")["%" in app_name]
                wheresql += " AND AEX.APP_NAME %s :app_name " % op
                binds.update(app_name=app_name)
            if output_module_label:
                op = ("=", "like")["%" in output_module_label]
                wheresql += " AND OMC.OUTPUT_MODULE_LABEL  %s :output_module_label " % op
                binds.update(output_module_label=output_module_label)
            if global_tag:
                op = ("=", "like")["%" in global_tag]
                wheresql += " AND OMC.GLOBAL_TAG  %s :global_tag " % op
                binds.update(global_tag=global_tag)
            if processing_version != 0:
                #op = ("=", "like")["%" in processing_version]
                op = "="
                wheresql += " AND PE.PROCESSING_VERSION %s :pversion " % op
                binds.update(pversion=processing_version)
            if acquisition_era:
                op = ("=", "like")["%" in acquisition_era]
                wheresql += " AND AE.ACQUISITION_ERA_NAME %s :aera " % op
                binds.update(aera=acquisition_era)

            # These branches mirror the original cases in the business logic
            if (not logical_file_name or logical_file_name == "%") and run_num == -1:
                # no file or run filter: execute the query as built
                sql = "SELECT " + basesql + wheresql

            elif run_num == -1 and logical_file_name and logical_file_name != "%":
                # filter by logical_file_name only
                sql = "SELECT DISTINCT " + basesql
                sql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % self.owner
                wheresql += " AND FL.LOGICAL_FILE_NAME = :logical_file_name "
                binds.update(logical_file_name=logical_file_name)
                sql += wheresql

            elif run_num != -1:
                # filter by run_num, optionally by logical_file_name as well
                sql += "SELECT DISTINCT " + basesql
                if logical_file_name:
                    sql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % self.owner
                    wheresql += " AND FL.LOGICAL_FILE_NAME = :logical_file_name "
                    binds.update(logical_file_name=logical_file_name)
                else:
                    sql += " JOIN %sFILES FL on FL.DATASET_ID = D.DATASET_ID " % (
                        self.owner)
                sql += " JOIN %sFILE_LUMIS FLLU on FLLU.FILE_ID=FL.FILE_ID " % (
                    self.owner)
                run_list = []
                wheresql_run_list = ''
                wheresql_run_range = ''
                for r in parseRunRange(run_num):
                    if isinstance(r, basestring) or isinstance(
                            r, int) or isinstance(r, long):
                        run_list.append(str(r))
                    if isinstance(r, run_tuple):
                        if r[0] == r[1]:
                            dbsExceptionHandler(
                                'dbsException-invalid-input',
                                "DBS run range must be apart at least by 1.",
                                self.logger.exception)
                        wheresql_run_range = " FLLU.RUN_NUM between :minrun and :maxrun "
                        binds.update({"minrun": r[0]})
                        binds.update({"maxrun": r[1]})
                #
                if run_list:
                    wheresql_run_list = " FLLU.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                    run_generator, run_binds = create_token_generator(run_list)
                    sql = "{run_generator}".format(
                        run_generator=run_generator) + sql
                    binds.update(run_binds)
                if wheresql_run_range and wheresql_run_list:
                    wheresql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
                elif wheresql_run_range and not wheresql_run_list:
                    wheresql += " and " + wheresql_run_range
                elif not wheresql_run_range and wheresql_run_list:
                    wheresql += " and " + wheresql_run_list

                sql += wheresql
            else:
                dbsExceptionHandler(
                    "dbsException-invalid-input",
                    "Oracle/Dataset/List. Proper parameters are not provided for the listDatasets call.",
                    self.logger.exception)
        #self.logger.error( sql)
        #self.logger.error("binds=%s" %binds)
        cursors = self.dbi.processData(sql,
                                       binds,
                                       conn,
                                       transaction,
                                       returnCursor=True)
        for i in cursors:
            d = self.formatCursor(i, size=100)
            if isinstance(d, list) or isinstance(d, GeneratorType):
                for elem in d:
                    yield elem
            elif d:
                yield d
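Both variants lean on create_token_generator to get around Oracle's limit of 1000 literals in an IN-list: the helper returns a SQL prefix that defines a TOKEN_GENERATOR row source plus the matching binds, and the main query then filters with "in (SELECT TOKEN FROM TOKEN_GENERATOR)". A toy stand-in that shows the shape of that contract; the real DBS helper differs in detail and this is illustration only:

    # Toy stand-in for create_token_generator -- not the DBS implementation.
    def toy_token_generator(values):
        binds = {}
        selects = []
        for n, v in enumerate(values):
            key = "token_%d" % n
            binds[key] = str(v)
            selects.append("SELECT :%s TOKEN FROM DUAL" % key)
        # WITH clause that the caller prepends to its SELECT
        return "WITH TOKEN_GENERATOR AS (%s) " % " UNION ALL ".join(selects), binds

    prefix, binds = toy_token_generator([194075, 194076, 194108])
    sql = prefix + "SELECT D.DATASET FROM DATASETS D WHERE D.DATASET_ID in (SELECT TOKEN FROM TOKEN_GENERATOR)"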
Beispiel #18
0
File: List.py Project: dmwm/DBS
    def execute(self, conn, logical_file_name='', block_name='', run_num=-1, validFileOnly=0, migration=False):
        """
        Lists lumi section numbers with in a file, a list of files or a block.
        """
	sql = ""
	wheresql = ""
	lfn_generator = ""
	run_generator = ""
        if logical_file_name and not isinstance(logical_file_name, list):
            binds = {'logical_file_name': logical_file_name}
            if int(validFileOnly) == 0:
                if migration:   # migration is always called with a single file and includes all files, valid or not
                    sql = self.sql + """ FROM {owner}FILE_LUMIS FL
                                     JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
                                     WHERE F.LOGICAL_FILE_NAME = :logical_file_name
                                  """.format(owner=self.owner)
                else:
                    sql = self.sql + """ , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME FROM {owner}FILE_LUMIS FL
                                     JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
                                     WHERE F.LOGICAL_FILE_NAME = :logical_file_name
                                  """.format(owner=self.owner)
            else:
                sql = self.sql + """ , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME FROM {owner}FILE_LUMIS FL
                                     JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
                                     JOIN {owner}DATASETS D ON D.DATASET_ID = F.DATASET_ID
                                     JOIN {owner}DATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID
                                     WHERE F.IS_FILE_VALID = 1 AND F.LOGICAL_FILE_NAME = :logical_file_name
                                     AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION')
                                 """.format(owner=self.owner)
        elif logical_file_name and isinstance(logical_file_name, list):
	    sql = self.sql + """ , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME FROM {owner}FILE_LUMIS FL JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID """.format(owner=self.owner)	
            lfn_generator, binds = create_token_generator(logical_file_name)
            if int(validFileOnly) == 0:
                wheresql = "WHERE F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)"
            else:
		sql = sql + """ JOIN {owner}DATASETS D ON  D.DATASET_ID = F.DATASET_ID 
				JOIN {owner}DATASET_ACCESS_TYPES DT ON  DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID
		            """.format(owner=self.owner)			
                wheresql = """ WHERE F.IS_FILE_VALID = 1 AND F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR) 
			       AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION')
			   """
            sql = "{lfn_generator} {sql} {wheresql}".format(lfn_generator=lfn_generator, sql=sql, wheresql=wheresql)
        elif block_name:
            binds = {'block_name': block_name}
            if int(validFileOnly) == 0:
                sql = self.sql + """ , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME FROM {owner}FILE_LUMIS FL JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID  
				     JOIN {owner}BLOCKS B ON B.BLOCK_ID = F.BLOCK_ID  
				     WHERE B.BLOCK_NAME = :block_name""".format(owner=self.owner)
            else:
                sql = self.sql + """ , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME FROM {owner}FILE_LUMIS FL JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID 
				     JOIN {owner}DATASETS D ON  D.DATASET_ID = F.DATASET_ID 
				     JOIN {owner}DATASET_ACCESS_TYPES DT ON  DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID 
				     JOIN {owner}BLOCKS B ON B.BLOCK_ID = F.BLOCK_ID
				     WHERE F.IS_FILE_VALID = 1 AND B.BLOCK_NAME = :block_name 
				     AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION')	
				""".format(owner=self.owner)
        else:
            dbsExceptionHandler('dbsException-invalid-input2', "FileLumi/List: Either logical_file_name or block_name must be provided.", self.logger.exception, "FileLumi/List: Either logical_file_name or block_name must be provided.")
        #
        if run_num != -1:
            run_list = []
            wheresql_run_list=''
            wheresql_run_range=''
            for r in parseRunRange(run_num):
                if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long) or isinstance(r, str):
                    run_list.append(str(r))
                if isinstance(r, run_tuple):
                    if r[0] == r[1]:
                        dbsExceptionHandler('dbsException-invalid-input2', "DBS run range must be apart at least by 1.",
			self.logger.exception, "DBS run range must be apart at least by 1.")
                    wheresql_run_range = " FL.RUN_NUM between :minrun and :maxrun "
                    binds.update({"minrun":r[0]})
                    binds.update({"maxrun":r[1]})
            #
            if run_list:
                if len(run_list) == 1:
                    wheresql_run_list = " fl.RUN_NUM = :single_run "
                    binds.update({"single_run": long(run_list[0])})
                else:
                    wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                    run_generator, run_binds = create_token_generator(run_list)
                    sql = run_generator + sql
                    binds.update(run_binds)

            if wheresql_run_range and wheresql_run_list:
                sql += " and (" + wheresql_run_range + " or " +  wheresql_run_list + " )"
            elif wheresql_run_range and not wheresql_run_list:
                sql += " and " + wheresql_run_range
            elif not wheresql_run_range and wheresql_run_list:
                sql += " and " + wheresql_run_list
        self.logger.debug(sql)
        self.logger.debug(binds)
        if run_generator and lfn_generator:
            dbsExceptionHandler('dbsException-invalid-input2', "listFileLumiArray supports a list for either logical_file_name or run_num, but not both.",
                                self.logger.exception, "listFileLumiArray supports a list for either logical_file_name or run_num, but not both.")
        cursors = self.dbi.processData(sql, binds, conn, transaction=False, returnCursor=True)
        result = []
        file_run_lumi = {}
        event_ct = False
        for i in cursors:
            result.extend(self.formatCursor(i, size=100))
        #for migration, we need flat format to load the data into another DB.
        #self.logger.error(result) 
        if migration:
            #YG 09/2015. 
            for item in result:
                yield item
        else:
            if result and result[0]['event_count']:
                event_ct = True
            for i in result:
                r = i['run_num']
                f = i['logical_file_name']
                if event_ct:
                    file_run_lumi.setdefault((f, r), []).append([i['lumi_section_num'], i['event_count']])
                else:
                    file_run_lumi.setdefault((f, r), []).append(i['lumi_section_num'])
            for k, v in file_run_lumi.iteritems():
                if event_ct:
                    lumi = []
                    event = []
                    for le in v:
                        lumi.append(le[0])
                        event.append(le[1])
                    yield {'logical_file_name': k[0], 'run_num': k[1], 'lumi_section_num': lumi, 'event_count': event}
                else:
                    yield {'logical_file_name': k[0], 'run_num': k[1], 'lumi_section_num': v}
        del file_run_lumi
        del result
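The non-migration branch pivots the flat (file, run, lumi) rows into one record per (file, run) pair before yielding. The same setdefault-based grouping in isolation, with made-up rows:

    # Self-contained sketch of the grouping pattern used above.
    rows = [
        {'logical_file_name': '/store/a.root', 'run_num': 1, 'lumi_section_num': 10},
        {'logical_file_name': '/store/a.root', 'run_num': 1, 'lumi_section_num': 11},
        {'logical_file_name': '/store/b.root', 'run_num': 2, 'lumi_section_num': 5},
    ]
    file_run_lumi = {}
    for row in rows:
        key = (row['logical_file_name'], row['run_num'])
        file_run_lumi.setdefault(key, []).append(row['lumi_section_num'])
    for (lfn, run), lumis in file_run_lumi.items():
        print({'logical_file_name': lfn, 'run_num': run, 'lumi_section_num': lumis})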
Beispiel #19
0
    def execute(self, conn, dataset="", block_name="", logical_file_name="",
                release_version="", pset_hash="", app_name="", output_module_label="",
                run_num=-1, origin_site_name="", lumi_list=[], transaction=False):
        if not conn:
            dbsExceptionHandler("dbsException-db-conn-failed","Oracle/File/List. Expects db connection from upper layer.")
        sql = self.sql_cond
        binds = {}
        sql_sel = self.sql_sel
        run_generator = ''

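        # When filtering by run the select becomes DISTINCT, because the
        # FILE_LUMIS join fans each file out to one row per (run, lumi).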
        if run_num != -1 :
            sql_sel = sql_sel.replace("SELECT", "SELECT DISTINCT") + " , FL.RUN_NUM  "
            sql += " JOIN %sFILE_LUMIS FL on  FL.FILE_ID=F.FILE_ID " %(self.owner)
        if release_version or pset_hash or app_name or output_module_label :
            sql += """LEFT OUTER JOIN %sFILE_OUTPUT_MOD_CONFIGS FOMC ON FOMC.FILE_ID = F.FILE_ID
                      LEFT OUTER JOIN %sOUTPUT_MODULE_CONFIGS OMC ON OMC.OUTPUT_MOD_CONFIG_ID = FOMC.OUTPUT_MOD_CONFIG_ID
                      LEFT OUTER JOIN %sRELEASE_VERSIONS RV ON RV.RELEASE_VERSION_ID = OMC.RELEASE_VERSION_ID
                      LEFT OUTER JOIN %sPARAMETER_SET_HASHES PSH ON PSH.PARAMETER_SET_HASH_ID = OMC.PARAMETER_SET_HASH_ID
                      LEFT OUTER JOIN %sAPPLICATION_EXECUTABLES AEX ON AEX.APP_EXEC_ID = OMC.APP_EXEC_ID
                      """ % ((self.owner,)*5)
        #FIXME: the status check should only be done for normal/super users
        #sql += """WHERE F.IS_FILE_VALID = 1"""
        # for the time being let's list all files
        sql += """WHERE F.IS_FILE_VALID <> -1 """
        if block_name:
            sql += " AND B.BLOCK_NAME = :block_name"
            binds.update({"block_name":block_name})
        if logical_file_name:
            op = ("=", "like")["%" in logical_file_name]
            sql += " AND F.LOGICAL_FILE_NAME %s :logical_file_name" % op
            binds.update({"logical_file_name":logical_file_name})
        if dataset:
            sql += " AND D.DATASET = :dataset"
            binds.update({"dataset":dataset})
        if release_version:
            op = ("=", "like")["%" in release_version]
            sql += " AND RV.RELEASE_VERSION %s :release_version" % op
            binds.update({"release_version":release_version})
        if pset_hash:
            op = ("=", "like")["%" in pset_hash]
            sql += " AND PSH.PSET_HASH %s :pset_hash" % op
            binds.update({"pset_hash" :pset_hash})
        if app_name:
            op = ("=", "like")["%" in app_name]
            sql += " AND AEX.APP_NAME %s :app_name" % op
            binds.update({"app_name":  app_name})
        if output_module_label:
            op = ("=", "like")["%" in output_module_label]
            sql += " AND OMC.OUTPUT_MODULE_LABEL  %s :output_module_label" %op
            binds.update({"output_module_label":output_module_label})
        if origin_site_name:
            op = ("=", "like")["%" in origin_site_name]
            sql += " AND B.ORIGIN_SITE_NAME %s :origin_site_name" % op
            binds.update({"origin_site_name":origin_site_name})
        if run_num != -1 :
            run_list=[]
            wheresql_run_list=''
            wheresql_run_range=''

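            # A run range (run_tuple) cannot be combined with lumi_list, since
            # lumi sections are only meaningful within a single run.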
            for r in parseRunRange(run_num):
                if isinstance(r, str) or isinstance(r, int) or isinstance(r, long):
                    run_list.append(str(r))
                if isinstance(r, run_tuple):
                    if r[0] == r[1]:
                        dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.")
                    if not lumi_list:
                        wheresql_run_range = " FL.RUN_NUM between :minrun and :maxrun "
                        binds.update({"minrun":r[0]})
                        binds.update({"maxrun":r[1]})
                    else:
                        dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.")
            #
            if run_list and not lumi_list:
                wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
                run_generator, run_binds = create_token_generator(run_list)
                #sql =  "{run_generator}".format(run_generator=run_generator) + sql
                binds.update(run_binds)
            if wheresql_run_range and wheresql_run_list:
                sql += " and (" + wheresql_run_range + " or " +  wheresql_run_list + " )"
            elif wheresql_run_range and not wheresql_run_list:
                sql += " and " + wheresql_run_range
            elif not wheresql_run_range and wheresql_run_list:
                sql += " and "  + wheresql_run_list
        # Make sure that when we have a lumi_list there is only ONE run -- YG 14/05/2013
        if lumi_list and len(lumi_list) != 0:
            if run_num == -1 or len(run_list) != 1:
                dbsExceptionHandler('dbsException-invalid-input', "When lumi_list is given, only one run is allowed.")
            sql += " AND fl.RUN_NUM = :run_num  AND FL.LUMI_SECTION_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
            # lumi_list must be converted to a list of strings for the token generator -- YG 10/03/13
            lumi_list = map(str, lumi_list)
            lumi_generator, lumi_binds = create_token_generator(lumi_list)
            sql_sel = "{lumi_generator}".format(lumi_generator=lumi_generator) + sql_sel
            binds.update(lumi_binds)
            binds["run_num"]=run_list[0]
        #
        sql = run_generator + sql_sel + sql

        cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
        result = []
        for i in range(len(cursors)):
            result.extend(self.formatCursor(cursors[i]))
        return result
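As a usage note, the lumi_list branch forces the query onto a single run. A hypothetical call against this DAO; names and values are invented for illustration, and the columns returned depend on self.sql_sel:

    # Hypothetical invocation; `dao` and `conn` come from the DBS server wiring.
    files = dao.execute(conn,
                        dataset='/PrimDS/ProcDS-v1/RECO',  # invented dataset name
                        run_num=194075,                    # exactly one run
                        lumi_list=[1, 2, 3])               # lumis within that run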